
Linux_SDK_V0.9.5

Commit 66ec850753 by thead_admin, 1 year ago
100 changed files with 55141 additions and 0 deletions
  1. .gitignore (+24, -0)
  2. Makefile (+142, -0)
  3. README.md (+19, -0)
  4. driver/GPLHEADER (+356, -0)
  5. driver/Kconfig.example (+29, -0)
  6. driver/Makefile (+49, -0)
  7. driver/Makefile.testing (+168, -0)
  8. driver/README (+29, -0)
  9. driver/build.mk (+161, -0)
 10. driver/dmabuf_exporter/FindDmaBufExporter.cmake (+8, -0)
 11. driver/dmabuf_exporter/Makefile (+36, -0)
 12. driver/dmabuf_exporter/README (+17, -0)
 13. driver/dmabuf_exporter/de_common.c (+176, -0)
 14. driver/dmabuf_exporter/de_heap.h (+28, -0)
 15. driver/dmabuf_exporter/de_heap_carveout.c (+468, -0)
 16. driver/dmabuf_exporter/de_heap_coherent.c (+303, -0)
 17. driver/dmabuf_exporter/de_heap_ion.c (+212, -0)
 18. driver/dmabuf_exporter/de_heap_ion.h (+46, -0)
 19. driver/dmabuf_exporter/de_heap_ion_example.c (+205, -0)
 20. driver/dmabuf_exporter/de_heap_noncoherent.c (+496, -0)
 21. driver/dmabuf_exporter/test/dma-map.c (+77, -0)
 22. driver/dmabuf_exporter/test/dma-test.c (+61, -0)
 23. driver/dmabuf_exporter/uapi/dmabuf_exporter.h (+20, -0)
 24. driver/dmabuf_exporter/uapi/kernel_4x14/ion.h (+136, -0)
 25. driver/dmabuf_exporter/uapi/kernel_4x4/ion.h (+203, -0)
 26. driver/fenrir_loki/Makefile (+3, -0)
 27. driver/fenrir_loki/loki-intc.c (+159, -0)
 28. driver/fenrir_loki/loki-main.c (+110, -0)
 29. driver/fenrir_loki/loki.h (+44, -0)
 30. driver/img_mem/Makefile (+33, -0)
 31. driver/img_mem/img_mem_anonymous.c (+395, -0)
 32. driver/img_mem/img_mem_carveout.c (+798, -0)
 33. driver/img_mem/img_mem_coherent.c (+204, -0)
 34. driver/img_mem/img_mem_dmabuf.c (+502, -0)
 35. driver/img_mem/img_mem_ion.c (+266, -0)
 36. driver/img_mem/img_mem_man.c (+2666, -0)
 37. driver/img_mem/img_mem_man_priv.h (+213, -0)
 38. driver/img_mem/img_mem_ocm.c (+173, -0)
 39. driver/img_mem/img_mem_unified.c (+1002, -0)
 40. driver/img_mem/img_pdump.c (+189, -0)
 41. driver/img_mem/imgmmu/imgmmu.c (+1449, -0)
 42. driver/img_mem/imgmmu/kernel_heap.c (+307, -0)
 43. driver/img_mem/imgmmu/mmu_defs.h (+142, -0)
 44. driver/img_mem/imgmmu/mmulib/heap.h (+159, -0)
 45. driver/img_mem/imgmmu/mmulib/mmu.h (+449, -0)
 46. driver/include/hwdefs/aura_system.h (+4, -0)
 47. driver/include/hwdefs/gyrus_system.h (+24, -0)
 48. driver/include/hwdefs/magna_system.h (+4, -0)
 49. driver/include/hwdefs/mirage_system.h (+4, -0)
 50. driver/include/hwdefs/nn_sys_cr_gyrus.h (+355, -0)
 51. driver/include/hwdefs/nn_sys_cr_vagus.h (+364, -0)
 52. driver/include/hwdefs/vagus_system.h (+8, -0)
 53. driver/include/hwdefs/vha_cr_aura.h (+5471, -0)
 54. driver/include/hwdefs/vha_cr_gyrus.h (+4998, -0)
 55. driver/include/hwdefs/vha_cr_magna.h (+6553, -0)
 56. driver/include/hwdefs/vha_cr_mirage.h (+3171, -0)
 57. driver/include/hwdefs/vha_tb.h (+101, -0)
 58. driver/include/img_mem_man.h (+296, -0)
 59. driver/include/nexef_plat.h (+55, -0)
 60. driver/include/uapi/img_mem_man.h (+118, -0)
 61. driver/include/uapi/version.h (+49, -0)
 62. driver/include/uapi/vha.h (+423, -0)
 63. driver/include/uapi/vha_errors.h (+116, -0)
 64. driver/include/vha_drv_common.h (+72, -0)
 65. driver/nexef_platform/Makefile (+24, -0)
 66. driver/nexef_platform/README.md (+110, -0)
 67. driver/nexef_platform/nexef_plat.c (+1799, -0)
 68. driver/nexef_platform/set_fpga_freq.py (+12, -0)
 69. driver/vha/Makefile (+137, -0)
 70. driver/vha/multi/vha_dev.c (+4213, -0)
 71. driver/vha/multi/vha_mmu.c (+261, -0)
 72. driver/vha/multi/vha_mt19937.c (+229, -0)
 73. driver/vha/multi/vha_mt19937.h (+93, -0)
 74. driver/vha/multi/vha_regs.h (+391, -0)
 75. driver/vha/multi/vha_sc_dbg.c (+264, -0)
 76. driver/vha/multi/vha_wm.c (+1896, -0)
 77. driver/vha/platform/vha_plat.h (+104, -0)
 78. driver/vha/platform/vha_plat_apollo.c (+862, -0)
 79. driver/vha/platform/vha_plat_dt.c (+386, -0)
 80. driver/vha/platform/vha_plat_dt.h (+78, -0)
 81. driver/vha/platform/vha_plat_dt_example.c (+156, -0)
 82. driver/vha/platform/vha_plat_dt_example.dts (+60, -0)
 83. driver/vha/platform/vha_plat_dt_fenrir.dts (+81, -0)
 84. driver/vha/platform/vha_plat_dummy.c (+361, -0)
 85. driver/vha/platform/vha_plat_emu.c (+641, -0)
 86. driver/vha/platform/vha_plat_frost.c (+1004, -0)
 87. driver/vha/platform/vha_plat_nexef.c (+491, -0)
 88. driver/vha/platform/vha_plat_odin.c (+1152, -0)
 89. driver/vha/platform/vha_plat_orion.c (+1065, -0)
 90. driver/vha/platform/vha_plat_param_thead_light_fpga_c910.h (+36, -0)
 91. driver/vha/platform/vha_plat_pci.c (+483, -0)
 92. driver/vha/platform/vha_plat_thead.c (+371, -0)
 93. driver/vha/platform/vha_plat_thead_light.c (+181, -0)
 94. driver/vha/platform/vha_plat_thead_light_fpga_c910.c (+122, -0)
 95. driver/vha/single/vha_cnn.c (+750, -0)
 96. driver/vha/single/vha_dev.c (+1581, -0)
 97. driver/vha/single/vha_dev_ax2.c (+190, -0)
 98. driver/vha/single/vha_dev_ax3.c (+207, -0)
 99. driver/vha/single/vha_mmu.c (+241, -0)
100. driver/vha/single/vha_regs.h (+191, -0)

+ 24 - 0
.gitignore

@@ -0,0 +1,24 @@
+/.auto.deps
+/.config.cmd
+/.config.old
+/..config.tmp
+/.config
+*.tmp
+*.depend
+*.o
+*.a
+*.o.d
+*.o.cmd
+*.a.cmd
+*.ko.cmd
+*.ko
+*.mod
+*.mod.c
+*.mod.cmd
+*.order
+*.orig
+*.patched
+Module.symvers
+output/
+obj/
+imgtvm/

+ 142 - 0
Makefile

@@ -0,0 +1,142 @@
+##
+ # Copyright (C) 2020 Alibaba Group Holding Limited
+##
+test = $(shell if [ -f "../.param" ]; then echo "exist"; else echo "noexist"; fi)
+ifeq ("$(test)", "exist")
+  include ../.param
+endif
+
+SDK_VER=v0.9
+
+# Configurations options
+CONFIG_DEBUG_MODE=0
+ROOTFS_INSTALL=0
+CONFIG_DUMMY_DRIVER=0
+
+# Board
+CONFIG_BOARD_LIGHT_FPGA_C910_ARRAY:=light_fpga_fm_c910 light-fm-fpga
+CONFIG_BOARD_LIGHT_ARRAY:=light-fm light-%
+
+ifneq ($(filter $(CONFIG_BOARD_LIGHT_FPGA_C910_ARRAY),$(BOARD_NAME)),)
+  TARGET_NPU="IMG-AX3386"
+  TARGE_CFG=CONFIG_VHA_THEAD_LIGHT_FPGA_C910=y
+else
+  ifneq ($(filter $(CONFIG_BOARD_LIGHT_ARRAY),$(BOARD_NAME)),)
+    TARGET_NPU="IMG-AX3386"
+    TARGE_CFG=CONFIG_VHA_THEAD_LIGHT=y
+  else
+    $(error "Undefined target board:$(BOARD_NAME)")
+  endif
+endif
+
+ifeq ($(CONFIG_DUMMY_DRIVER),1)
+  TARGE_CFG=CONFIG_VHA_DUMMY=y
+endif
+
+
+CONFIG_BUILD_DRV_EXTRA_PARAM:=""
+
+ifeq ("$(BUILD_SYSTEM)","YOCTO_BUILD")
+  export PATH_TO_SYSROOT=${SYSROOT_DIR}
+  export TOOLSCHAIN_PATH=${TOOLCHAIN_DIR}
+  export TOOLCHAIN_HOST=${CROSS_COMPILE}
+else
+  export PATH_TO_SYSROOT=${BUILDROOT_DIR}/output/host/riscv64-buildroot-linux-gnu/sysroot
+  export TOOLSCHAIN_PATH=${BUILDROOT_DIR}/output/host
+  export TOOLCHAIN_HOST=${TOOLSCHAIN_PATH}/bin/riscv64-unknown-linux-gnu-
+endif
+export PATH_TO_BUILDROOT=$(BUILDROOT_DIR)
+
+DIR_TARGET_BASE=bsp/npu
+DIR_TARGET_KO  =bsp/npu/ko
+
+MODULE_NAME=NPU
+BUILD_LOG_START="\033[47;30m>>> $(MODULE_NAME) $@ begin\033[0m"
+BUILD_LOG_END  ="\033[47;30m<<< $(MODULE_NAME) $@ end\033[0m"
+
+#
+# Do a parallel build with multiple jobs, based on the number of CPUs online
+# in this system: 'make -j8' on an 8-CPU system, etc.
+#
+# (To override it, run 'make JOBS=1' and similar.)
+#
+ifeq ($(JOBS),)
+  JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null)
+  ifeq ($(JOBS),)
+    JOBS := 1
+  endif
+endif
+
+all:    info driver install_local install_rootfs
+.PHONY: info driver install_local install_rootfs \
+        install_prepare clean_driver clean_local clean_rootfs clean
+
+info:
+	@echo $(BUILD_LOG_START)
+	@echo "  ====== Build Info from repo project ======"
+	@echo "    BUILD_SYSTEM="$(BUILD_SYSTEM)
+	@echo "    BUILDROOT_DIR="$(BUILDROOT_DIR)
+	@echo "    SYSROOT_DIR="$(SYSROOT_DIR)
+	@echo "    CROSS_COMPILE="$(CROSS_COMPILE)
+	@echo "    LINUX_DIR="$(LINUX_DIR)
+	@echo "    ARCH="$(ARCH)
+	@echo "    BOARD_NAME="$(BOARD_NAME)
+	@echo "    KERNEL_ID="$(KERNELVERSION)
+	@echo "    KERNEL_DIR="$(LINUX_DIR)
+	@echo "    CC="$(CC)
+	@echo "    CXX="$(CXX)
+	@echo "    LD="$(LD)
+	@echo "    LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)
+	@echo "    rpath="$(rpath)
+	@echo "    rpath-link="$(rpath-link)
+	@echo "    INSTALL_DIR_ROOTFS="$(INSTALL_DIR_ROOTFS)
+	@echo "    INSTALL_DIR_SDK="$(INSTALL_DIR_SDK)
+	@echo "  ====== Build config by current module ======"
+	@echo "    TARGET_NPU="$(TARGET_NPU)
+	@echo "    CONFIG_DEBUG_MODE="$(CONFIG_DEBUG_MODE)
+	@echo "    TARGE_CFG="$(TARGE_CFG)
+	@echo "    CONFIG_BUILD_DRV_EXTRA_PARAM="$(CONFIG_BUILD_DRV_EXTRA_PARAM)
+	@echo "    SDK_VERSION="$(SDK_VER)
+	@echo $(BUILD_LOG_END)
+
+driver:
+	@echo $(BUILD_LOG_START)
+	make -C driver -f build.mk $(TARGE_CFG)
+	@echo $(BUILD_LOG_END)
+
+clean_driver:
+	@echo $(BUILD_LOG_START)
+	make -C driver -f build.mk $(TARGE_CFG) clean
+	rm -rf ./output/rootfs/$(DIR_TARGET_KO)
+	@echo $(BUILD_LOG_END)
+
+install_prepare:
+	mkdir -p ./output/rootfs/$(DIR_TARGET_KO)
+
+install_local: driver install_prepare
+	@echo $(BUILD_LOG_START)
+	find ./driver -name "*.ko" | xargs -i cp -f {} ./output/rootfs/$(DIR_TARGET_KO)
+	@if [ -n "$$(command -v tree)" ]; then \
+	    tree ./output/rootfs;             \
+	fi
+	@echo $(BUILD_LOG_END)
+
+install_rootfs: install_local
+	@echo $(BUILD_LOG_START)
+	@if [ $(ROOTFS_INSTALL) -eq 1 ]; then \
+	    mkdir -p $(INSTALL_DIR_ROOTFS)/$(DIR_TARGET_BASE); \
+	    rm -rf $(INSTALL_DIR_ROOTFS)/$(DIR_TARGET_BASE)/*; \
+	    cp -r output/rootfs/$(DIR_TARGET_BASE)/* $(INSTALL_DIR_ROOTFS)/$(DIR_TARGET_BASE); \
+	fi
+	@echo $(BUILD_LOG_END)
+
+clean_local:
+	rm -rf ./output
+
+clean_rootfs:
+	if [ -d "$(INSTALL_DIR_ROOTFS)/$(DIR_TARGET_BASE)" ]; then \
+	    rm -rf $(INSTALL_DIR_ROOTFS)/$(DIR_TARGET_BASE); \
+	fi
+
+clean: clean_driver clean_local clean_rootfs 
+

+ 19 - 0
README.md

@@ -0,0 +1,19 @@
+How to get the code
+
+git clone git@gitlab.alibaba-inc.com:thead-linux/npu-ax3386-kernel.git
+How to build
+
+Build within repo project
+Build out of repo project
+Build as part of a buildroot SoC project
+Build the kernel module:
+cd driver
+Modify PATH_TO_BUILDROOT in driver/build.mk
+make -f build.mk CONFIG_VHA_THEAD_LIGHT_FPGA_C910=y
+Description of each directory
+
+driver/: Linux kernel module driver.
+Where the vendor packages come from and their extraction target directories
+
+From: https://partnerportal.imgtec.com/Component/Details/22492 (Upload Date: 25-Aug-2020)
+NNA-REL_2.6-cl5777119-Linux-gpl_src.tar.gz: driver/

+ 356 - 0
driver/GPLHEADER

@@ -0,0 +1,356 @@
+
+   NOTE! This copyright does *not* cover user programs that use kernel
+ services by normal system calls - this is merely considered normal use
+ of the kernel, and does *not* fall under the heading of "derived work".
+ Also note that the GPL below is copyrighted by the Free Software
+ Foundation, but the instance of code that it refers to (the Linux
+ kernel) is copyrighted by me and others who actually wrote it.
+
+ Also note that the only valid version of the GPL as far as the kernel
+ is concerned is _this_ particular version of the license (ie v2, not
+ v2.2 or v3.x or whatever), unless explicitly otherwise stated.
+
+			Linus Torvalds
+
+----------------------------------------
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.

+ 29 - 0
driver/Kconfig.example

@@ -0,0 +1,29 @@
+menuconfig VHA
+       tristate "IMG neural network accelerator"
+       select GENERIC_ALLOCATOR
+
+if VHA
+choice 
+      prompt "VHA platform"
+      config VHA_DUMMY
+             bool "driver runs without hardware"
+      config VHA_DT_EXAMPLE
+             depends on OF
+             bool "driver uses device tree"
+      config VHA_ORION
+             bool "driver runs on IMG Orion/SFF platform"
+endchoice
+config TARGET_OSID
+      int
+      default 0
+choice
+      prompt "VHA hardware series"
+      config HW_AX2
+             bool "driver runs AX2145 and AX2185 hardware"
+      config HW_AX3
+             bool "driver runs AX31xx/AX33xx/AX35xx hardware"
+      config HW_AX3_MC
+             bool "driver runs AX3797 multicore hardware"
+endchoice
+endif
+
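
These Kconfig choices reach the driver sources as CONFIG_* preprocessor symbols once the module is built against a configured kernel. A minimal illustrative sketch (not code from this SDK; the macro name is invented) of how a C file can branch on the hardware-series choice, testing the multicore case first because the build files also enable CONFIG_HW_AX3 for it:

/* Illustrative only: branching on the Kconfig symbols declared above. */
#if defined(CONFIG_HW_AX2)
#define VHA_SERIES_DESC "AX2145/AX2185 (Mirage)"
#elif defined(CONFIG_HW_AX3_MC)
#define VHA_SERIES_DESC "AX3797 multicore (Magna)"
#elif defined(CONFIG_HW_AX3)
#define VHA_SERIES_DESC "AX31xx/AX33xx/AX35xx (Aura/Vagus)"
#else
#error "no VHA hardware series selected"
#endif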

+ 49 - 0
driver/Makefile

@@ -0,0 +1,49 @@
+ifeq ($(CONFIG_VHA_NEXEF), y)
+  obj-$(CONFIG_VHA) += nexef_platform/
+endif
+obj-$(CONFIG_VHA) += vha/ img_mem/
+#obj-$(CONFIG_DMA_SHARED_BUFFER) += dmabuf_exporter/
+obj-$(CONFIG_LOKI) += fenrir_loki/
+
+subdir-ccflags-y += -I$(src)/include
+# if CONFIG_DYNAMIC_DEBUG is not defined kernel-wide, as in Android 11+,
+# we can still use the facility if CONFIG_DYNAMIC_DEBUG_CORE is defined
+subdir-ccflags-$(CONFIG_DYNAMIC_DEBUG_CORE) += -DDYNAMIC_DEBUG_MODULE
+
+# if building the kernel module in-tree, these config options
+# should be put into a Kconfig file.
+# if building out-of-tree, defining them here is as good as any.
+# CONFIG_VHA: build the VHA driver
+
+# hardware options:
+# CONFIG_VHA_DUMMY:        driver runs without hardware
+# CONFIG_VHA_DUMMY_HW_SIM: driver runs without hardware but simulates hw processing time
+# CONFIG_VHA_PCI:          driver runs with generic PCI hardware
+# CONFIG_VHA_EMU:          driver runs with hardware emulator using legacy ICE interface
+# CONFIG_VHA_FROST:        driver runs with hardware emulator using ICE2 interface
+# CONFIG_VHA_APOLLO:       driver runs with IMG Apollo TCF5/TCF6 fpga hardware
+# CONFIG_VHA_ORION:        driver runs with IMG Orion/SFF hardware
+# CONFIG_VHA_ODIN:         driver runs with IMG Odin TCF5/TCFVU fpga hardware
+# CONFIG_VHA_NEXEF:        driver runs with the 3NX-F base platform driver
+# CONFIG_VHA_DT_EXAMPLE:   driver example using a device tree
+
+# CONFIG_VHA_SERIES : either AX2 or AX3
+# CONFIG_HW_AX2:      driver runs AX2145 and AX2185 hardware
+# CONFIG_HW_AX3:      driver runs AX31xx/AX33xx/AX35xx hardware
+# CONFIG_HW_AX3_MC/CONFIG_HW_MULTICORE: driver runs AX3797 multicore hardware
+
+# system configuration options
+# CONFIG_VHA_SYS_MIRAGE:   driver runs with Mirage system configuration file. It is set by default when CONFIG_HW_AX2 is set. 
+# CONFIG_VHA_SYS_AURA:     driver runs with Aura system configuration file. It is set by default when CONFIG_HW_AX3 is set. 
+# CONFIG_VHA_SYS_VAGUS:    driver runs with Vagus system configuration file. Applicable when CONFIG_HW_AX3 is set.
+# CONFIG_VHA_SYS_MAGNA:    driver runs with Magna system configuration file. It is set by default when CONFIG_HW_MULTICORE is set.
+
+# misc options:
+# CONFIG_TARGET_OSID:           driver is compiled for selected OS id. Default: 0 (OS0), Applicable for CONFIG_HW_AX3 only. 
+# CONFIG_VHA_MMU_MIRRORED_CTX:  driver is compiled with mirrored MMU page tables regarding MODEL & IO. Default: enabled, Applicable for CONFIG_HW_AX3 only. 
+# CONFIG_VHA_SCF:               driver is compiled with safety critical features. Default: enabled, Applicable for CONFIG_VHA_SYS_VAGUS & CONFIG_VHA_SYS_MAGNA.
+# CONFIG_VHA_NCORES:            driver is compiled for specified number of cores. Default: 6, Applicable for CONFIG_HW_MULTICORE.
+# CONFIG_VHA_ENHANCED_APM:      driver is compiled to support Enhanced Active Power Management. Default: enabled, Applicable for CONFIG_HW_MULTICORE.
+# CONFIG_VHA_LO_PRI_SUBSEGS:    driver is compiled to support low priority subsegments. Default: disabled, Applicable for CONFIG_HW_AX3 only.
+ 
+

+ 168 - 0
driver/Makefile.testing

@@ -0,0 +1,168 @@
+include Makefile
+
+# if building the kernel module in-tree, these config options
+# should be put into a Kconfig file.
+# if building out-of-tree, defining them here is as good as any.
+# CONFIG_VHA: build the VHA driver
+export CONFIG_VHA := m
+
+# hardware options:
+# CONFIG_VHA_DUMMY:        driver runs without hardware
+# CONFIG_VHA_DUMMY_HW_SIM: driver runs without hardware but simulates hw processing time
+# CONFIG_VHA_PCI:          driver runs with generic PCI hardware
+# CONFIG_VHA_EMU:          driver runs with hardware emulator using legacy ICE interface
+# CONFIG_VHA_FROST:        driver runs with hardware emulator using ICE2 interface
+# CONFIG_VHA_APOLLO:       driver runs with IMG Apollo fpga baseboard
+# CONFIG_VHA_ORION:        driver runs with IMG Orion/Sirius hardware
+# CONFIG_VHA_ODIN:         driver runs with IMG Sleipnir/Odin fpga baseboard
+# CONFIG_VHA_NEXEF:        driver runs with the 3NX-F base platform driver and Gyrus system configuration file
+# CONFIG_VHA_DT_EXAMPLE:   driver example using a device tree
+
+# CONFIG_VHA_SERIES :      either AX2 or AX3
+# CONFIG_HW_AX2:           driver runs AX2145 and AX2185 hardware
+# CONFIG_HW_AX3:           driver runs AX31xx/AX33xx/AX35xx hardware
+# CONFIG_HW_AX3_MC/CONFIG_HW_MULTICORE: driver runs AX3797 multicore hardware
+#
+# system configuration options
+# CONFIG_VHA_SYS_MIRAGE:   driver runs with Mirage system configuration file. It is set by default when CONFIG_HW_AX2 is set. 
+# CONFIG_VHA_SYS_AURA:     driver runs with Aura system configuration file. It is set by default when CONFIG_HW_AX3 is set. 
+# CONFIG_VHA_SYS_VAGUS:    driver runs with Vagus system configuration file. Applicable when CONFIG_HW_AX3 is set.
+# CONFIG_VHA_SYS_MAGNA:    driver runs with Magna system configuration file. It is set by default when CONFIG_HW_MULTICORE is set.
+
+# misc options:
+# CONFIG_TARGET_OSID:           driver is compiled for selected OS id. Default: 0 (OS0), Applicable for CONFIG_HW_AX3 only. 
+# CONFIG_VHA_MMU_MIRRORED_CTX:  driver is compiled with mirrored MMU page tables regarding MODEL & IO. Default: enabled, Applicable for CONFIG_HW_AX3 only. 
+# CONFIG_VHA_SCF:               driver is compiled with safety critical features. Default: enabled, Applicable for CONFIG_VHA_SYS_VAGUS & CONFIG_VHA_SYS_MAGNA.
+# CONFIG_VHA_NCORES:            driver is compiled for specified number of cores. Default: 6, Applicable for CONFIG_HW_MULTICORE.
+# CONFIG_VHA_ENHANCED_APM:      driver is compiled to support Enhanced Active Power Management. Default: enabled, Applicable for CONFIG_HW_MULTICORE.
+
+# Aliases
+CONFIG_VHA_APOLLO := $(CONFIG_VHA_FPGA)
+CONFIG_HW_AX3_MC  := $(CONFIG_HW_MULTICORE)
+
+subdir-ccflags-y += -Wall -g
+# this should not be needed, but on our fpga, ubuntu kernel 4.4.0-116 does not work without it
+subdir-ccflags-$(CONFIG_VHA_APOLLO) += -DRETPOLINE
+subdir-ccflags-$(CONFIG_VHA_ODIN) += -DRETPOLINE
+
+# fail if target not specified
+ifeq ($(CONFIG_VHA_PCI)$(CONFIG_VHA_EMU)$(CONFIG_VHA_FROST)$(CONFIG_VHA_APOLLO)$(CONFIG_VHA_DUMMY)$(CONFIG_VHA_DUMMY_HW_SIM)$(CONFIG_VHA_ORION)$(CONFIG_VHA_ODIN)$(CONFIG_VHA_DT_EXAMPLE)${CONFIG_VHA_NEXEF},)
+  $(error no VHA platform specified. Try CONFIG_VHA_DUMMY=y or CONFIG_VHA_APOLLO=y or CONFIG_VHA_ODIN=y or CONFIG_VHA_ORION=y etc)
+endif
+
+#------------------------------------
+# for internal use
+#------------------------------------
+export CONFIG_FUNCTONAL_TEST_CONTROL := y
+export CONFIG_FORCE_IOREG_DEBUG := y
+
+ifeq ($(KERNELRELEASE),)
+
+KDIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+# Hardware variant
+ifeq ($(CONFIG_HW_AX2)$(CONFIG_HW_AX3)$(CONFIG_HW_AX3_MC),)
+  $(info no HW variant specified. Defaulting to Mirage: CONFIG_HW_AX2=y. To build for Aura, please specify: CONFIG_HW_AX3=y; to build for Magna, please specify: CONFIG_HW_AX3_MC=y (or CONFIG_HW_MULTICORE=y))
+  export CONFIG_HW_AX2 := y
+endif
+
+ifeq ($(CONFIG_HW_AX2), y)
+  $(info Building Mirage target!)
+  
+  ifeq ($(CONFIG_VHA_SCF), y)
+    $(error Safety critical features not supported by Mirage!)
+  endif
+  
+  ifeq ($(CONFIG_VHA_LO_PRI_SUBSEGS), y)
+    $(error Low priority subsegments not supported by Mirage!)
+  endif
+  
+  export CONFIG_VHA_MMU_MIRRORED_CTX ?= n
+  
+  ifeq ($(CONFIG_VHA_MMU_MIRRORED_CTX),)
+    $(error Separate MMU contexts not supported by Mirage!)
+  endif
+endif
+
+ifeq ($(CONFIG_HW_AX3_MC), y)
+    ifeq ($(CONFIG_VHA_LO_PRI_SUBSEGS), y)
+      $(error Low priority subsegments not supported by Magna!)
+    endif
+
+    export CONFIG_HW_AX3 := y
+    export CONFIG_VHA_MMU_MIRRORED_CTX := y
+    export CONFIG_VHA_SYS_MAGNA := y
+    export CONFIG_VHA_NCORES ?= 6
+    export CONFIG_VHA_SCF ?= y
+    export CONFIG_VHA_ENHANCED_APM ?= y
+    
+    $(info Building Magna target for $(CONFIG_VHA_NCORES) core(s)!)
+
+    # Safety critical features
+    ifeq ($(CONFIG_VHA_SCF), y)
+      $(info Building with Magna safety critical features!)
+    endif
+    
+    # Selective APM
+    ifeq ($(CONFIG_VHA_ENHANCED_APM), y)
+      $(info Building with Enhanced APM feature !)
+    endif
+endif
+
+ifeq ($(CONFIG_HW_AX3), y) 
+  ifeq ($(CONFIG_HW_AX3_MC),)
+    # Default OS target = 0
+    export CONFIG_TARGET_OSID ?= 0
+    export CONFIG_VHA_MMU_MIRRORED_CTX ?= y
+
+    $(info Building Aura/Vagus target for OS$(CONFIG_TARGET_OSID) !)
+
+    ifeq ($(CONFIG_VHA_MMU_MIRRORED_CTX),y)
+      $(info Building target with mirrored mmu contexts!)
+    else  
+      $(info Building target with separate mmu contexts!)
+    endif
+
+    # System config
+    ifeq ($(CONFIG_VHA_SYS_AURA)$(CONFIG_VHA_SYS_VAGUS),)
+      $(info no system config specified. Defaulting to Aura: CONFIG_VHA_SYS_AURA=y. To build for Vagus, please specify: CONFIG_VHA_SYS_VAGUS=y)
+      export CONFIG_VHA_SYS_AURA := y
+    endif
+
+    ifeq ($(CONFIG_VHA_LO_PRI_SUBSEGS), y)
+      $(info Building with Low priority subsegments QoS feature !)
+    endif
+    
+    ifeq ($(CONFIG_VHA_SYS_VAGUS),)
+      ifeq ($(CONFIG_VHA_SCF), y)
+        # No safety critical features
+        $(error Safety critical features not supported by Aura!)
+      endif   
+    else
+      export CONFIG_VHA_SCF ?= y
+      # Safety critical features
+      ifeq ($(CONFIG_VHA_SCF), y)
+        $(info Building with Vagus safety critical features!)
+      endif
+    endif
+  endif  
+endif
+
+# kernel warning levels: as high as possible:
+# W=12 and W=123 seem to warn about linux kernel headers, so use W=1.
+KWARN := W=1
+
+ifneq (,$(shell which sparse))
+# C=1: use 'sparse' for extra warnings, if it is avail.
+SPARSE := C=1
+endif
+
+modules:
+	$(MAKE) -C $(KDIR) M=$(PWD) $(SPARSE) $(KWARN) EXTRA_CFLAGS="$(EXTRA_CFLAGS)"
+clean:
+	$(MAKE) -C $(KDIR) M=$(PWD) clean
+check:
+	cd $(KDIR); scripts/checkpatch.pl -f `find $(PWD)/vha $(PWD)/include $(PWD)/img_mem -name "*\.[ch]"`
+
+endif # KERNELRELEASE

+ 29 - 0
driver/README

@@ -0,0 +1,29 @@
+TO ADD to kernel source tree:
+$ cp -r . /my/path/to/kernel/drivers/directory/nna
+$ editor /my/path/to/kernel/drivers/directory/Makefile  # add reference to nna
+$ editor Kconfig                                        # add CONFIG_VHA etc
+
+TO BUILD a dummy driver:
+$ make KDIR=/path/to/linux/kernel CONFIG_VHA_DUMMY=y
+TO BUILD a QEMU PCI driver:
+$ make CONFIG_VHA_PCI=y
+
+TO INSTALL using system memory: 
+$ insmod img_mem/img_mem.ko
+$ insmod vha/vha.ko
+
+TO CREATE SOME CARVEOUT MEMORY:
+edit /etc/default/grub:
+     GRUB_CMDLINE_LINUX="mem=768m"
+# update-grub; reboot
+
+TO INSTALL using CARVEOUT:
+$ insmod img_mem/img_mem.ko
+$ insmod vha/vha.ko carveout_phys_start=$((768*1048576)) carveout_size=$((255*1048576))
+# (top 1MB of iomem is reserved!)
+
+TO ENABLE EXTRA DEBUG:
+$ echo module vha +p     | sudo tee /sys/kernel/debug/dynamic_debug/control
+$ echo module img_mem +p | sudo tee /sys/kernel/debug/dynamic_debug/control
+OR:
+$ echo "file */vha/* +p" | sudo tee /sys/kernel/debug/dynamic_debug/control
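
On the driver side, parameters such as carveout_phys_start and carveout_size in the insmod line above are ordinary module parameters. An illustrative sketch of their declaration (the actual vha.ko declarations live in the vha sources, not shown here; de_heap_carveout.c later in this commit uses the same pattern for its carveout_base/carveout_size):

#include <linux/module.h>

/* Illustrative: parameter declarations matching the insmod line above. */
static unsigned long carveout_phys_start;
module_param(carveout_phys_start, ulong, 0444);
MODULE_PARM_DESC(carveout_phys_start, "physical base address of the carveout");

static unsigned long carveout_size;
module_param(carveout_size, ulong, 0444);
MODULE_PARM_DESC(carveout_size, "carveout size in bytes");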

+ 161 - 0
driver/build.mk

@@ -0,0 +1,161 @@
+##
+ # Copyright (C) 2020 Alibaba Group Holding Limited
+##
+include Makefile
+
+ifeq ($(BUILDROOT_DIR),)
+  $(error BUILDROOT_DIR is empty)
+endif
+
+LINUX_DIR     ?= $(BUILDROOT_DIR)/output/build/linux-custom
+TOOLCHAIN_DIR ?= $(BUILDROOT_DIR)/output/host/bin
+CROSS_COMPILE ?= ${TOOLCHAIN_DIR}/riscv64-unknown-linux-gnu-
+export ARCH   ?= riscv
+
+# if building the kernel module in-tree, these config options
+# should be put into a Kconfig file.
+# if building out-of-tree, defining them here is as good as any.
+# CONFIG_VHA: build the VHA driver
+export CONFIG_VHA := m
+
+# hardware options:
+# CONFIG_VHA_DUMMY:                 driver runs without hardware
+# CONFIG_VHA_THEAD_LIGHT_FPGA_C910: driver runs with T-Head Light-FPGA hw platform using a device tree
+
+# system configuration options
+# CONFIG_VHA_SYS_MIRAGE:   driver runs with Mirage system configuration file. It is set by default when CONFIG_HW_AX2 is set.
+# CONFIG_VHA_SYS_AURA:     driver runs with Aura system configuration file. It is set by default when CONFIG_HW_AX3 is set.
+# CONFIG_VHA_SYS_VAGUS:    driver runs with Vagus system configuration file. Applicable when CONFIG_HW_AX3 is set.
+
+subdir-ccflags-y += -Wall -g
+EXTRA_CFLAGS :=
+
+# fail if target not specified
+ifeq ($(CONFIG_VHA_THEAD_LIGHT_FPGA_C910),y)
+  EXTRA_CFLAGS += -DCONFIG_VHA_THEAD_LIGHT_FPGA_C910
+  export CONFIG_NAME=CONFIG_VHA_THEAD_LIGHT_FPGA_C910
+  export CONFIG_HW_AX3 := y
+else ifeq ($(CONFIG_VHA_THEAD_LIGHT),y)
+  EXTRA_CFLAGS += -DCONFIG_VHA_THEAD_LIGHT
+  export CONFIG_NAME=CONFIG_VHA_THEAD_LIGHT
+  export CONFIG_HW_AX3 := y
+  export CONFIG_VHA_LO_PRI_SUBSEGS := y
+else ifeq ($(CONFIG_VHA_DUMMY),y)
+  export CONFIG_NAME=CONFIG_VHA_DUMMY
+  export CONFIG_HW_AX3 := y
+else
+  $(error no VHA platform specified. Try CONFIG_VHA_DUMMY=y or CONFIG_VHA_THEAD_XXX=y etc)
+endif
+
+#------------------------------------
+# for internal use
+#------------------------------------
+
+ifeq ($(KERNELRELEASE),)
+
+KDIR ?= $(LINUX_DIR)
+
+PWD := $(shell pwd)
+
+# Hardware layout
+ifeq ($(CONFIG_HW_AX2)$(CONFIG_HW_AX3),)
+  $(info no HW layout specified. Defaulting to Mirage: CONFIG_HW_AX2=y. To build for Aura, please specify: CONFIG_HW_AX3=y)
+  export CONFIG_HW_AX2 := y
+endif
+
+ifeq ($(CONFIG_HW_AX2), y)
+  $(info Building Mirage target!)
+
+  ifeq ($(CONFIG_VHA_SCF), y)
+    $(error Safety critical features not supported by Mirage!)
+  endif
+
+  export CONFIG_VHA_MMU_MIRRORED_CTX ?= n
+
+  ifeq ($(CONFIG_VHA_MMU_MIRRORED_CTX),)
+    $(error Separate MMU contexts not supported by Mirage!)
+  endif
+endif
+
+ifeq ($(CONFIG_HW_AX3), y)
+  # Default OS target = 0
+  export CONFIG_TARGET_OSID ?= 0
+  $(info Building Aura/Vagus target for OS$(CONFIG_TARGET_OSID) !)
+
+  export CONFIG_VHA_MMU_MIRRORED_CTX ?= y
+  ifeq ($(CONFIG_VHA_MMU_MIRRORED_CTX),y)
+    $(info Building Aura/Vagus target with mirrored mmu contexts!)
+  else
+    $(info Building Aura/Vagus target with separate mmu contexts!)
+  endif
+
+  # System config
+  ifeq ($(CONFIG_VHA_SYS_AURA)$(CONFIG_VHA_SYS_VAGUS),)
+    # $(info no system config specified. Defaulting to Aura: CONFIG_VHA_SYS_AURA=y. To build for Vagus, please specify: CONFIG_VHA_SYS_VAGUS=y)
+    export CONFIG_VHA_SYS_AURA := y
+  endif
+
+  ifeq ($(CONFIG_VHA_SYS_VAGUS),)
+    ifeq ($(CONFIG_VHA_SCF), y)
+      # No safety critical features
+      $(error Safety critical features not supported by Aura!)
+    endif
+  else
+    # Safety critical features
+    ifeq ($(CONFIG_VHA_SCF), y)
+      $(info Building with Vagus safety critical features!)
+    endif
+  endif
+endif
+
+# kernel warning levels: as high as possible:
+# W=12 and W=123 seem to warn about linux kernel headers, so use W=1.
+KWARN := W=1
+
+ifneq (,$(shell which sparse))
+# C=1: use 'sparse' for extra warnings, if it is avail.
+SPARSE := C=1
+endif
+
+all: info modules
+	@echo Build \"$(CONFIG_NAME)\" finished
+
+info:
+	@echo
+	@echo =====================================
+	@echo ==== The build options are below ====
+	@echo =====================================
+	@echo CONFIG_NAME=$(CONFIG_NAME)
+	@echo BUILDROOT_DIR=$(BUILDROOT_DIR)
+	@echo LINUX_DIR=$(LINUX_DIR)
+#	@echo PATH_TO_SYSROOT=$(PATH_TO_SYSROOT)
+	@echo TOOLCHAIN_DIR=$(TOOLCHAIN_DIR)
+	@echo CROSS_COMPILE=$(CROSS_COMPILE)
+	@echo KDIR=$(KDIR)
+	@echo ARCH=$(ARCH)
+	@echo subdir-ccflags-y=$(subdir-ccflags-y)
+	@echo EXTRA_CFLAGS=$(EXTRA_CFLAGS)
+	@echo
+	@echo CONFIG_VHA=$(CONFIG_VHA)
+	@echo CONFIG_HW_AX3=$(CONFIG_HW_AX3)
+	@echo CONFIG_TARGET_OSID=$(CONFIG_TARGET_OSID)
+	@echo CONFIG_VHA_MMU_MIRRORED_CTX=$(CONFIG_VHA_MMU_MIRRORED_CTX)
+	@echo CONFIG_VHA_SYS_MIRAGE=$(CONFIG_VHA_SYS_MIRAGE)
+	@echo CONFIG_VHA_SYS_AURA=$(CONFIG_VHA_SYS_AURA)
+	@echo CONFIG_VHA_SYS_VAGUS=$(CONFIG_VHA_SYS_VAGUS)
+	@echo CONFIG_VHA_SCF=$(CONFIG_VHA_SCF)
+	@echo CONFIG_VHA_NEXEF=$(CONFIG_VHA_NEXEF)
+	@echo CONFIG_LOKI=$(CONFIG_LOKI)
+	@echo =====================================
+	@echo
+
+modules: info
+	$(MAKE) -C $(KDIR) M=$(PWD) $(SPARSE) $(KWARN) EXTRA_CFLAGS="$(EXTRA_CFLAGS)" CROSS_COMPILE=$(CROSS_COMPILE)
+clean:
+	if [ -d "$(KDIR)" ]; then \
+	    $(MAKE) -C $(KDIR) M=$(PWD) clean; \
+	fi
+check:
+	cd $(KDIR); scripts/checkpatch.pl -f `find $(PWD)/vha $(PWD)/include $(PWD)/img_mem -name "*\.[ch]"`
+
+endif # KERNELRELEASE

+ 8 - 0
driver/dmabuf_exporter/FindDmaBufExporter.cmake

@@ -0,0 +1,8 @@
+# Locate the userspace headers of dmabuf_exporter
+#
+
+get_filename_component(DMABUF_EXPORTER_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+set (DMABUF_EXPORTER_FOUND TRUE)
+set (DMABUF_EXPORTER_INCLUDE_DIR ${DMABUF_EXPORTER_PREFIX}/uapi)
+

+ 36 - 0
driver/dmabuf_exporter/Makefile

@@ -0,0 +1,36 @@
+#
+# Makefile for dmabuf exporter implementations
+#
+
+ifdef CONFIG_ION
+obj-$(CONFIG_VHA) += dmabuf_exporter_ion.o
+dmabuf_exporter_ion-objs := de_common.o de_heap_ion.o
+dmabuf_exporter_ion-objs += de_heap_ion_example.o
+
+# detect ION header in Linux Kernel tree
+# srctree is needed here for kernels built with separate object dir (O=)
+ifneq ($(wildcard $(srctree)/include/linux/ion.h),)
+# some kernel trees have this non-standard path
+ccflags-y += -DIMG_KERNEL_ION_HEADER="<linux/ion.h>"
+else
+# this is the default location
+# the vanilla linux kernel does not export ion.h to include/linux
+# adding -I to the entire directory would expose many internal header files
+# so we use this somewhat ugly hack to use only this one with full path
+# realpath is needed to expand full path, some kernel trees set srctree to .
+ccflags-y += -DIMG_KERNEL_ION_HEADER="<$(realpath $(srctree))/drivers/staging/android/ion/ion.h>"
+ccflags-y += -DIMG_KERNEL_ION_PRIV_HEADER="<$(realpath $(srctree))/drivers/staging/android/ion/ion_priv.h>"
+endif
+endif
+
+ifdef CONFIG_DMA_SHARED_BUFFER
+obj-$(CONFIG_VHA) += dmabuf_exporter_coherent.o
+dmabuf_exporter_coherent-objs := de_common.o de_heap_coherent.o
+obj-$(CONFIG_VHA) += dmabuf_exporter_noncoherent.o
+dmabuf_exporter_noncoherent-objs := de_common.o de_heap_noncoherent.o
+endif
+
+ifdef CONFIG_GENERIC_ALLOCATOR
+obj-$(CONFIG_VHA) += dmabuf_exporter_carveout.o
+dmabuf_exporter_carveout-objs := de_common.o de_heap_carveout.o
+endif
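
The -DIMG_KERNEL_ION_HEADER trick above works because the C preprocessor allows the operand of #include to come from a macro expansion. A sketch of how a source file can consume it (assuming de_heap_ion.c does so; that file is not shown here):

/* The build system defines IMG_KERNEL_ION_HEADER as e.g. "<linux/ion.h>",
 * angle brackets included, so the header path can vary per kernel tree. */
#include IMG_KERNEL_ION_HEADER
#ifdef IMG_KERNEL_ION_PRIV_HEADER
#include IMG_KERNEL_ION_PRIV_HEADER
#endif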

+ 17 - 0
driver/dmabuf_exporter/README

@@ -0,0 +1,17 @@
+1. How to build
+
+make -C /lib/modules/$(uname -r)/build M=/path/to/dmabuf_exporter modules
+
+2. Carveout example
+
+sudo insmod dmabuf_exporter_carveout.ko carveout_base=0x38000000 carveout_size=0x8000000
+
+3. Coherent example
+
+sudo insmod dmabuf_exporter_coherent.ko
+
+4. Non-Coherent example
+
+sudo insmod dmabuf_exporter_noncoherent.ko cache_type=3
+
+cache_type: 1-cached, 2-uncached, 3-writecombine
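
For context, a minimal user-space sketch of driving one of these modules. It assumes the /dev/dmabuf misc device and the DMABUF_IOCTL_* commands from uapi/dmabuf_exporter.h that de_common.c (next file) implements; passing O_CLOEXEC as the export flag is an assumption, and error handling is trimmed:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "dmabuf_exporter.h"	/* DMABUF_IOCTL_CREATE / DMABUF_IOCTL_EXPORT */

int main(void)
{
	/* de_common.c registers a misc device named "dmabuf". */
	int dev = open("/dev/dmabuf", O_RDWR);
	if (dev < 0)
		return 1;

	/* Create a buffer; 5000 bytes is rounded up to whole pages
	 * (8192 with 4 KiB pages) before the heap allocation is made. */
	if (ioctl(dev, DMABUF_IOCTL_CREATE, 5000))
		return 1;

	/* Export hands back a dma-buf file descriptor. */
	int buf = ioctl(dev, DMABUF_IOCTL_EXPORT, O_CLOEXEC);
	if (buf < 0)
		return 1;

	/* If the heap supports CPU mapping, the dma-buf fd can be mmap'ed. */
	void *ptr = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, buf, 0);
	if (ptr != MAP_FAILED)
		munmap(ptr, 8192);

	close(buf);
	close(dev);	/* closing the device frees the underlying buffer */
	return 0;
}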

+ 176 - 0
driver/dmabuf_exporter/de_common.c

@@ -0,0 +1,176 @@
+/*
+ * de_common.c
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/miscdevice.h>
+
+#include "uapi/dmabuf_exporter.h"
+
+#include "de_heap.h"
+
+/*
+ * Because this file is used in all modules, the kernel build system does
+ * not define KBUILD_MODNAME. This causes a build failure in kernels where
+ * dynamic debug is enabled, in all instances of pr_debug().
+ *
+ * dynamic debug messages for this file will use this name
+ */
+#ifndef KBUILD_MODNAME
+#define KBUILD_MODNAME "dmabuf_exporter"
+#endif
+
+static struct miscdevice dmabuf_miscdevice;
+
+static int de_file_open(struct inode *inode, struct file *file)
+{
+	pr_debug("%s\n", __func__);
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static int dmabuf_ioctl_create(struct file *file, unsigned long arg_size)
+{
+	void *private_data;
+	size_t size;
+	int ret;
+
+	pr_debug("%s: private_data %p\n", __func__, file->private_data);
+
+	size = (arg_size + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+	pr_debug("%s: requested size %lu PAGE_SIZE %lu actual size %zu\n",
+		 __func__, arg_size, PAGE_SIZE, size);
+
+	if (file->private_data) {
+		pr_err("%s: buffer already created!\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = de_heap_buffer_create(size, PAGE_SIZE, &private_data);
+	if (ret)
+		return ret;
+
+	file->private_data = private_data;
+	return 0;
+}
+
+static int dmabuf_ioctl_export(struct file *file, unsigned long flags)
+{
+	pr_debug("%s: private_data %p\n", __func__, file->private_data);
+
+	if (file->private_data)
+		return de_heap_export_fd(file->private_data, flags);
+
+	pr_err("%s: buffer has not been created!\n", __func__);
+	return -ENODEV;
+}
+
+static int de_file_release(struct inode *inode, struct file *file)
+{
+	pr_debug("%s: private_data %p\n", __func__, file->private_data);
+
+	if (file->private_data) {
+		de_heap_buffer_free(file->private_data);
+		file->private_data = NULL;
+	}
+
+	return 0;
+}
+
+static long de_file_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	pr_debug("%s: cmd %x arg %lx\n", __func__, cmd, arg);
+
+	switch (cmd) {
+	case DMABUF_IOCTL_CREATE:
+#ifdef CONFIG_COMPAT
+	case COMPAT_DMABUF_IOCTL_CREATE:
+#endif
+		return dmabuf_ioctl_create(file, arg);
+
+	case DMABUF_IOCTL_EXPORT:
+#ifdef CONFIG_COMPAT
+	case COMPAT_DMABUF_IOCTL_EXPORT:
+#endif
+		return dmabuf_ioctl_export(file, arg);
+
+	default:
+		pr_err("%s: unknown cmd %x\n", __func__, cmd);
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations dmabuf_fops = {
+	.owner = THIS_MODULE,
+	.open = de_file_open,
+	.release = de_file_release,
+	.unlocked_ioctl = de_file_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = de_file_ioctl,
+#endif
+};
+
+static int __init dmabuf_device_init(void)
+{
+	int ret;
+
+	pr_info("%s\n", __func__);
+
+	dmabuf_miscdevice.minor = 128;
+	dmabuf_miscdevice.name = "dmabuf";
+	dmabuf_miscdevice.fops = &dmabuf_fops;
+	dmabuf_miscdevice.parent = NULL;
+	dmabuf_miscdevice.mode = 0666;
+
+	ret = misc_register(&dmabuf_miscdevice);
+	if (ret < 0) {
+		pr_err("%s: failed to register misc device %s\n",
+		       __func__, dmabuf_miscdevice.name);
+		return ret;
+	}
+	pr_info("%s: registered misc device %s\n",
+		__func__, dmabuf_miscdevice.name);
+
+	ret = de_heap_heap_init();
+	if (ret < 0) {
+		misc_deregister(&dmabuf_miscdevice);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit dmabuf_device_deinit(void)
+{
+	pr_info("%s\n", __func__);
+
+	de_heap_heap_deinit();
+
+	misc_deregister(&dmabuf_miscdevice);
+}
+
+module_init(dmabuf_device_init);
+module_exit(dmabuf_device_deinit);
+
+MODULE_AUTHOR("GPL");
+MODULE_DESCRIPTION("DMA-BUF test driver");
+MODULE_LICENSE("GPL v2");
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 28 - 0
driver/dmabuf_exporter/de_heap.h

@@ -0,0 +1,28 @@
+/*
+ * de_heap.h
+ */
+
+#ifndef DE_HEAP_H
+#define DE_HEAP_H
+
+#include <linux/types.h>
+
+int de_heap_buffer_create(size_t size, unsigned long align,
+			  void **private_data);
+int de_heap_export_fd(void *private_data, unsigned long flags);
+void de_heap_buffer_free(void *private_data);
+
+int de_heap_heap_init(void);
+void de_heap_heap_deinit(void);
+
+#endif /* DE_HEAP_H */
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */
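
To make the contract concrete, here is an illustrative skeleton of a backend behind this header (not part of the SDK; the real backends are de_heap_carveout.c, de_heap_coherent.c and friends, which additionally wrap their memory in a struct dma_buf for export):

/*
 * Illustrative de_heap backend skeleton. Each backend hides its state
 * behind the opaque private_data pointer that de_common.c stores in
 * file->private_data.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "de_heap.h"

struct example_buffer {
	size_t size;
	void *vaddr;	/* stand-in backing storage; backend-specific in reality */
};

int de_heap_buffer_create(size_t size, unsigned long align,
			  void **private_data)
{
	struct example_buffer *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->size = size;
	buf->vaddr = vmalloc(size);	/* real backends use their own allocator */
	if (!buf->vaddr) {
		kfree(buf);
		return -ENOMEM;
	}

	*private_data = buf;
	return 0;
}

int de_heap_export_fd(void *private_data, unsigned long flags)
{
	/*
	 * A real backend wraps its memory in a struct dma_buf via
	 * dma_buf_export() and returns dma_buf_fd(dma_buf, flags);
	 * elided here to keep the skeleton short.
	 */
	return -ENOSYS;
}

void de_heap_buffer_free(void *private_data)
{
	struct example_buffer *buf = private_data;

	vfree(buf->vaddr);
	kfree(buf);
}

int de_heap_heap_init(void)
{
	return 0;	/* nothing to set up for this toy backend */
}

void de_heap_heap_deinit(void)
{
}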

+ 468 - 0
driver/dmabuf_exporter/de_heap_carveout.c

@@ -0,0 +1,468 @@
+/*
+ * de_heap_carveout.c
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "de_heap.h"
+
+/*
+ * module parameters
+ */
+
+static unsigned int use_pci = 0;
+module_param(use_pci, uint, 0444);
+MODULE_PARM_DESC(use_pci, "use PCI bar memory (default: false)");
+
+static unsigned int cpu_map = 1;
+module_param(cpu_map, uint, 0444);
+MODULE_PARM_DESC(cpu_map, "map memory to CPU (default: true)");
+
+/* mandatory carveout parameters (use_pci = 0) */
+
+static unsigned long carveout_base = 0;
+module_param(carveout_base, ulong, 0444);
+MODULE_PARM_DESC(carveout_base, "physical base address. "
+		"mandatory when use_pci is false");
+
+static unsigned long carveout_size = 0;
+module_param(carveout_size, ulong, 0444);
+MODULE_PARM_DESC(carveout_size, "physical size in bytes. "
+		"mandatory when use_pci is false");
+
+/* mandatory pci parameters (use_pci = 1) */
+
+static unsigned int pci_vendor = 0;
+module_param(pci_vendor, uint, 0444);
+MODULE_PARM_DESC(pci_vendor, "PCI vendor id. mandatory when use_pci is true");
+
+static unsigned int pci_product = 0;
+module_param(pci_product, uint, 0444);
+MODULE_PARM_DESC(pci_product, "PCI product id. mandatory when use_pci is true");
+
+static int pci_bar = -1;
+module_param(pci_bar, int, 0444);
+MODULE_PARM_DESC(pci_bar, "PCI bar index. mandatory when use_pci is true");
+
+/* optional pci parameters (use_pci = 1) */
+
+static unsigned long pci_size = 0;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "physical size in bytes. "
+		"used when use_pci is true. "
+		"when 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset = 0;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "offset from PCI bar start. "
+		"used when use_pci is true. optional (default: 0)");
+
+static bool use_sg_dma = true;
+module_param(use_sg_dma, bool, 0444);
+MODULE_PARM_DESC(use_sg_dma,
+		"Sets sg_dma_address/len info");
+
+/*
+ * internal values
+ */
+static phys_addr_t pool_base;
+
+/* minimum allocation order: 2^12 = 4096 bytes */
+#define POOL_ALLOC_ORDER 12
+
+static struct gen_pool *heap_pool;
+
+static struct pci_dev *pci_device;
+
+struct buffer {
+	phys_addr_t phys;
+	size_t size;
+	struct sg_table *sg_table;
+	struct dma_buf *dma_buf;
+	dma_addr_t dma_base;
+	unsigned int dma_size;
+};
+
+/*
+ * dmabuf ops
+ */
+
+static struct sg_table *de_carveout_map_dma(struct dma_buf_attachment *attach,
+							enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+
+	pr_debug("%s\n", __func__);
+
+	if (use_sg_dma) {
+		sg_dma_address(buffer->sg_table->sgl) = buffer->dma_base;
+		sg_dma_len(buffer->sg_table->sgl) = buffer->dma_size;
+	}
+
+	return buffer->sg_table;
+}
+
+static void de_carveout_unmap_dma(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+
+	pr_debug("%s\n", __func__);
+	if (use_sg_dma) {
+		sg_dma_address(buffer->sg_table->sgl) = (~(dma_addr_t)0);
+		sg_dma_len(buffer->sg_table->sgl) = 0;
+	}
+}
+
+static void de_carveout_release(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+
+	pr_info("%s phys address 0x%llx size %zu\n",
+		__func__, (unsigned long long)buffer->phys, buffer->size);
+
+	sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+	gen_pool_free(heap_pool, buffer->phys, buffer->size);
+	kfree(buffer);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+static void *de_carveout_kmap_atomic(struct dma_buf *buf, unsigned long page)
+{
+	pr_err("%s not supported\n", __func__);
+	return NULL;
+}
+#endif
+
+static int de_carveout_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct buffer *buffer = dmabuf->priv;
+
+	pr_debug("%s\n", __func__);
+
+	if (!cpu_map) {
+		pr_err("%s not allowed (cpu_map is false)\n", __func__);
+		return -EIO;
+	}
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return remap_pfn_range(vma, vma->vm_start,
+						 page_to_pfn(sg_page(buffer->sg_table->sgl)),
+						 buffer->sg_table->sgl->length,
+						 vma->vm_page_prot);
+}
+
+static void *de_carveout_kmap(struct dma_buf *dma_buf, unsigned long page)
+{
+	struct buffer *buffer = dma_buf->priv;
+	void *ptr;
+
+	if (!cpu_map) {
+		pr_err("%s not allowed (cpu_map is false)\n", __func__);
+		return NULL;
+	}
+
+	ptr = (void __force *)ioremap(buffer->phys, buffer->size);
+	if (!ptr) {
+		pr_err("%s:carveout ioremap failed\n", __func__);
+		return NULL;
+	}
+
+	return ptr;
+}
+
+static void de_carveout_kunmap(struct dma_buf *buf, unsigned long page,
+						 void *vaddr)
+{
+	pr_debug("%s\n", __func__);
+
+	if (vaddr)
+		iounmap((void __iomem __force *)vaddr);
+}
+
+static void *de_carveout_vmap(struct dma_buf *buf)
+{
+	return de_carveout_kmap(buf, 0);
+}
+
+static void de_carveout_vunmap(struct dma_buf *buf, void *kptr)
+{
+	de_carveout_kunmap(buf, 0, kptr);
+}
+
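+/*
+ * Version gating below: the kmap/kunmap callbacks were renamed to map/unmap
+ * in 4.12, the atomic variants were removed in 4.19, and map/unmap were
+ * removed altogether in 5.6 (mmap/vmap/vunmap remain).
+ */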
+static const struct dma_buf_ops dmabuf_ops = {
+	.attach = NULL, /* optional */
+	.detach = NULL, /* optional */
+	.map_dma_buf = de_carveout_map_dma,
+	.unmap_dma_buf = de_carveout_unmap_dma,
+	.release = de_carveout_release,
+	.begin_cpu_access = NULL, /* optional */
+	.end_cpu_access = NULL, /* optional */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+	.kmap_atomic = de_carveout_kmap_atomic,
+	.kunmap_atomic = NULL, /* optional */
+	.kmap = de_carveout_kmap,
+	.kunmap = de_carveout_kunmap, /* optional */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+	.map_atomic = de_carveout_kmap_atomic,
+	.unmap_atomic = NULL, /* optional */
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	.map = de_carveout_kmap,
+	.unmap = de_carveout_kunmap, /* optional */
+#endif
+#endif
+	.mmap = de_carveout_mmap,
+	.vmap = de_carveout_vmap,
+	.vunmap = de_carveout_vunmap,
+};
+
+int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
+{
+	struct buffer *buffer;
+	struct dma_buf *dma_buf;
+	int ret;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+
+	pr_info("%s:carveout size %zu\n", __func__, size);
+
+	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
+	if (!buffer) {
+		pr_err("%s:carveout failed to allocate buffer\n", __func__);
+		return -ENOMEM;
+	}
+
+	buffer->phys = gen_pool_alloc(heap_pool, size);
+	if (!buffer->phys) {
+		pr_err("%s:carveout gen_pool_alloc failed for size %zu\n",
+					 __func__, size);
+		ret = -ENOMEM;
+		goto free_buffer;
+	}
+	buffer->size = size;
+
+	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buffer->sg_table) {
+		pr_err("%s:carveout failed to allocate sg_table\n", __func__);
+		ret = -ENOMEM;
+		goto free_alloc;
+	}
+
+	ret = sg_alloc_table(buffer->sg_table, 1, GFP_KERNEL);
+	if (ret) {
+		pr_err("%s:carveout sg_alloc_table failed\n", __func__);
+		goto free_sg_table_mem;
+	}
+	sg_set_page(buffer->sg_table->sgl, pfn_to_page(PFN_DOWN(buffer->phys)),
+				PAGE_ALIGN(buffer->size), 0);
+
+	/* Store dma info */
+	buffer->dma_base = buffer->phys;
+	if (use_pci) {
+		buffer->dma_base -= pool_base;
+		buffer->dma_base += pci_offset;
+	}
+	buffer->dma_size = PAGE_ALIGN(size);
+
+	if (use_sg_dma) {
+		/* No mapping yet */
+		sg_dma_address(buffer->sg_table->sgl) = (~(dma_addr_t)0);
+		sg_dma_len(buffer->sg_table->sgl) = 0;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
+#else
+	exp_info.ops = &dmabuf_ops;
+	exp_info.size = size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+	exp_info.resv = NULL;
+	dma_buf = dma_buf_export(&exp_info);
+#endif
+	if (IS_ERR(dma_buf)) {
+		pr_err("%s:carveout dma_buf_export failed\n", __func__);
+		ret = PTR_ERR(dma_buf);
+		goto free_sg_table;
+	}
+	buffer->dma_buf = dma_buf;
+
+	*private_data = buffer;
+
+	pr_info("%s:carveout phys address 0x%llx size %zu\n",
+		__func__, (unsigned long long)buffer->phys, buffer->size);
+	return 0;
+
+free_sg_table:
+	sg_free_table(buffer->sg_table);
+free_sg_table_mem:
+	kfree(buffer->sg_table);
+free_alloc:
+	gen_pool_free(heap_pool, buffer->phys, buffer->size);
+free_buffer:
+	kfree(buffer);
+	return ret;
+}
+
+void de_heap_buffer_free(void *private_data)
+{
+	struct buffer *buffer = private_data;
+
+	pr_info("%s:carveout phys address 0x%llx size %zu\n",
+		__func__, (unsigned long long)buffer->phys, buffer->size);
+
+	dma_buf_put(buffer->dma_buf);
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	struct buffer *buffer = private_data;
+	struct dma_buf *dma_buf = buffer->dma_buf;
+	int ret;
+
+	pr_debug("%s:carveout %p\n", __func__, buffer);
+
+	get_dma_buf(dma_buf);
+	ret = dma_buf_fd(dma_buf, flags);
+	if (ret < 0) {
+		pr_err("%s:carveout dma_buf_fd failed\n", __func__);
+		dma_buf_put(dma_buf);
+		return ret;
+	}
+
+	pr_info("%s:carveout phys address 0x%llx export fd %d\n",
+		__func__, (unsigned long long)buffer->phys, ret);
+	return ret;
+}
+
+int de_heap_heap_init(void)
+{
+	size_t pool_size;
+	int ret;
+
+	pr_debug("%s:carveout\n", __func__);
+
+	if (use_pci) {
+		unsigned long bar_base, bar_len;
+
+		if (pci_vendor == 0 || pci_product == 0 || pci_bar < 0) {
+			pr_err("%s:carveout missing pci parameters\n",
+						 __func__);
+			return -EFAULT;
+		}
+
+		pci_device = pci_get_device(pci_vendor, pci_product, NULL);
+		if (pci_device == NULL) {
+			pr_err("%s:carveout PCI device not found\n", __func__);
+			return -EFAULT;
+		}
+
+		bar_base = pci_resource_start(pci_device, pci_bar);
+		if (bar_base == 0) {
+			pr_err("%s:carveout PCI bar %d not found\n",
+						 __func__, pci_bar);
+			ret = -EFAULT;
+			goto free_pci_device;
+		}
+
+		bar_len = pci_resource_len(pci_device, pci_bar);
+		if (bar_len == 0) {
+			pr_err("%s:carveout PCI bar %d has zero length\n",
+						 __func__, pci_bar);
+			ret = -EFAULT;
+			goto free_pci_device;
+		}
+		pr_info("%s:carveout PCI bar %d start %#lx length %lu\n",
+			__func__, pci_bar, bar_base, bar_len);
+
+		if (pci_size == 0)
+			pci_size = bar_len;
+
+		if (pci_offset + pci_size > bar_len) {
+			pr_err("%s:carveout pci_offset + pci_size exceeds bar length\n",
+						 __func__);
+			ret = -EFAULT;
+			goto free_pci_device;
+		}
+
+		pool_base = bar_base + pci_offset;
+		pool_size = pci_size;
+	} else {
+		pci_device = NULL;
+
+		if (carveout_base == 0) {
+			pr_err("%s:carveout carveout_base not defined\n",
+						 __func__);
+			return -EFAULT;
+		}
+		if (carveout_size == 0) {
+			pr_err("%s:carveout carveout_size not defined\n",
+						 __func__);
+			return -EFAULT;
+		}
+
+		pool_base = carveout_base;
+		pool_size = carveout_size;
+	}
+
+	heap_pool = gen_pool_create(POOL_ALLOC_ORDER, -1);
+	if (!heap_pool) {
+		pr_err("%s:carveout gen_pool_create failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_pci_device;
+	}
+
+	ret = gen_pool_add(heap_pool, (unsigned long)pool_base, pool_size, -1);
+	if (ret) {
+		pr_err("%s:carveout gen_pool_add failed\n", __func__);
+		goto free_pool;
+	}
+
+	pr_info("%s:carveout base %#llx size %zu\n", __func__,
+		(unsigned long long)pool_base, pool_size);
+	return 0;
+
+free_pool:
+	gen_pool_destroy(heap_pool);
+free_pci_device:
+	if (pci_device)
+		pci_dev_put(pci_device);
+	return ret;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:carveout\n", __func__);
+
+	gen_pool_destroy(heap_pool);
+
+	if (pci_device)
+		pci_dev_put(pci_device);
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 303 - 0
driver/dmabuf_exporter/de_heap_coherent.c

@@ -0,0 +1,303 @@
+/*
+ * de_heap_coherent.c
+ */
+
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+#include <asm/cacheflush.h>
+#else
+#include <asm/set_memory.h>
+#endif
+#endif /* CONFIG_X86 */
+
+#include "de_heap.h"
+
+#define MEMORY_ALLOCATION_FLAGS (GFP_HIGHUSER | __GFP_ZERO)
+
+struct buffer {
+	size_t size;
+	void *vaddr;
+	struct sg_table *sg_table;
+	dma_addr_t handle;
+};
+
+/*
+ * dmabuf ops
+ */
+
+static void de_coherent_release(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+
+	pr_info("%s phys address 0x%llx\n",
+		__func__, (unsigned long long int)buffer->handle);
+
+	sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)buffer->vaddr,
+					(buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
+#endif
+	dma_free_coherent(NULL, buffer->size, buffer->vaddr, buffer->handle);
+
+	kfree(buffer);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+static void *de_coherent_kmap_atomic(struct dma_buf *buf, unsigned long page)
+{
+	pr_debug("%s\n", __func__);
+
+	return NULL;
+}
+#endif
+
+static struct sg_table *de_coherent_map_dma(struct dma_buf_attachment *attach,
+							enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+
+	pr_debug("%s\n", __func__);
+
+	return buffer->sg_table;
+}
+
+static void de_coherent_unmap_dma(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	pr_debug("%s\n", __func__);
+}
+
+static int de_coherent_mmap(struct dma_buf *dmabuf,
+					struct vm_area_struct *vma)
+{
+	struct buffer *buffer = dmabuf->priv;
+	unsigned long user_count, count, pfn, off;
+
+	/*
+	 * we could use dma_mmap_coherent() here, but it hard-codes
+	 * an uncached behaviour and the kernel complains on x86 for
+	 * a double mapping with different semantics (write-combine and
+	 * uncached). Instead, we re-implement here the mapping.
+	 * code copied from dma_common_mmap()
+	 */
+
+	pr_debug("%s\n", __func__);
+
+	user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	count = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+	pfn = page_to_pfn(virt_to_page(buffer->vaddr));
+	off = vma->vm_pgoff;
+
+	if (off >= count || user_count > (count - off))
+		return -ENXIO;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return remap_pfn_range(vma, vma->vm_start, pfn + off,
+						 user_count << PAGE_SHIFT,
+						 vma->vm_page_prot);
+}
+
+static void *de_coherent_kmap(struct dma_buf *dma_buf, unsigned long page)
+{
+	struct buffer *buffer = dma_buf->priv;
+
+	pr_debug("%s\n", __func__);
+
+	/* kernel memory mapping has been done at allocation time */
+	return buffer->vaddr;
+}
+
+static void de_coherent_kunmap(struct dma_buf *buf, unsigned long page,
+						 void *vaddr)
+{
+	pr_debug("%s\n", __func__);
+}
+
+static void *de_coherent_vmap(struct dma_buf *buf)
+{
+	return de_coherent_kmap(buf, 0);
+}
+
+static void de_coherent_vunmap(struct dma_buf *buf, void *kptr)
+{
+	de_coherent_kunmap(buf, 0, kptr);
+}
+
+static const struct dma_buf_ops dmabuf_ops = {
+	.attach = NULL, /* optional */
+	.detach = NULL, /* optional */
+	.map_dma_buf = de_coherent_map_dma,
+	.unmap_dma_buf = de_coherent_unmap_dma,
+	.release = de_coherent_release,
+	.begin_cpu_access = NULL, /* optional */
+	.end_cpu_access = NULL, /* optional */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+	.kmap_atomic = de_coherent_kmap_atomic,
+	.kunmap_atomic = NULL, /* optional */
+	.kmap = de_coherent_kmap,
+	.kunmap = de_coherent_kunmap, /* optional */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+	.map_atomic = de_coherent_kmap_atomic,
+	.unmap_atomic = NULL, /* optional */
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	.map = de_coherent_kmap,
+	.unmap = de_coherent_kunmap, /* optional */
+#endif
+#endif
+	.mmap = de_coherent_mmap,
+	.vmap = de_coherent_vmap,
+	.vunmap = de_coherent_vunmap,
+};
+
+int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
+{
+	struct buffer *buffer;
+	struct dma_buf *dma_buf;
+	int ret;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+
+	pr_info("%s:coherent size %zu\n", __func__, size);
+
+	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
+	if (!buffer) {
+		pr_err("%s:coherent failed to allocate buffer\n", __func__);
+		return -ENOMEM;
+	}
+	buffer->size = size;
+
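+	/*
+	 * Note: allocating with a NULL device here relies on legacy fallback
+	 * behaviour; recent kernels require a real struct device for
+	 * dma_alloc_coherent().
+	 */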
+	buffer->vaddr = dma_alloc_coherent(NULL, size, &buffer->handle,
+						 MEMORY_ALLOCATION_FLAGS);
+	if (!buffer->vaddr) {
+		pr_err("%s:coherent dma_alloc_coherent failed for size %zu\n",
+					 __func__, size);
+		ret = -ENOMEM;
+		goto dma_alloc_coherent_failed;
+	}
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)buffer->vaddr,
+					(buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
+#endif
+
+	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buffer->sg_table) {
+		pr_err("%s:coherent failed to allocate sg_table\n", __func__);
+		ret = -ENOMEM;
+		goto sg_table_malloc_failed;
+	}
+
+	ret = sg_alloc_table(buffer->sg_table, 1, GFP_KERNEL);
+	if (ret) {
+		pr_err("%s:coherent sg_alloc_table failed\n", __func__);
+		goto sg_alloc_table_failed;
+	}
+	sg_set_page(buffer->sg_table->sgl, virt_to_page(buffer->vaddr),
+				PAGE_ALIGN(size), 0);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
+#else
+	exp_info.ops = &dmabuf_ops;
+	exp_info.size = size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+	exp_info.resv = NULL;
+	dma_buf = dma_buf_export(&exp_info);
+#endif
+	if (IS_ERR(dma_buf)) {
+		pr_err("%s:coherent dma_buf_export failed\n", __func__);
+		ret = PTR_ERR(dma_buf);
+		goto dma_buf_export_failed;
+	}
+
+	dma_buf->priv = buffer;
+	*private_data = dma_buf;
+
+	pr_info("%s:coherent phys address 0x%llx virtual addr %p size %zu\n",
+		__func__, (unsigned long long int)buffer->handle,
+		buffer->vaddr, size);
+	return 0;
+
+dma_buf_export_failed:
+	sg_free_table(buffer->sg_table);
+sg_alloc_table_failed:
+	kfree(buffer->sg_table);
+sg_table_malloc_failed:
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)buffer->vaddr,
+					(buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
+#endif
+	dma_free_coherent(NULL, size, buffer->vaddr, buffer->handle);
+dma_alloc_coherent_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	struct dma_buf *dma_buf = private_data;
+	struct buffer *buffer = dma_buf->priv;
+	int ret;
+
+	pr_debug("%s:coherent %p\n", __func__, dma_buf);
+
+	get_dma_buf(dma_buf);
+	ret = dma_buf_fd(dma_buf, flags);
+	if (ret < 0) {
+		pr_err("%s:coherent dma_buf_fd failed\n", __func__);
+		dma_buf_put(dma_buf);
+		return ret;
+	}
+
+	pr_info("%s:coherent phys address 0x%llx export fd %d\n",
+		__func__, (unsigned long long int)buffer->handle, ret);
+	return ret;
+}
+
+void de_heap_buffer_free(void *private_data)
+{
+	struct dma_buf *dma_buf = private_data;
+	struct buffer *buffer = dma_buf->priv;
+
+	pr_info("%s:coherent phys address 0x%llx\n",
+		__func__, (unsigned long long int)buffer->handle);
+
+	dma_buf_put(dma_buf);
+}
+
+int de_heap_heap_init(void)
+{
+	pr_info("%s:coherent\n", __func__);
+	return 0;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:coherent\n", __func__);
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 212 - 0
driver/dmabuf_exporter/de_heap_ion.c

@@ -0,0 +1,212 @@
+/*
+ * de_heap_ion.c
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include "de_heap.h"
+#include "de_heap_ion.h"
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))
+static struct ion_client *dmabuf_ion_client;
+int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
+{
+	struct ion_handle *ion_handle;
+	unsigned int heap_mask;
+	unsigned int heap_flags;
+
+	pr_info("%s:check %zu\n", __func__, size);
+	pr_debug("%s:ion size %zu\n", __func__, size);
+
+	heap_mask = de_heap_ion_get_heap_mask();
+	pr_info("%s: heap mask = %x\n", __func__, heap_mask);
+	heap_flags = de_heap_ion_get_heap_flags();
+	pr_info("%s: heap flags = %x\n", __func__, heap_flags);
+	pr_info("%s:ion_alloc dmabuf_ion_client = %lx, size = %i, align = %i\n", __func__, (unsigned long)dmabuf_ion_client, (int)size, (int)align);
+	ion_handle = ion_alloc(dmabuf_ion_client, size, align,
+			       heap_mask, heap_flags);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		pr_err("%s:ion ion_alloc failed, ion_handle = %li\n", __func__, PTR_ERR(ion_handle));
+		if (IS_ERR(ion_handle)) {
+			return PTR_ERR(ion_handle);
+		} else {
+			return -ENOMEM;
+		}
+	} else {
+		pr_info("%s:ion handle %p size %zu\n", __func__, ion_handle, size);
+	}
+
+	*private_data = ion_handle;
+
+	return 0;
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	struct ion_handle *ion_handle = private_data;
+	int ret;
+
+	pr_debug("%s:ion\n", __func__);
+
+	ret = ion_share_dma_buf_fd(dmabuf_ion_client, ion_handle);
+	if (ret < 0) {
+		pr_err("%s:ion ion_share_dma_buf_fd failed\n", __func__);
+		return ret;
+	}
+
+	pr_info("%s:ion handle %p export fd %d\n", __func__, ion_handle, ret);
+	return ret;
+}
+
+void de_heap_buffer_free(void *private_data)
+{
+	struct ion_handle *ion_handle = private_data;
+
+	pr_info("%s:ion handle %p\n", __func__, ion_handle);
+
+	ion_free(dmabuf_ion_client, ion_handle);
+}
+
+int de_heap_heap_init(void)
+{
+	pr_info("%s:ion\n", __func__);
+
+	dmabuf_ion_client = de_heap_ion_create_ion_client();
+	pr_info("%s:dmabuf_ion_client = %lx\n", __func__, (unsigned long)dmabuf_ion_client);
+	if (IS_ERR_OR_NULL(dmabuf_ion_client)) {
+		pr_err("%s:ion failed to get an ion client\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:ion\n", __func__);
+
+	de_heap_ion_destroy_ion_client(dmabuf_ion_client);
+}
+
+
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)))
+
+int de_heap_heap_init(void)
+{
+	pr_info("%s:ion\n", __func__);
+
+	return 0;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:ion\n", __func__);
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	pr_info("%s:\n", __func__);
+	return 0;
+}
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)))
+
+static struct ion_client *dmabuf_ion_client;
+int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
+{
+	struct ion_handle *ion_handle;
+	unsigned int heap_mask;
+	unsigned int heap_flags;
+
+	pr_info("%s:check %zu\n", __func__, size);
+	pr_debug("%s:ion size %zu\n", __func__, size);
+
+	heap_mask = de_heap_ion_get_heap_mask();
+	pr_info("%s: heap mask = %x\n", __func__, heap_mask);
+	heap_flags = de_heap_ion_get_heap_flags();
+	pr_info("%s: heap flags = %x\n", __func__, heap_flags);
+	pr_info("%s:ion_alloc dmabuf_ion_client = %lx, size = %i, align = %i\n", __func__, (unsigned long)dmabuf_ion_client, (int)size, (int)align);
+	ion_handle = ion_alloc(dmabuf_ion_client, size, align,
+			       heap_mask, heap_flags);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		pr_err("%s:ion ion_alloc failed, ion_handle = %li\n", __func__, PTR_ERR(ion_handle));
+		if (IS_ERR(ion_handle)) {
+			return PTR_ERR(ion_handle);
+		} else {
+			return -ENOMEM;
+		}
+	} else {
+		pr_info("%s:ion handle %p size %zu\n", __func__, ion_handle, size);
+	}
+
+	*private_data = ion_handle;
+
+	return 0;
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	struct ion_handle *ion_handle = private_data;
+	int ret;
+
+	pr_debug("%s:ion\n", __func__);
+
+	ret = ion_share_dma_buf_fd(dmabuf_ion_client, ion_handle);
+	if (ret < 0) {
+		pr_err("%s:ion ion_share_dma_buf_fd failed\n", __func__);
+		return ret;
+	}
+
+	pr_info("%s:ion handle %p export fd %d\n", __func__, ion_handle, ret);
+	return ret;
+}
+
+void de_heap_buffer_free(void *private_data)
+{
+	struct ion_handle *ion_handle = private_data;
+
+	pr_info("%s:ion handle %p\n", __func__, ion_handle);
+
+	ion_free(dmabuf_ion_client, ion_handle);
+}
+
+int de_heap_heap_init(void)
+{
+	pr_info("%s:ion\n", __func__);
+
+	dmabuf_ion_client = de_heap_ion_create_ion_client();
+	pr_info("%s:dmabuf_ion_client = %lx\n", __func__, (unsigned long)dmabuf_ion_client);
+	if (IS_ERR_OR_NULL(dmabuf_ion_client)) {
+		pr_err("%s:ion failed to get an ion client\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:ion\n", __func__);
+
+	de_heap_ion_destroy_ion_client(dmabuf_ion_client);
+}
+
+
+#else
+#error "kernel not supported"
+#endif
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 46 - 0
driver/dmabuf_exporter/de_heap_ion.h

@@ -0,0 +1,46 @@
+/*
+ * de_heap_ion.h
+ *
+ * platform specific interface for de_heap_ion
+ *
+ */
+
+#ifndef DE_HEAP_ION_H
+#define DE_HEAP_ION_H
+
+#include <linux/version.h>
+
+/*
+ * gcc preprocessor defines "linux" as "1".
+ * [ http://stackoverflow.com/questions/19210935 ]
+ * IMG_KERNEL_ION_HEADER can be <linux/ion.h>, which expands to <1/ion.h>
+ */
+#undef linux
+#include IMG_KERNEL_ION_HEADER
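+
+/*
+ * The actual header path is expected to be injected by the build system,
+ * e.g. (an assumed Kbuild fragment, not defined in this header):
+ *
+ *	ccflags-y += -DIMG_KERNEL_ION_HEADER='<linux/ion.h>'
+ */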
+
+/*
+ * fetch the ion heap mask (argument to ion_alloc)
+ */
+unsigned int de_heap_ion_get_heap_mask(void);
+
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)))
+unsigned int de_heap_ion_get_heap_id_mask(void);
+#endif
+
+/*
+ * fetch the ion flags (argument to ion_alloc)
+ */
+unsigned int de_heap_ion_get_heap_flags(void);
+
+/*
+ * fetch an ion client instance
+ *
+ * the implementation of this usually depends on ion_device (ion_client_create)
+ * which is platform specific
+ */
+struct ion_client *de_heap_ion_create_ion_client(void);
+
+struct ion_client *de_heap_ion_destroy_ion_client(struct ion_client *dmabuf_ion_client);
+
+#endif /* DE_HEAP_ION_H */

+ 205 - 0
driver/dmabuf_exporter/de_heap_ion_example.c

@@ -0,0 +1,205 @@
+/*
+ * de_heap_ion_example.c
+ */
+
+#include "de_heap_ion.h"
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+
+#undef linux
+#include IMG_KERNEL_ION_HEADER
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))
+#include IMG_KERNEL_ION_PRIV_HEADER
+#endif
+#define linux 1
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))
+static struct ion_device *idev;
+static struct ion_client *client;
+static struct ion_heap **heaps;
+#endif
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))
+static void *carveout_ptr;
+static void *chunk_ptr;
+
+static struct ion_platform_heap dummy_heaps[] = {
+		{
+			.id	= ION_HEAP_TYPE_SYSTEM,
+			.type	= ION_HEAP_TYPE_SYSTEM,
+			.name	= "system",
+		},
+		{
+			.id	= ION_HEAP_TYPE_SYSTEM_CONTIG,
+			.type	= ION_HEAP_TYPE_SYSTEM_CONTIG,
+			.name	= "system contig",
+		},
+		{
+			.id	= ION_HEAP_TYPE_CARVEOUT,
+			.type	= ION_HEAP_TYPE_CARVEOUT,
+			.name	= "carveout",
+			.size	= SZ_4M,
+		},
+		{
+			.id	= ION_HEAP_TYPE_CHUNK,
+			.type	= ION_HEAP_TYPE_CHUNK,
+			.name	= "chunk",
+			.size	= SZ_4M,
+			.align	= SZ_16K,
+			.priv	= (void *)(SZ_16K),
+		},
+};
+
+static struct ion_platform_data dummy_ion_pdata = {
+	.nr = ARRAY_SIZE(dummy_heaps),
+	.heaps = dummy_heaps,
+};
+#endif
+
+unsigned int de_heap_ion_get_heap_mask(void)
+{
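+	/* arbitrary example mask (heap ids 1 and 6); real platforms supply their own */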
+	return 0x42;
+}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)))
+unsigned int de_heap_ion_get_heap_id_mask(void)
+{
+	return 1<<ION_HEAP_TYPE_SYSTEM;
+}
+#endif
+
+unsigned int de_heap_ion_get_heap_flags(void)
+{
+	return 0;
+}
+
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))
+struct ion_client *de_heap_ion_create_ion_client(void)
+{
+	int i, err;
+	pr_info("%s:\n", __func__);
+	/*
+	 * usually involves fetching an ion_device from the system
+	 * and calling ion_client_create()
+	 */
+	pr_info("%s:check ion_device_create \n", __func__);
+	idev = ion_device_create(NULL);
+	if (IS_ERR(idev)) {
+		pr_err("%s:ion ion_device_create failed %li \n", __func__, PTR_ERR(idev));
+		return (struct ion_client *)idev;
+	}
+
+	pr_info("%s:check kcalloc \n", __func__);
+	heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
+			GFP_KERNEL);
+	pr_info("%s:heaps = %lx \n", __func__, (long unsigned int)heaps);
+	if (!heaps) {
+		pr_err("%s:ion kcalloc heaps = %lx \n", __func__, (long unsigned int)heaps);
+		ion_device_destroy(idev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Allocate a dummy carveout heap */
+	carveout_ptr = alloc_pages_exact(dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+				GFP_KERNEL);
+	if (carveout_ptr) {
+		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = virt_to_phys(carveout_ptr);
+	} else {
+		pr_err("ion_dummy: Could not allocate carveout\n");
+	}
+
+	/* Allocate a dummy chunk heap */
+	chunk_ptr = alloc_pages_exact(dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+				GFP_KERNEL);
+	if (chunk_ptr) {
+		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+	} else {
+		pr_err("ion_dummy: Could not allocate chunk\n");
+	}
+
+	for (i = 0; i < dummy_ion_pdata.nr; i++) {
+		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && !heap_data->base) {
+			pr_info("ion_dummy: ION_HEAP_TYPE_CARVEOUT skipped heap_data->base == %lx \n", heap_data->base);
+			continue;
+		}
+
+		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) {
+			pr_info("ion_dummy: ION_HEAP_TYPE_CHUNK skipped heap_data->base == %lx \n", heap_data->base);
+			continue;
+		}
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			pr_info("ion_dummy: ion_heap_create failed, returned = %lx, for heap id = %d \n", (unsigned long)(heaps[i]), i);
+			err = PTR_ERR(heaps[i]);
+			goto err;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+
+	pr_info("%s:ion ion_device_create success idev = %lx \n", __func__, (long)idev);
+	client = ion_client_create(idev, "ion_client");
+	if (IS_ERR_OR_NULL(client)) {
+		pr_info("%s:ion ion_client_create failed idev = %lx client = %li\n", __func__, (long)idev, PTR_ERR(client));
+		ion_device_destroy(idev);
+		return (client);
+	}
+
+	return client;
+
+err:
+	for (i = 0; i < dummy_ion_pdata.nr; ++i)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+
+	if (carveout_ptr) {
+		free_pages_exact(carveout_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+		carveout_ptr = NULL;
+	}
+	if (chunk_ptr) {
+		free_pages_exact(chunk_ptr,
+				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+		chunk_ptr = NULL;
+	}
+	ion_device_destroy(idev);
+	return ERR_PTR(err);
+}
+
+/*
+ * returns NULL on success; on failure, propagates the broken ion device
+ * pointer (NULL or ERR_PTR)
+ */
+struct ion_client *de_heap_ion_destroy_ion_client(struct ion_client *dmabuf_ion_client)
+{
+	pr_info("%s\n", __func__);
+	if (IS_ERR_OR_NULL(idev)) {
+		pr_err("%s:ion device not present, idev = %li\n",
+		       __func__, idev ? PTR_ERR(idev) : 0L);
+		return (struct ion_client *)idev;
+	}
+	if (!IS_ERR_OR_NULL(dmabuf_ion_client))
+		ion_client_destroy(dmabuf_ion_client);
+	ion_device_destroy(idev);
+	idev = NULL;
+	return NULL;
+}
+
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)))
+#else
+#error "Linux kernel not supported"
+#endif
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 496 - 0
driver/dmabuf_exporter/de_heap_noncoherent.c

@@ -0,0 +1,496 @@
+/*
+ * de_heap_noncoherent.c
+ */
+
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_X86
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+#include <asm/cacheflush.h>
+#else
+#include <asm/set_memory.h>
+#endif
+#endif /* CONFIG_X86 */
+
+#include "de_heap.h"
+
+#define MEMORY_ALLOCATION_FLAGS (GFP_DMA32 | __GFP_ZERO)
+
+enum mem_cache_type {
+	MEM_TYPE_CACHED        = 1,
+	MEM_TYPE_UNCACHED      = 2,
+	MEM_TYPE_WRITECOMBINE  = 3,
+};
+
+static unsigned int cache_type = MEM_TYPE_WRITECOMBINE;
+module_param(cache_type, uint, 0444);
+MODULE_PARM_DESC(cache_type,
+		"Memory cache type: 1-cached, 2-uncached, 3-writecombine");
+
+struct buffer {
+	size_t size;
+	void *vaddr;
+	struct sg_table *sg_table;
+	enum dma_data_direction dma_dir;
+	struct device* client;
+	int fd; /* Just for tracking */
+};
+
+/*
+ * dmabuf ops
+ */
+static void de_noncoherent_kunmap(struct dma_buf *buf, unsigned long page,
+						 void *vaddr);
+
+static void de_noncoherent_release(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+	struct scatterlist *sgl;
+
+	pr_info("%s fd:%d\n", __func__, buffer->fd);
+
+	if (unlikely(buffer->vaddr))
+		de_noncoherent_kunmap(buf, 0, buffer->vaddr);
+
+	sgl = buffer->sg_table->sgl;
+	while (sgl) {
+		struct page *page = sg_page(sgl);
+
+		if (page) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)page_address(page), 1);
+#endif
+			__free_page(page);
+		}
+		sgl = sg_next(sgl);
+	}
+	sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+
+	kfree(buffer);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+static void *de_noncoherent_kmap_atomic(struct dma_buf *buf, unsigned long page)
+{
+	pr_debug("%s\n", __func__);
+
+	return NULL;
+}
+#endif
+
+static struct sg_table *de_noncoherent_map_dma(struct dma_buf_attachment *attach,
+							enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+	struct scatterlist *sgl = buffer->sg_table->sgl;
+
+	pr_info("%s\n", __func__);
+
+	if (buffer->client) {
+		pr_err("%s client already attached!\n", __func__);
+		return NULL;
+	}
+
+	/* We are only checking that the buffer is mappable */
+	while (sgl) {
+		struct page *page  = sg_page(sgl);
+		dma_addr_t dma_addr;
+
+		pr_debug("%s:%d phys %#llx length %d\n",
+			__func__, __LINE__,
+			(unsigned long long)sg_phys(sgl), sgl->length);
+
+		if (WARN_ONCE(!page, "sg entry has no page!\n"))
+			return NULL;
+
+		dma_addr = dma_map_page(attach->dev, page, 0, PAGE_SIZE,
+					DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(attach->dev, dma_addr)) {
+			pr_err("%s dma_map_page failed!\n", __func__);
+			return NULL;
+		}
+		dma_unmap_page(attach->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+#ifdef CONFIG_X86
+		{
+			if (cache_type == MEM_TYPE_CACHED)
+				set_memory_wb((unsigned long)page_address(page), 1);
+			else if (cache_type == MEM_TYPE_WRITECOMBINE)
+				set_memory_wc((unsigned long)page_address(page), 1);
+			else if (cache_type == MEM_TYPE_UNCACHED)
+				set_memory_uc((unsigned long)page_address(page), 1);
+		}
+#endif
+		sgl = sg_next(sgl);
+	}
+	buffer->client = attach->dev;
+
+	return buffer->sg_table;
+}
+
+static void de_noncoherent_unmap_dma(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+
+	pr_info("%s\n", __func__);
+
+	buffer->client = NULL;
+}
+
+static int de_noncoherent_begin_cpu_access(struct dma_buf *dmabuf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+					size_t start, size_t len,
+#endif
+					enum dma_data_direction direction)
+{
+	struct buffer *buffer = dmabuf->priv;
+	struct sg_table *sgt = buffer->sg_table;
+	int ret;
+
+	pr_info("%s\n", __func__);
+
+	if (!buffer->client) {
+		pr_err("%s client is NULL\n", __func__);
+		return -EFAULT;
+	}
+
+	if (buffer->dma_dir == DMA_NONE) {
+		ret = dma_map_sg(buffer->client, sgt->sgl, sgt->orig_nents,
+				direction);
+		if (ret <= 0) {
+			pr_err("%s dma_map_sg failed!\n", __func__);
+			return -EFAULT;
+		}
+		sgt->nents = ret;
+		buffer->dma_dir = direction;
+	}
+
+	if (buffer->dma_dir == DMA_FROM_DEVICE)
+		dma_sync_sg_for_cpu(buffer->client, sgt->sgl, sgt->orig_nents,
+						DMA_FROM_DEVICE);
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void de_noncoherent_end_cpu_access(struct dma_buf *dmabuf,
+					size_t start, size_t len,
+					enum dma_data_direction direction)
+#else
+static int de_noncoherent_end_cpu_access(struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+#endif
+{
+	struct buffer *buffer = dmabuf->priv;
+	struct sg_table *sgt = buffer->sg_table;
+
+	pr_info("%s\n", __func__);
+
+	if (!buffer->client) {
+		pr_err("%s client is NULL\n", __func__);
+		goto exit;
+	}
+
+	if (buffer->dma_dir == DMA_NONE)
+		goto exit;
+
+	if (buffer->dma_dir == DMA_TO_DEVICE)
+		dma_sync_sg_for_cpu(buffer->client, sgt->sgl, sgt->orig_nents,
+					DMA_TO_DEVICE);
+
+	dma_unmap_sg(buffer->client, sgt->sgl,
+			sgt->orig_nents, buffer->dma_dir);
+
+	buffer->dma_dir = DMA_NONE;
+exit:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+	return 0;
+#endif
+	;
+}
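+
+/*
+ * The begin/end hooks above are normally driven from user space via the
+ * DMA_BUF_IOCTL_SYNC ioctl (available since kernel 4.6); a minimal sketch,
+ * assuming buf_fd is a dma-buf fd exported by this module:
+ *
+ *	struct dma_buf_sync sync = {
+ *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
+ *	};
+ *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *	... CPU access through the mmap'ed pointer ...
+ *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
+ *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
+ */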
+
+static int de_noncoherent_mmap(struct dma_buf *dmabuf,
+					struct vm_area_struct *vma)
+{
+	struct buffer *buffer = dmabuf->priv;
+	struct scatterlist *sgl = buffer->sg_table->sgl;
+	unsigned long addr;
+
+	pr_debug("%s\n", __func__);
+
+	/* pgprot_t cached by default */
+	if (cache_type == MEM_TYPE_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else if (cache_type == MEM_TYPE_UNCACHED)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	addr = vma->vm_start;
+	while (sgl) {
+		dma_addr_t phys = sg_phys(sgl); /* sg_dma_address ? */
+		unsigned long pfn = phys >> PAGE_SHIFT;
+		unsigned int len = sgl->length;
+		int ret;
+
+		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
+		if (ret)
+			return ret;
+
+		addr += len;
+		sgl = sg_next(sgl);
+	}
+
+	return 0;
+}
+
+static void *de_noncoherent_kmap(struct dma_buf *dma_buf, unsigned long page)
+{
+	struct buffer *buffer = dma_buf->priv;
+	struct scatterlist *sgl = buffer->sg_table->sgl;
+	unsigned int num_pages = sg_nents(sgl);
+	struct page **pages;
+	pgprot_t prot;
+	int i;
+
+	pr_debug("%s\n", __func__);
+
+	/* NOTE: ignoring the page param; the sg_table already holds the info */
+	if (buffer->vaddr)
+		return buffer->vaddr;
+
+	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		pr_err("%s failed to allocate memory for pages\n", __func__);
+		return NULL;
+	}
+
+	prot = PAGE_KERNEL;
+	/* CACHED by default */
+	if (cache_type == MEM_TYPE_WRITECOMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (cache_type == MEM_TYPE_UNCACHED)
+		prot = pgprot_noncached(prot);
+
+	i = 0;
+	while (sgl) {
+		pages[i++] = sg_page(sgl);
+		sgl = sg_next(sgl);
+	}
+
+	buffer->vaddr = vmap(pages, num_pages, VM_MAP, prot);
+	kfree(pages);
+
+	return buffer->vaddr;
+}
+
+static void de_noncoherent_kunmap(struct dma_buf *buf, unsigned long page,
+						 void *vaddr)
+{
+	struct buffer *buffer = buf->priv;
+
+	pr_debug("%s\n", __func__);
+
+	if (buffer->vaddr != vaddr || !buffer->vaddr) {
+		pr_warn("%s called with wrong address %p != %p\n",
+				__func__, vaddr, buffer->vaddr);
+		return;
+	}
+
+	vunmap(buffer->vaddr);
+	buffer->vaddr = NULL;
+}
+
+static void *de_noncoherent_vmap(struct dma_buf *buf)
+{
+	return de_noncoherent_kmap(buf, 0);
+}
+
+static void de_noncoherent_vunmap(struct dma_buf *buf, void *kptr)
+{
+	de_noncoherent_kunmap(buf, 0, kptr);
+}
+
+static const struct dma_buf_ops dmabuf_ops = {
+	.attach = NULL, /* optional */
+	.detach = NULL, /* optional */
+	.map_dma_buf = de_noncoherent_map_dma,
+	.unmap_dma_buf = de_noncoherent_unmap_dma,
+	.release = de_noncoherent_release,
+	.begin_cpu_access = de_noncoherent_begin_cpu_access,
+	.end_cpu_access = de_noncoherent_end_cpu_access,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+	.kmap_atomic = de_noncoherent_kmap_atomic,
+	.kunmap_atomic = NULL, /* optional */
+	.kmap = de_noncoherent_kmap,
+	.kunmap = de_noncoherent_kunmap, /* optional */
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+	.map_atomic = de_noncoherent_kmap_atomic,
+	.unmap_atomic = NULL, /* optional */
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	.map = de_noncoherent_kmap,
+	.unmap = de_noncoherent_kunmap, /* optional */
+#endif
+#endif
+	.mmap = de_noncoherent_mmap,
+	.vmap = de_noncoherent_vmap,
+	.vunmap = de_noncoherent_vunmap,
+};
+
+int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
+{
+	struct buffer *buffer;
+	struct dma_buf *dma_buf;
+	struct scatterlist *sgl;
+	int ret;
+	int pages;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+
+	pr_info("%s:noncoherent size %zu\n", __func__, size);
+
+	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
+	if (!buffer) {
+		pr_err("%s:noncoherent failed to allocate buffer\n", __func__);
+		return -ENOMEM;
+	}
+	buffer->size = size;
+
+	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buffer->sg_table) {
+		pr_err("%s:noncoherent failed to allocate sg_table\n", __func__);
+		ret = -ENOMEM;
+		goto sg_table_malloc_failed;
+	}
+
+	pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	ret = sg_alloc_table(buffer->sg_table, pages, GFP_KERNEL);
+	if (ret) {
+		pr_err("%s:noncoherent sg_alloc_table failed\n", __func__);
+		goto sg_alloc_table_failed;
+	}
+
+	sgl = buffer->sg_table->sgl;
+	while (sgl) {
+		struct page *page;
+
+		page = alloc_page(MEMORY_ALLOCATION_FLAGS);
+		if (!page) {
+			pr_err("%s alloc_page failed!\n", __func__);
+			ret = -ENOMEM;
+			goto free_pages;
+		}
+
+		sg_set_page(sgl, page, PAGE_SIZE, 0);
+		sgl = sg_next(sgl);
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
+	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
+#else
+	exp_info.ops = &dmabuf_ops;
+	exp_info.size = size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+	exp_info.resv = NULL;
+	dma_buf = dma_buf_export(&exp_info);
+#endif
+	if (IS_ERR(dma_buf)) {
+		pr_err("%s:noncoherent dma_buf_export failed\n", __func__);
+		ret = PTR_ERR(dma_buf);
+		goto free_pages;
+	}
+
+	buffer->dma_dir = DMA_NONE;
+	dma_buf->priv = buffer;
+	*private_data = dma_buf;
+
+	pr_info("%s:noncoherent size %zu\n",
+		__func__, size);
+	return 0;
+
+free_pages:
+	sgl = buffer->sg_table->sgl;
+	while (sgl) {
+		struct page *page = sg_page(sgl);
+
+		if (page) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)page_address(page), 1);
+#endif
+			__free_page(page);
+		}
+		sgl = sg_next(sgl);
+	}
+	sg_free_table(buffer->sg_table);
+sg_alloc_table_failed:
+	kfree(buffer->sg_table);
+sg_table_malloc_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int de_heap_export_fd(void *private_data, unsigned long flags)
+{
+	struct dma_buf *dma_buf = private_data;
+	struct buffer *buffer = dma_buf->priv;
+	int ret;
+
+	pr_debug("%s:noncoherent %p\n", __func__, dma_buf);
+
+	get_dma_buf(dma_buf);
+	buffer->fd = ret = dma_buf_fd(dma_buf, flags);
+	if (ret < 0) {
+		pr_err("%s:noncoherent dma_buf_fd failed\n", __func__);
+		dma_buf_put(dma_buf);
+		return ret;
+	}
+
+	pr_info("%s:noncoherent export fd %d\n",
+		__func__, ret);
+	return ret;
+}
+
+void de_heap_buffer_free(void *private_data)
+{
+	struct dma_buf *dma_buf = private_data;
+	struct buffer *buffer = dma_buf->priv;
+
+	pr_info("%s:noncoherent fd:%d\n", __func__, buffer->fd);
+
+	dma_buf_put(dma_buf);
+}
+
+int de_heap_heap_init(void)
+{
+	pr_info("%s:noncoherent cache_type:%d\n", __func__, cache_type);
+	return 0;
+}
+
+void de_heap_heap_deinit(void)
+{
+	pr_info("%s:noncoherent\n", __func__);
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 77 - 0
driver/dmabuf_exporter/test/dma-map.c

@@ -0,0 +1,77 @@
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <dmabuf_exporter.h>
+
+int main(int argc, char **argv)
+{
+    int fd;
+    int buff_fd;
+    unsigned long bufSize;
+    void *uptr;
+    int i;
+
+    if (argc < 2) {
+        printf("usage: %s <buffersize>\n", argv[0]);
+        return 1;
+    }
+    bufSize = atoi(argv[1]);
+
+    fd = open("/dev/dmabuf", O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return 1;
+    }
+
+    if (ioctl(fd, DMABUF_IOCTL_CREATE, bufSize)) {
+        perror("ioctl DMABUF_IOCTL_CREATE");
+        return 1;
+    }
+
+    buff_fd = ioctl(fd, DMABUF_IOCTL_EXPORT, 0);
+    if (buff_fd < 0) {
+        printf("error exporting\n");
+        return 1;
+    }
+    printf("export fd : %d\n", buff_fd);
+
+    uptr = mmap(NULL, bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, buff_fd, 0);
+    if (uptr == MAP_FAILED) {
+        perror("mmap");
+        return 1;
+    }
+    printf("mapped to %p\n", uptr);
+
+    for (i = 0; getpagesize() * i < bufSize; i++) {
+        int *iptr = (int *)((uintptr_t)uptr + getpagesize() * i);
+        int val = (int)((0xbeefu << 16) | i);
+
+        printf("write [%p] = %x\n", iptr, val);
+        *iptr = val;
+    }
+    for (i = 0; getpagesize() * i < bufSize; i++) {
+        int *iptr = (int *)((uintptr_t)uptr + getpagesize() * i);
+        int val = (int)((0xbeefu << 16) | i);
+
+        printf("read [%p] = %x [expected %x]\n", iptr, *iptr, val);
+        if (*iptr != val) {
+            printf("!!! ERROR !!!\n");
+            return 1;
+        }
+    }
+    sleep(3);
+
+    close(buff_fd);
+    close(fd);
+
+    return 0;
+}

+ 61 - 0
driver/dmabuf_exporter/test/dma-test.c

@@ -0,0 +1,61 @@
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <dmabuf_exporter.h>
+
+int main(int argc, char **argv)
+{
+    int fd;
+    int buff_fd;
+    int nBuffs;
+    unsigned long bufSize;
+    int i;
+
+    if (argc < 3) {
+        printf("usage: %s <nbuffers> <buffersize>\n", argv[0]);
+        return 1;
+    }
+    nBuffs = atoi(argv[1]);
+    bufSize = atoi(argv[2]);
+
+    for (i = 0; i < nBuffs; i++) {
+        void *uptr;
+
+        fd = open("/dev/dmabuf", O_RDWR);
+        if (fd < 0) {
+            perror("open");
+            return 1;
+        }
+
+        if (ioctl(fd, DMABUF_IOCTL_CREATE, bufSize)) {
+            perror("ioctl DMABUF_IOCTL_CREATE");
+            return 1;
+        }
+
+        buff_fd = ioctl(fd, DMABUF_IOCTL_EXPORT, 0);
+
+        if (buff_fd < 0) {
+            printf("error exporting\n");
+            return 1;
+        }
+        printf("export fd : %d\n", buff_fd);
+
+        uptr = mmap(NULL, bufSize, PROT_READ | PROT_WRITE, MAP_SHARED, buff_fd, 0);
+        if (uptr == MAP_FAILED) {
+            perror("mmap");
+            return 1;
+        }
+        printf("mapped to %p\n", uptr);
+    }
+
+    sleep(5);
+    return 0;
+}

+ 20 - 0
driver/dmabuf_exporter/uapi/dmabuf_exporter.h

@@ -0,0 +1,20 @@
+/*
+  User API for dmabuf_exporter
+*/
+
+#ifndef _DMABUF_EXPORTER_H
+#define _DMABUF_EXPORTER_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define DMABUF_IOCTL_BASE   'D'
+#define DMABUF_IOCTL_CREATE _IOR(DMABUF_IOCTL_BASE, 0, unsigned long)
+#define DMABUF_IOCTL_EXPORT _IOR(DMABUF_IOCTL_BASE, 1, unsigned long)
+#ifdef CONFIG_COMPAT
+#define COMPAT_DMABUF_IOCTL_CREATE _IOR(DMABUF_IOCTL_BASE, 0, unsigned int)
+#define COMPAT_DMABUF_IOCTL_EXPORT _IOR(DMABUF_IOCTL_BASE, 1, unsigned int)
+#endif
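+
+/*
+ * Usage (see test/dma-map.c): the buffer size is passed as the raw ioctl
+ * argument to DMABUF_IOCTL_CREATE, and DMABUF_IOCTL_EXPORT returns a
+ * dma-buf fd for the created buffer.
+ */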
+
+#endif /* _DMABUF_EXPORTER_H */
+

+ 136 - 0
driver/dmabuf_exporter/uapi/kernel_4x14/ion.h

@@ -0,0 +1,136 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ *				 carveout heap, allocations are physically
+ *				 contiguous
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
+ *				 is used to identify the heaps, so only 32
+ *				 total heap types are supported
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_CUSTOM, /*
+			       * must be last so device specific heaps always
+			       * are at the end of this enum
+			       */
+};
+
+#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+
+/*
+ * mappings of this buffer should be cached, ion will do cache maintenance
+ * when the buffer is mapped for dma
+ */
+#define ION_FLAG_CACHED 1
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @fd:			will be populated with a dma-buf fd for the allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	__u64 len;
+	__u32 heap_id_mask;
+	__u32 flags;
+	__u32 fd;
+	__u32 unused;
+};
+
+#define MAX_HEAP_NAME			32
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name - first 32 characters of the heap name
+ * @type - heap type
+ * @heap_id - heap id for the heap
+ */
+struct ion_heap_data {
+	char name[MAX_HEAP_NAME];
+	__u32 type;
+	__u32 heap_id;
+	__u32 reserved0;
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt - total number of heaps to be copied
+ * @heaps - buffer to copy heap data
+ */
+struct ion_heap_query {
+	__u32 cnt; /* Total number of heaps to be copied */
+	__u32 reserved0; /* align to 64bits */
+	__u64 heaps; /* buffer to be populated */
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the fd field
+ * populated with a dma-buf fd for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 8, \
+					struct ion_heap_query)
+
+#endif /* _UAPI_LINUX_ION_H */

+ 203 - 0
driver/dmabuf_exporter/uapi/kernel_4x4/ion.h

@@ -0,0 +1,203 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ *				 carveout heap, allocations are physically
+ *				 contiguous
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
+ *				 is used to identify the heaps, so only 32
+ *				 total heap types are supported
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_CUSTOM, /*
+			       * must be last so device specific heaps always
+			       * are at the end of this enum
+			       */
+	ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1		/*
+					 * mappings of this buffer should be
+					 * cached, ion will do cache
+					 * maintenance when the buffer is
+					 * mapped for dma
+					*/
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/*
+					 * mappings of this buffer will created
+					 * at mmap time, if this is set
+					 * caches must be managed
+					 * manually
+					 */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @handle:		pointer that will be populated with a cookie to use to
+ *			refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int heap_id_mask;
+	unsigned int flags;
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	ion_user_handle_t handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * filed set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
+ *
+ * Deprecated in favor of using the dma_buf api's correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary should be used after touching a cached buffer from the cpu,
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */

+ 3 - 0
driver/fenrir_loki/Makefile

@@ -0,0 +1,3 @@
+obj-${CONFIG_LOKI} += loki.o
+loki-y += loki-main.o
+loki-y += loki-intc.o

+ 159 - 0
driver/fenrir_loki/loki-intc.c

@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Code for Fenrir's Loki interrupt controller.
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include "loki.h"
+
+
+static void loki_mask_irq(struct irq_data *d)
+{
+    struct loki_drvdata *pdata = irq_data_get_irq_chip_data(d);
+
+    u32 reg = pdata->readreg32(pdata, REG_LOKI_INTERRUPT_ENABLE);
+
+    pdata->writereg32(pdata, REG_LOKI_INTERRUPT_ENABLE, reg & ~(LOKI_INTERRUPT_DUT0));
+}
+
+static void loki_unmask_irq(struct irq_data *d)
+{
+    struct loki_drvdata *pdata = irq_data_get_irq_chip_data(d);
+
+    u32 reg = pdata->readreg32(pdata, REG_LOKI_INTERRUPT_ENABLE);
+    pdata->writereg32(pdata, REG_LOKI_INTERRUPT_ENABLE, reg | LOKI_INTERRUPT_DUT0);
+}
+
+static struct irq_chip fenrir_loki = {
+        .name		= "loki-intc",
+        .irq_mask	= loki_mask_irq,
+        .irq_unmask	= loki_unmask_irq,
+};
+
+static int loki_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+    struct irq_chip *chip = &fenrir_loki;
+
+    irq_domain_set_info(d, irq, hw, chip, d->host_data,
+                        handle_level_irq, NULL, NULL);
+    irq_set_status_flags(irq, IRQ_LEVEL);
+
+    return 0;
+}
+
+static irqreturn_t loki_isrcb(int irq, void *dev_id)
+{
+    struct platform_device *pdev = (struct platform_device *)dev_id;
+    struct loki_drvdata *pdata;
+    u32 reg, mask, timeout;
+
+    if (!pdev) {
+        pr_err("LOKI: pdev not set!?\n");
+        return IRQ_NONE;
+    }
+
+    pdata = platform_get_drvdata(pdev);
+
+    if (!pdata) {
+        pr_err("LOKI: pdata not set!?\n");
+        return IRQ_NONE;
+    }
+
+    reg = pdata->readreg32(pdata, REG_LOKI_INTERRUPT_STATUS);
+    mask = pdata->readreg32(pdata, REG_LOKI_INTERRUPT_ENABLE);
+    timeout = pdata->readreg32(pdata, REG_LOKI_INTERRUPT_TIMEOUT_CLR);
+
+    dev_dbg(&pdev->dev, "Got an interrupt. %X - %X - %X\n", reg, mask, timeout);
+
+    /* Check the timeout register just in case */
+    if (timeout != 0) {
+        dev_warn(&pdev->dev, "Interrupt timeout fired. Will need to be reset\n");
+    }
+
+    if (reg & mask) {
+        if (reg & LOKI_INTERRUPT_TESTINT) {
+            dev_warn(&pdev->dev, "Test interrupt fired! Was it on purpose?\n");
+            /* Disable the interrupt */
+            pdata->writereg32(pdata, REG_LOKI_INTERRUPT_TEST, 0);
+        }
+        else if (reg & LOKI_INTERRUPT_DUT0) {
+            int logical_irq_num;
+            /* trigger registered IRQ, if any */
+            logical_irq_num = irq_find_mapping(pdata->intc.domain, 0);
+            generic_handle_irq(logical_irq_num);
+        }
+
+        /* Clear interrupts */
+        pdata->writereg32(pdata, REG_LOKI_INTERRUPT_CLR, reg);
+
+        return IRQ_HANDLED;
+    }
+
+    return IRQ_NONE;
+}
+
+static const struct irq_domain_ops loki_intc_ops = {
+        .xlate = irq_domain_xlate_onecell,
+        .map = loki_intc_map,
+};
+
+int loki_intc_probe(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct device_node *of_node = pdev->dev.of_node;
+    int ret = 0;
+    struct loki_drvdata *priv_data = platform_get_drvdata(pdev);
+
+    dev_dbg(dev, "Going to register loki's intc...");
+
+    /* Setup the interrupt controller */
+    priv_data->writereg32(priv_data, REG_LOKI_INTERRUPT_ENABLE, LOKI_INTERRUPT_BASE);
+    priv_data->writereg32(priv_data, REG_LOKI_INTERRUPT_CLR, LOKI_INTERRUPT_BASE | LOKI_INTERRUPT_DUT0);
+    priv_data->writereg32(priv_data, REG_LOKI_INTERRUPT_TIMEOUT_CLR, 0x2);
+
+    priv_data->intc.irq_num = irq_of_parse_and_map(of_node, 0);
+    if (priv_data->intc.irq_num == 0) {
+        dev_err(dev, "Could not map IRQ\n");
+        ret = -ENXIO;
+        goto exit;
+    }
+
+    /* Create the domain before requesting the shared parent IRQ so the
+     * ISR can never race against a not-yet-created domain. */
+    priv_data->intc.domain = irq_domain_add_linear(of_node, 1, &loki_intc_ops, priv_data);
+    if (!priv_data->intc.domain) {
+        dev_err(dev, "Unable to create IRQ domain\n");
+        ret = -ENXIO;
+        goto exit;
+    }
+
+    ret = devm_request_irq(dev, priv_data->intc.irq_num, &loki_isrcb, IRQF_SHARED, DEVICE_NAME, pdev);
+    if (ret) {
+        dev_err(dev, "Failed to request irq\n");
+        irq_domain_remove(priv_data->intc.domain);
+        ret = -ENXIO;
+        goto exit;
+    }
+
+exit:
+    return ret;
+}
+
+int loki_intc_remove(struct platform_device *pdev)
+{
+    struct loki_drvdata *pdata = platform_get_drvdata(pdev);
+
+    irq_dispose_mapping(irq_find_mapping(pdata->intc.domain, 0));
+    irq_domain_remove(pdata->intc.domain);
+    return 0;
+}
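
A device sitting behind this demux consumes the single hwirq through the
normal platform API. The sketch below is illustrative only (the compatible
string, names and handler body are hypothetical): a DUT node in the device
tree would name the loki node as its interrupt-parent, so platform_get_irq()
resolves hwirq 0 through the linear domain created in loki_intc_probe().

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static irqreturn_t example_dut_isr(int irq, void *dev_id)
{
	/* DUT-specific interrupt handling would go here */
	return IRQ_HANDLED;
}

static int example_dut_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* demuxed by loki-intc */

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_dut_isr, 0,
				"example-dut", pdev);
}

static const struct of_device_id example_dut_dt_ids[] = {
	{ .compatible = "img,example-dut", },
	{},
};

static struct platform_driver example_dut_driver = {
	.probe	= example_dut_probe,
	.driver	= {
		.name		= "example-dut",
		.of_match_table	= example_dut_dt_ids,
	},
};
module_platform_driver(example_dut_driver);
MODULE_LICENSE("GPL v2");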

+ 110 - 0
driver/fenrir_loki/loki-main.c

@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Code for Fenrir's Loki.
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include "loki.h"
+
+static inline unsigned int loki_readreg32(struct loki_drvdata *pdata, unsigned long offset)
+{
+    void __iomem *reg = (void __iomem *)pdata->regbase + offset;
+    return ioread32(reg);
+}
+
+static inline void loki_writereg32(struct loki_drvdata *pdata, unsigned long offset, int val)
+{
+    void __iomem *reg = (void __iomem *)pdata->regbase + offset;
+    iowrite32(val, reg);
+}
+
+static int loki_probe(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct device_node *of_node = pdev->dev.of_node;
+    int ret = 0;
+    struct loki_drvdata *priv_data;
+    uint32_t memif_cache, memif_prot;
+
+    dev_dbg(dev, "Probe...");
+
+    priv_data = devm_kzalloc(dev, sizeof(struct loki_drvdata), GFP_KERNEL);
+    if (!priv_data) {
+        pr_err("Memory allocation error, aborting.\n");
+        ret = -ENOMEM;
+        goto exit;
+    }
+
+    priv_data->regbase = of_iomap(of_node, 0);
+    if (!priv_data->regbase) {
+        dev_err(dev, "Unable to map local interrupt registers\n");
+        ret = -ENXIO;
+        goto exit;
+    }
+
+    priv_data->writereg32 = loki_writereg32;
+    priv_data->readreg32 = loki_readreg32;
+
+
+    /* Reset the DUT */
+    priv_data->writereg32(priv_data, REG_LOKI_EXTERNAL_RESET, 0);
+    udelay(10);
+    priv_data->writereg32(priv_data, REG_LOKI_EXTERNAL_RESET, 1);
+
+    platform_set_drvdata(pdev, priv_data);
+
+    /* Get optional data from the Device Tree. Read as u32 (the variables
+     * are 32-bit) and only program the registers when the property is
+     * actually present, so an uninitialised value is never written. */
+    if (!of_property_read_u32(pdev->dev.of_node, "memif-cache",
+                              &memif_cache)) {
+        dev_info(dev, "Setting memif_cache to %X from the DT\n", memif_cache);
+        priv_data->writereg32(priv_data, REG_LOKI_MEMIF_CACHE_SET, memif_cache);
+    }
+
+    if (!of_property_read_u32(pdev->dev.of_node, "memif-prot",
+                              &memif_prot)) {
+        dev_info(dev, "Setting memif_prot to %X from the DT\n", memif_prot);
+        priv_data->writereg32(priv_data, REG_LOKI_MEMIF_PROT_SET, memif_prot);
+    }
+
+    ret = loki_intc_probe(pdev);
+
+exit:
+    return ret;
+}
+
+static int loki_remove(struct platform_device *pdev)
+{
+    loki_intc_remove(pdev);
+
+    return 0;
+}
+
+static const struct of_device_id loki_dt_ids[] = {
+        { .compatible = "img,loki", },
+        {},
+};
+MODULE_DEVICE_TABLE(of, loki_dt_ids);
+
+static struct platform_driver loki_device_driver = {
+        .probe		= loki_probe,
+        .remove		= loki_remove,
+        .driver		= {
+                .name	= DEVICE_NAME,
+                .of_match_table	= of_match_ptr(loki_dt_ids),
+        }
+};
+module_platform_driver(loki_device_driver);
+
+MODULE_AUTHOR("Imagination Technologies");
+MODULE_DESCRIPTION("Fenrir Loki driver");
+MODULE_LICENSE("GPL v2");

+ 44 - 0
driver/fenrir_loki/loki.h

@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Fenrir's Loki header
+ */
+#ifndef LOKI_H
+#define LOKI_H
+
+#define DEVICE_NAME "loki_intc"
+
+#define REG_LOKI_EXTERNAL_RESET            (0x0084)
+#define REG_LOKI_INTERRUPT_STATUS          (0x0100)
+#define REG_LOKI_INTERRUPT_ENABLE          (0x0104)
+#define REG_LOKI_INTERRUPT_CLR             (0x010C)
+#define REG_LOKI_INTERRUPT_TEST            (0x0110)
+#define REG_LOKI_INTERRUPT_TIMEOUT_CLR     (0x0114)
+#define REG_LOKI_INTERRUPT_TIMEOUT         (0x0118)
+
+#define REG_LOKI_MEMIF_CACHE_SET           (0x0230)
+#define REG_LOKI_MEMIF_PROT_SET            (0x0234)
+
+/* interrupt bits definitions */
+#define LOKI_INTERRUPT_MASTER_ENABLE       (1U << 31)
+#define LOKI_INTERRUPT_TESTINT             (1U << 30)
+#define LOKI_INTERRUPT_DUT0                (1U <<  0)
+
+#define LOKI_INTERRUPT_BASE                 (LOKI_INTERRUPT_MASTER_ENABLE | LOKI_INTERRUPT_TESTINT)
+
+struct loki_intc_drvdata {
+    struct irq_domain *domain;
+    int irq_num;
+};
+
+struct loki_drvdata {
+    void __iomem *regbase;
+    unsigned int (*readreg32)(struct loki_drvdata *pdata, unsigned long offset);
+    void (*writereg32)(struct loki_drvdata *pdata, unsigned long offset, int val);
+
+    struct loki_intc_drvdata intc;
+};
+
+int loki_intc_probe(struct platform_device *pdev);
+int loki_intc_remove(struct platform_device *pdev);
+
+#endif /* LOKI_H */

+ 33 - 0
driver/img_mem/Makefile

@@ -0,0 +1,33 @@
+img_mem-y                           := img_mem_man.o
+img_mem-$(CONFIG_GENERIC_ALLOCATOR) += img_mem_carveout.o
+# removed building ION, kernel 4.14 ABI changes not implemented yet
+img_mem-$(CONFIG_ION)               += img_mem_ion.o
+img_mem-$(CONFIG_DMA_SHARED_BUFFER) += img_mem_dmabuf.o
+img_mem-y                           += img_mem_unified.o img_mem_coherent.o
+img_mem-y                           += img_mem_anonymous.o
+img_mem-y                           += img_mem_ocm.o
+img_mem-y                           += img_pdump.o
+
+
+ifeq ($(CONFIG_ION), y)
+CFLAGS_img_mem_ion.o += -Idrivers/staging/android/ion
+endif
+
+# IMGMMU: These should be obsoleted
+ccflags-y += -I$(src)/imgmmu/mmulib
+# IMGMMU: code
+img_mem-y                           += imgmmu/imgmmu.o
+img_mem-y                           += imgmmu/kernel_heap.o
+
+obj-$(CONFIG_VHA) += img_mem.o
+
+# Alias for backward compatibility
+CONFIG_HW_AX3_MC        := $(CONFIG_HW_MULTICORE)
+# Magna does not use multiple OSes approach
+ifeq ($(CONFIG_HW_AX3), y)
+  ifeq ($(CONFIG_HW_AX3_MC),)
+      ccflags-y += -DOSID=$(CONFIG_TARGET_OSID)
+  endif
+endif
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IMG_MEM

+ 395 - 0
driver/img_mem/img_mem_anonymous.c

@@ -0,0 +1,395 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_anonymous.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+static int trace_physical_pages;
+
+struct buffer_data {
+	struct sg_table *sgt;
+	enum img_mem_attr mattr;  /* memory attributes */
+};
+
+static int anonymous_heap_import(struct device *device, struct heap *heap,
+						size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+						struct buffer *buffer)
+{
+	struct buffer_data *data;
+	unsigned long cpu_addr = (unsigned long)buf_hnd;
+	struct sg_table *sgt;
+	struct page **pages;
+	struct scatterlist *sgl;
+	int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	int ret;
+	int i;
+
+	pr_debug("%s:%d buffer %d (0x%p) cpu_addr %#lx for PID:%d\n",
+			__func__, __LINE__, buffer->id, buffer,
+			cpu_addr, task_pid_nr(current));
+
+	/* Check alignment */
+	if (cpu_addr & (PAGE_SIZE-1)) {
+		pr_err("%s wrong alignment of %#lx address!\n",
+				__func__, cpu_addr);
+		return -EFAULT;
+	}
+
+	pages = kmalloc_array(num_pages, sizeof(struct page *),
+			GFP_KERNEL | __GFP_ZERO);
+	if (!pages) {
+		pr_err("%s failed to allocate memory for pages\n", __func__);
+		return -ENOMEM;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+	down_read(&current->mm->mmap_sem);
+#else
+	down_read(&current->mm->mmap_lock);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+	ret = get_user_pages(
+			cpu_addr, num_pages,
+			FOLL_WRITE,
+			pages, NULL);
+#else
+	pr_err("%s get_user_pages not supported for this kernel version\n",
+				__func__);
+	ret = -1;
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+	up_read(&current->mm->mmap_sem);
+#else
+	up_read(&current->mm->mmap_lock);
+#endif
+	if (ret != num_pages) {
+		pr_err("%s failed to get_user_pages count:%d for %#lx address\n",
+				__func__, num_pages, cpu_addr);
+		ret = -ENOMEM;
+		goto get_user_pages_failed;
+	}
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto alloc_sgt_failed;
+	}
+
+	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+	if (ret) {
+		pr_err("%s failed to allocate sgt with num_pages\n", __func__);
+		goto alloc_sgt_pages_failed;
+	}
+
+	data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto alloc_priv_failed;
+	}
+
+	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
+		struct page *page = pages[i];
+		sg_set_page(sgl, page, PAGE_SIZE, 0);
+
+		/* Sanity check if physical address is
+		 * accessible from the device PoV */
+		if (~dma_get_mask(device) & sg_phys(sgl)) {
+			pr_err("%s physical address is out of dma_mask,"
+					" and probably won't be accessible by the core!\n",
+					__func__);
+			ret = -ERANGE;
+			goto dma_mask_check_failed;
+		}
+
+		if (trace_physical_pages)
+			pr_info("%s:%d phys %#llx length %d\n",
+				 __func__, __LINE__,
+				 (unsigned long long)sg_phys(sgl), sgl->length);
+	}
+
+	pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
+		 buffer->id, sgt->orig_nents);
+
+	data->sgt = sgt;
+	data->mattr = attr;
+	buffer->priv = data;
+
+	ret = dma_map_sg(buffer->device, sgt->sgl, sgt->orig_nents,
+				DMA_BIDIRECTIONAL);
+	if (ret <= 0) {
+		pr_err("%s dma_map_sg failed!\n", __func__);
+		goto dma_mask_check_failed;
+	}
+
+	kfree(pages);
+	return 0;
+
+dma_mask_check_failed:
+	kfree(data);
+alloc_priv_failed:
+	sg_free_table(sgt);
+alloc_sgt_pages_failed:
+	kfree(sgt);
+alloc_sgt_failed:
+get_user_pages_failed:
+	/* Unpin anything get_user_pages() managed to pin; the array was
+	 * zero-initialised, so never-pinned slots are NULL. */
+	for (i = 0; i < num_pages; i++)
+		if (pages[i])
+			put_page(pages[i]);
+	kfree(pages);
+	return ret;
+}
+
+static void anonymous_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct sg_table *sgt = data->sgt;
+	struct scatterlist *sgl;
+	bool dirty = false;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	dma_unmap_sg(buffer->device, sgt->sgl, sgt->orig_nents,
+			DMA_BIDIRECTIONAL);
+
+	if (buffer->kptr) {
+		pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
+		dirty = true;
+		vunmap(buffer->kptr);
+		buffer->kptr = NULL;
+	}
+
+	sgl = sgt->sgl;
+	while (sgl) {
+		struct page *page = sg_page(sgl);
+		if (page) {
+			if (dirty)
+				set_page_dirty(page);
+			put_page(page);
+		}
+		sgl = sg_next(sgl);
+	}
+
+	sg_free_table(sgt);
+	kfree(sgt);
+	kfree(data);
+}
+
+static int anonymous_heap_map_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	struct scatterlist *sgl = sgt->sgl;
+	unsigned int num_pages = sg_nents(sgl);
+	struct page **pages;
+	pgprot_t prot;
+	int i;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	if (buffer->kptr) {
+		pr_warn("%s called for already mapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		pr_err("%s failed to allocate memory for pages\n", __func__);
+		return -ENOMEM;
+	}
+
+	prot = PAGE_KERNEL;
+	/* CACHED by default */
+	if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
+		prot = pgprot_noncached(prot);
+
+	i = 0;
+	while (sgl) {
+		pages[i++] = sg_page(sgl);
+		sgl = sg_next(sgl);
+	}
+
+	buffer->kptr = vmap(pages, num_pages, VM_MAP, prot);
+	kfree(pages);
+	if (!buffer->kptr) {
+		pr_err("%s vmap failed!\n", __func__);
+		return -EFAULT;
+	}
+
+	pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer->kptr);
+
+	return 0;
+}
+
+static int anonymous_heap_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct sg_table *sgt = data->sgt;
+	struct scatterlist *sgl;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	if (!buffer->kptr) {
+		pr_warn("%s called for unmapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	pr_debug("%s:%d buffer %d kunmap from 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer->kptr);
+	vunmap(buffer->kptr);
+	buffer->kptr = NULL;
+
+	sgl = sgt->sgl;
+	while (sgl) {
+		struct page *page = sg_page(sgl);
+		if (page) {
+			set_page_dirty(page);
+		}
+		sgl = sg_next(sgl);
+	}
+
+	return 0;
+}
+
+static int anonymous_get_sg_table(struct heap *heap, struct buffer *buffer,
+						 struct sg_table **sg_table, bool *use_sg_dma)
+{
+	struct buffer_data *data = buffer->priv;
+
+	*sg_table = data->sgt;
+	*use_sg_dma = false;
+	return 0;
+}
+
+static void anonymous_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		dma_sync_sg_for_device(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_TO_DEVICE);
+		dma_sync_sg_for_cpu(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_FROM_DEVICE);
+	}
+}
+
+static void anonymous_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		dma_sync_sg_for_cpu(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_FROM_DEVICE);
+	}
+}
+
+static void anonymous_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops anonymous_heap_ops = {
+	.alloc = NULL,
+	.import = anonymous_heap_import,
+	.free = anonymous_heap_free,
+	.map_um = NULL,
+	.unmap_um = NULL,
+	.map_km = anonymous_heap_map_km,
+	.unmap_km = anonymous_heap_unmap_km,
+	.get_sg_table = anonymous_get_sg_table,
+	.get_page_array = NULL,
+	.sync_cpu_to_dev = anonymous_sync_cpu_to_dev,
+	.sync_dev_to_cpu = anonymous_sync_dev_to_cpu,
+	.set_offset = NULL,
+	.destroy = anonymous_heap_destroy,
+};
+
+int img_mem_anonymous_init(const struct heap_config *heap_cfg, struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	heap->ops = &anonymous_heap_ops;
+
+	return 0;
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */
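
Taken together, the ops above give img_mem_man a pin -> map -> sync -> free
lifecycle for a page-aligned user pointer. A hedged sketch of that call
sequence (the wrapper name is invented; buffer/heap setup is assumed to have
been done by the memory manager):

static int anonymous_lifecycle_example(struct device *dev, struct heap *heap,
				       struct buffer *buf, uint64_t user_ptr,
				       size_t size, enum img_mem_attr attr)
{
	/* Pin the user pages and build the sg_table (get_user_pages) */
	int ret = heap->ops->import(dev, heap, size, attr, user_ptr, buf);

	if (ret)
		return ret;

	/* Optional kernel-side mapping (vmap) for driver CPU access */
	ret = heap->ops->map_km(heap, buf);
	if (ret)
		goto out_free;

	/* Make CPU writes visible before the device touches the buffer */
	heap->ops->sync_cpu_to_dev(heap, buf);

	/* ... device works on the buffer here ... */

	/* Pull device writes back for the CPU */
	heap->ops->sync_dev_to_cpu(heap, buf);

	ret = heap->ops->unmap_km(heap, buf);
out_free:
	/* Marks pages dirty and unpins them */
	heap->ops->free(heap, buf);
	return ret;
}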

+ 798 - 0
driver/img_mem/img_mem_carveout.c

@@ -0,0 +1,798 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_carveout.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-buf.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/genalloc.h>
+#include <linux/version.h>
+
+#include <asm/cacheflush.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+/* Default allocation order */
+#define POOL_ALLOC_ORDER_BASE PAGE_SHIFT
+
+struct heap_data {
+	struct gen_pool *pool;
+};
+
+struct buffer_data {
+	unsigned long addr; /* addr returned by genalloc */
+	uint64_t *addrs; /* array of physical addresses, upcast to 64-bit */
+	enum img_mem_attr mattr;  /* memory attributes */
+	struct vm_area_struct *mapped_vma; /* Needed for cache manipulation */
+	/* exporter via dmabuf */
+	struct sg_table *sgt;
+	bool exported;
+	struct dma_buf *dma_buf;
+	dma_addr_t dma_base;
+	unsigned int dma_size;
+};
+
+static int trace_physical_pages;
+static int trace_mmap_fault;
+
+/*
+ * dmabuf wrapper ops
+ */
+static struct sg_table *carveout_map_dmabuf(struct dma_buf_attachment *attach,
+		enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+	struct buffer_data *buffer_data;
+
+	if (!buffer)
+		return NULL;
+
+	pr_debug("%s\n", __func__);
+
+	buffer_data = buffer->priv;
+	sg_dma_address(buffer_data->sgt->sgl) = buffer_data->dma_base;
+	sg_dma_len(buffer_data->sgt->sgl) = buffer_data->dma_size;
+
+	return buffer_data->sgt;
+}
+
+static void carveout_unmap_dmabuf(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+	struct buffer_data *buffer_data;
+
+	if (!buffer)
+		return;
+
+	pr_debug("%s\n", __func__);
+
+	buffer_data = buffer->priv;
+	sg_dma_address(buffer_data->sgt->sgl) = (~(dma_addr_t)0);
+	sg_dma_len(buffer_data->sgt->sgl) = 0;
+}
+
+/* Called when when ref counter reaches zero! */
+static void carveout_release_dmabuf(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+
+	if (!buffer)
+		return;
+
+	buffer_data = buffer->priv;
+	pr_debug("%s %p\n", __func__, buffer_data);
+	if (!buffer_data)
+		return;
+
+	buffer_data->exported = false;
+}
+
+/* Called on file descriptor mmap */
+static int carveout_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+	struct scatterlist *sgl;
+	unsigned long addr;
+
+	if (!buffer)
+		return -EINVAL;
+
+	buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %#lx\n",
+		__func__, __LINE__,
+		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	sgl = buffer_data->sgt->sgl;
+	addr = vma->vm_start;
+	while (sgl) {
+		dma_addr_t phys = sg_phys(sgl);
+		unsigned long pfn = phys >> PAGE_SHIFT;
+		unsigned int len = sgl->length;
+		int ret;
+
+		if (vma->vm_end < (addr + len)) {
+			unsigned long size = vma->vm_end - addr;
+			pr_debug("%s:%d buffer %d (0x%p) truncating len=%#x to size=%#lx\n",
+				__func__, __LINE__,
+				buffer->id, buffer, len, size);
+			WARN(round_up(size, PAGE_SIZE) != size,
+				"VMA size %#lx not page aligned\n", size);
+			len = size;
+			if (!len) /* VM space is smaller than allocation */
+				break;
+		}
+
+		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
+		if (ret)
+			return ret;
+
+		addr += len;
+		sgl = sg_next(sgl);
+	}
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+static void *carveout_kmap_dmabuf(struct dma_buf *buf, unsigned long page)
+{
+	pr_err("%s not supported\n", __func__);
+	return NULL;
+}
+#endif
+
+static int carveout_heap_map_km(struct heap *heap, struct buffer *buffer);
+static int carveout_heap_unmap_km(struct heap *heap, struct buffer *buffer);
+
+static void *carveout_vmap_dmabuf(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+	struct heap *heap;
+
+	if (!buffer)
+		return NULL;
+
+	heap = buffer->heap;
+
+	if (carveout_heap_map_km(heap, buffer))
+		return NULL;
+
+	pr_debug("%s:%d buffer %d kptr 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+
+	return buffer->kptr;
+}
+
+static void carveout_vunmap_dmabuf(struct dma_buf *buf, void *kptr)
+{
+	struct buffer *buffer = buf->priv;
+	struct heap *heap;
+
+	if (!buffer)
+		return;
+
+	heap = buffer->heap;
+
+	pr_debug("%s:%d buffer %d kptr 0x%p (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer->kptr, kptr);
+
+	if (buffer->kptr == kptr)
+		carveout_heap_unmap_km(heap, buffer);
+}
+
+static const struct dma_buf_ops carveout_dmabuf_ops = {
+	.map_dma_buf = carveout_map_dmabuf,
+	.unmap_dma_buf = carveout_unmap_dmabuf,
+	.release = carveout_release_dmabuf,
+	.mmap = carveout_mmap_dmabuf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+	.kmap_atomic = carveout_kmap_dmabuf,
+	.kmap = carveout_kmap_dmabuf,
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+	.map_atomic = carveout_kmap_dmabuf,
+	.map = carveout_kmap_dmabuf,
+#endif
+#endif
+	.vmap = carveout_vmap_dmabuf,
+	.vunmap = carveout_vunmap_dmabuf,
+};
+
+static int carveout_heap_export(struct device *device, struct heap *heap,
+						 size_t size, enum img_mem_attr attr,
+						 struct buffer *buffer, uint64_t* buf_hnd)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct dma_buf *dma_buf;
+	int ret, fd;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!buffer_data)
+		/* Nothing to export ? */
+		return -ENOMEM;
+
+	if (buffer_data->exported) {
+		pr_err("%s: already exported!\n", __func__);
+		return -EBUSY;
+	}
+
+	if (!buffer_data->sgt) {
+		/* Create for the very first time */
+		buffer_data->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+		if (!buffer_data->sgt) {
+			pr_err("%s: failed to allocate sg_table\n", __func__);
+			return -ENOMEM;
+		}
+
+		ret = sg_alloc_table(buffer_data->sgt, 1, GFP_KERNEL);
+		if (ret) {
+			pr_err("%s: sg_alloc_table failed\n", __func__);
+			goto free_sgt_mem;
+		}
+		sg_set_page(buffer_data->sgt->sgl,
+				pfn_to_page(PFN_DOWN(buffer_data->addr+heap->options.carveout.offs)),
+				PAGE_ALIGN(size), 0);
+		/* Store dma info */
+		if (heap->to_dev_addr)
+			buffer_data->dma_base = heap->to_dev_addr(&heap->options,
+					buffer_data->addr+heap->options.carveout.offs);
+		else
+			buffer_data->dma_base = buffer_data->addr+heap->options.carveout.offs;
+
+		buffer_data->dma_size = PAGE_ALIGN(size);
+		/* No mapping yet */
+		sg_dma_address(buffer_data->sgt->sgl) = (~(dma_addr_t)0);
+		sg_dma_len(buffer_data->sgt->sgl) = 0;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+	dma_buf = dma_buf_export(buffer, &carveout_dmabuf_ops,
+			size, O_RDWR);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
+	dma_buf = dma_buf_export(buffer, &carveout_dmabuf_ops,
+			size, O_RDWR, NULL);
+#else
+	exp_info.ops = &carveout_dmabuf_ops;
+	exp_info.size = size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+	exp_info.resv = NULL;
+	dma_buf = dma_buf_export(&exp_info);
+#endif
+	if (IS_ERR(dma_buf)) {
+		pr_err("%s:dma_buf_export failed\n", __func__);
+		ret = PTR_ERR(dma_buf);
+		return ret;
+	}
+
+	get_dma_buf(dma_buf);
+	fd = dma_buf_fd(dma_buf, 0);
+	if (fd < 0) {
+		pr_err("%s: dma_buf_fd failed\n", __func__);
+		dma_buf_put(dma_buf);
+		return -EFAULT;
+	}
+	buffer_data->dma_buf = dma_buf;
+	buffer_data->exported = true;
+	*buf_hnd = (uint64_t)fd;
+
+	return 0;
+
+free_sgt_mem:
+	kfree(buffer_data->sgt);
+	buffer_data->sgt = NULL;
+
+	return ret;
+}
+
+static int carveout_heap_alloc(struct device *device, struct heap *heap,
+						 size_t size, enum img_mem_attr attr,
+						 struct buffer *buffer)
+{
+	struct heap_data *heap_data = heap->priv;
+	struct buffer_data *buffer_data;
+	phys_addr_t phys_addr;
+	size_t pages, page;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	buffer_data = kzalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!buffer_data)
+		return -ENOMEM;
+
+	pages = size / PAGE_SIZE;
+	buffer_data->addrs = kmalloc_array(pages, sizeof(uint64_t), GFP_KERNEL);
+	if (!buffer_data->addrs) {
+		kfree(buffer_data);
+		return -ENOMEM;
+	}
+
+	buffer_data->mattr = attr;
+
+	buffer_data->addr = gen_pool_alloc(heap_data->pool, size);
+	if (!buffer_data->addr) {
+		pr_err("%s gen_pool_alloc failed!\n", __func__);
+		kfree(buffer_data->addrs);
+		kfree(buffer_data);
+		return -ENOMEM;
+	}
+
+	/* gen_pool_virt_to_phys() below relies on the 1:1 virt:phys
+	 * mapping set up in img_mem_carveout_init() */
+	phys_addr = gen_pool_virt_to_phys(heap_data->pool,
+			buffer_data->addr + heap->options.carveout.offs);
+
+	page = 0;
+	while (page < pages) {
+		if (trace_physical_pages)
+			pr_info("%s phys %llx\n",
+				__func__, (unsigned long long)phys_addr);
+		buffer_data->addrs[page++] = phys_addr;
+		phys_addr += PAGE_SIZE;
+	}
+
+	buffer->priv = buffer_data;
+
+	pr_debug("%s buffer %d phys %#llx size %zu attrs %x\n", __func__,
+		buffer->id,
+		(unsigned long long)buffer_data->addrs[0],
+		size,
+		attr);
+	return 0;
+}
+
+static void carveout_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct heap_data *heap_data = heap->priv;
+	struct buffer_data *buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	/* If forgot to unmap */
+	if (heap->options.carveout.put_kptr && buffer->kptr) {
+		heap->options.carveout.put_kptr(buffer->kptr);
+		buffer->kptr = NULL;
+	}
+
+	if (buffer_data->dma_buf) {
+		/* Detach ourselves first: dma_buf_put() may drop the last
+		 * reference and free the dma_buf. */
+		buffer_data->dma_buf->priv = NULL;
+		dma_buf_put(buffer_data->dma_buf);
+	}
+
+	if (buffer_data->sgt) {
+		sg_free_table(buffer_data->sgt);
+		kfree(buffer_data->sgt);
+		buffer_data->sgt = NULL;
+	}
+
+	if (buffer_data->mapped_vma)
+		buffer_data->mapped_vma->vm_private_data = NULL;
+
+	gen_pool_free(heap_data->pool, buffer_data->addr, buffer->actual_size);
+	kfree(buffer_data->addrs);
+	kfree(buffer_data);
+}
+
+static void _mmap_open(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data = buffer->priv;
+
+	buffer_data->mapped_vma = vma;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+}
+
+static void _mmap_close(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data;
+
+	if (!buffer)
+		return;
+
+	buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+	buffer_data->mapped_vma = NULL;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static vm_fault_t _mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static int _mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#endif
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data = buffer->priv;
+	phys_addr_t phys_addr;
+	pgoff_t offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	unsigned long addr = vmf->address;
+#else
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+#endif
+
+	if (trace_mmap_fault) {
+		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+				__func__, __LINE__, buffer->id, buffer, vma);
+		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
+			__func__, __LINE__,
+			vma->vm_start, vma->vm_end,
+			vma->vm_end - vma->vm_start);
+	}
+
+	offset = (addr - vma->vm_start) >> PAGE_SHIFT;
+
+	if (offset >= (buffer->actual_size / PAGE_SIZE)) {
+		pr_err("%s:%d offs:%ld\n",
+			__func__, __LINE__, offset);
+		return VM_FAULT_SIGBUS;
+	}
+
+	phys_addr = buffer_data->addrs[0] + (offset * PAGE_SIZE);
+
+	if (trace_mmap_fault)
+		pr_info("%s:%d vmf pgoff %#lx vmf addr %lx offs :%ld phys:%#llx\n",
+			__func__, __LINE__, vmf->pgoff, addr, offset, phys_addr);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+	return vmf_insert_pfn(vma, addr, phys_addr >> PAGE_SHIFT);
+#else
+	{
+		int err = vm_insert_pfn(vma, addr, phys_addr >> PAGE_SHIFT);
+		switch (err) {
+		case 0:
+		case -EAGAIN:
+		case -ERESTARTSYS:
+		case -EINTR:
+		case -EBUSY:
+			return VM_FAULT_NOPAGE;
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		}
+
+		return VM_FAULT_SIGBUS;
+	}
+#endif
+}
+
+/* vma ops->fault handler is used to track user space mappings
+ * (inspired by other gpu/drm drivers from the kernel source tree)
+ * to properly call cache handling ops when the mapping is destroyed
+ * (when user calls unmap syscall).
+ * vma flags are used to choose a correct direction.
+ * The above facts allow us to do automatic cache flushing/invalidation.
+ *
+ * Examples:
+ *  mmap() -> .open -> invalidate buffer cache
+ *  .. read content from buffer
+ *  unmap() -> .close -> do nothing
+ *
+ *  mmap() -> .open -> do nothing
+ *  .. write content to buffer
+ *  unmap() -> .close -> flush buffer cache
+ */
+static struct vm_operations_struct carveout_mmap_vm_ops = {
+	.open = _mmap_open,
+	.close = _mmap_close,
+	.fault = _mmap_fault,
+};
+
+static int carveout_heap_map_um(struct heap *heap, struct buffer *buffer,
+						 struct vm_area_struct *vma)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
+		__func__, __LINE__,
+		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	/* CACHED by default */
+	if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	else if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	vma->vm_ops = &carveout_mmap_vm_ops;
+	vma->vm_flags |= VM_PFNMAP;
+	vma->vm_private_data = buffer;
+	vma->vm_pgoff = 0;
+
+	_mmap_open(vma);
+
+	return 0;
+}
+
+static int carveout_heap_map_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	if (buffer->kptr) {
+		pr_warn("%s called for already mapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	if (heap->options.carveout.get_kptr)
+		buffer->kptr = heap->options.carveout.get_kptr(
+				buffer_data->addrs[0],
+				buffer->actual_size,
+				buffer_data->mattr);
+	else if (heap->options.carveout.kptr)
+		buffer->kptr = heap->options.carveout.kptr +
+				(buffer_data->addrs[0] - heap->options.carveout.phys);
+	else
+		return -ENOMEM;
+
+	if (!buffer->kptr)
+		return -ENOMEM;
+
+	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p size:%zu\n", __func__, __LINE__,
+		buffer->id, buffer, buffer->kptr, buffer->actual_size);
+
+	return 0;
+}
+
+static int carveout_heap_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer, buffer->kptr);
+
+	if (!buffer->kptr) {
+		pr_warn("%s called for unmapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	if (heap->options.carveout.put_kptr)
+		heap->options.carveout.put_kptr(buffer->kptr);
+
+	buffer->kptr = NULL;
+
+	return 0;
+}
+
+static int carveout_heap_get_page_array(struct heap *heap,
+					struct buffer *buffer,
+					uint64_t **addrs)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	*addrs = buffer_data->addrs;
+	return 0;
+}
+
+static int carveout_set_offset(struct heap *heap, size_t offs)
+{
+	if (offs > heap->options.carveout.size) {
+		pr_err("%s offset exceeds size!\n", __func__);
+		return -EINVAL;
+	}
+
+	heap->options.carveout.offs = offs;
+
+	return 0;
+}
+
+static void carveout_cache_update(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	pr_debug("%s vma start:%lx end:%lx\n",
+		__func__, vma->vm_start, vma->vm_end);
+
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+	/* This function is not exported for modules by ARM kernel */
+	flush_cache_range(vma, vma->vm_start, vma->vm_end);
+#else
+	/* Tentative for the SFF, this function is exported by the kernel... */
+	/* vivt_flush_cache_range(vma, vma->vm_start, vma->vm_end); */
+#endif
+}
+
+static void carveout_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
+		carveout_cache_update(buffer_data->mapped_vma);
+}
+
+static void carveout_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
+		carveout_cache_update(buffer_data->mapped_vma);
+}
+
+static void carveout_heap_destroy(struct heap *heap)
+{
+	struct heap_data *heap_data = heap->priv;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	gen_pool_destroy(heap_data->pool);
+	kfree(heap_data);
+}
+
+static struct heap_ops carveout_heap_ops = {
+	.export = carveout_heap_export,
+	.alloc = carveout_heap_alloc,
+	.import = NULL,
+	.free = carveout_heap_free,
+	.map_um = carveout_heap_map_um,
+	.unmap_um = NULL,
+	.map_km = carveout_heap_map_km,
+	.unmap_km = carveout_heap_unmap_km,
+	.get_sg_table = NULL,
+	.get_page_array = carveout_heap_get_page_array,
+	.sync_cpu_to_dev = carveout_sync_cpu_to_dev,
+	.sync_dev_to_cpu = carveout_sync_dev_to_cpu,
+	.set_offset = carveout_set_offset,
+	.destroy = carveout_heap_destroy,
+};
+
+int img_mem_carveout_init(const struct heap_config *config, struct heap *heap)
+{
+	struct heap_data *heap_data;
+	unsigned long virt_start;
+	int ret;
+	int pool_order = POOL_ALLOC_ORDER_BASE +
+			heap->options.carveout.pool_order;
+
+	if (heap->options.carveout.offs > heap->options.carveout.size) {
+		pr_err("%s offset exceeds size!\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s phys base:%#llx kptr %p (offs:%llx) size:%zu order:%d\n", __func__,
+		 (unsigned long long)heap->options.carveout.phys,
+		 heap->options.carveout.kptr,
+		 (unsigned long long)heap->options.carveout.offs,
+		 heap->options.carveout.size,
+		 pool_order);
+
+	if (config->options.carveout.kptr &&
+			(heap->options.carveout.put_kptr || heap->options.carveout.get_kptr)) {
+		pr_err("%s can't use static & dynamic kernel mapping at the same time!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!config->options.carveout.kptr &&
+			!(heap->options.carveout.put_kptr && heap->options.carveout.get_kptr)) {
+		pr_warn("%s no kernel mapping method available!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (heap->options.carveout.phys & ((1<<pool_order)-1)) {
+		pr_err("%s phys addr (%#llx) is not aligned to allocation order!\n",
+				__func__, (unsigned long long)heap->options.carveout.phys);
+		return -EINVAL;
+	}
+
+	if (heap->options.carveout.size == 0) {
+		pr_err("%s size cannot be zero!\n", __func__);
+		return -EINVAL;
+	}
+
+	heap_data = kmalloc(sizeof(struct heap_data), GFP_KERNEL);
+	if (!heap_data)
+		return -ENOMEM;
+
+	heap_data->pool = gen_pool_create(pool_order, -1);
+	if (!heap_data->pool) {
+		pr_err("%s gen_pool_create failed\n", __func__);
+		ret = -ENOMEM;
+		goto pool_create_failed;
+	}
+
+	/* Operating in no offset mode -> virtual == phys
+	 * However when physical address == 0 (unlikely) we need to distinguish
+	 * if address returned from gen_pool_alloc is an error or valid address,
+	 * so add a const offset.
+	 */
+	virt_start = (unsigned long)heap->options.carveout.phys;
+	if (!virt_start)
+		virt_start = 1<<pool_order;
+
+	ret = gen_pool_add_virt(heap_data->pool, virt_start,
+			heap->options.carveout.phys,
+			heap->options.carveout.size,
+			-1);
+	if (ret) {
+		pr_err("%s gen_pool_add_virt failed\n", __func__);
+		goto pool_add_failed;
+	}
+
+	heap->ops = &carveout_heap_ops;
+	heap->priv = heap_data;
+
+	return 0;
+
+pool_add_failed:
+	gen_pool_destroy(heap_data->pool);
+pool_create_failed:
+	kfree(heap_data);
+	return ret;
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */
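
A carveout heap needs a physical range aligned to the pool order plus either
a static kernel mapping or a get/put callback pair (img_mem_carveout_init()
rejects mixing the two). A hedged configuration sketch: the physical range
is invented, the heap_config layout lives in img_mem_man.h, and the callback
signatures follow the calls made from carveout_heap_map_km() above.

#include <linux/io.h>

static void *example_get_kptr(uint64_t addr, size_t size,
			      enum img_mem_attr mattr)
{
	/* Write-combined kernel alias of (part of) the carveout region */
	return memremap(addr, size, MEMREMAP_WC);
}

static void example_put_kptr(void *kptr)
{
	memunmap(kptr);
}

static void example_fill_carveout_config(struct heap_config *cfg)
{
	cfg->options.carveout.phys = 0x80000000;	/* invented base */
	cfg->options.carveout.size = 16 * 1024 * 1024;	/* invented size */
	cfg->options.carveout.offs = 0;
	cfg->options.carveout.pool_order = 0;	/* PAGE_SIZE granularity */
	/* Dynamic mapping: leave the static kptr unset */
	cfg->options.carveout.kptr = NULL;
	cfg->options.carveout.get_kptr = example_get_kptr;
	cfg->options.carveout.put_kptr = example_put_kptr;
}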

+ 204 - 0
driver/img_mem/img_mem_coherent.c

@@ -0,0 +1,204 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+struct buffer_data {
+	void *kptr;
+	dma_addr_t dma_handle; /* addr returned by dma_alloc_coherent */
+	uint64_t *addrs; /* array of physical addresses, upcast to 64-bit */
+	struct device *dev;
+	size_t size;
+};
+
+static int trace_physical_pages;
+
+static int coherent_heap_alloc(struct device *device, struct heap *heap,
+			       size_t size, enum img_mem_attr attr,
+			       struct buffer *buffer)
+{
+	struct buffer_data *buffer_data;
+	phys_addr_t phys_addr;
+	size_t pages, page;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	buffer_data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!buffer_data)
+		return -ENOMEM;
+
+	pages = size / PAGE_SIZE;
+	buffer_data->addrs = kmalloc_array(pages, sizeof(uint64_t), GFP_KERNEL);
+	if (!buffer_data->addrs) {
+		kfree(buffer_data);
+		return -ENOMEM;
+	}
+
+	buffer_data->dev = device;
+	buffer_data->size = size;
+	buffer_data->kptr = dma_alloc_coherent(device,
+				size,
+				&buffer_data->dma_handle,
+				heap->options.coherent.gfp_flags);
+	if (!buffer_data->kptr) {
+		pr_err("%s dma_alloc_coherent failed!\n", __func__);
+		kfree(buffer_data->addrs);
+		kfree(buffer_data);
+		return -ENOMEM;
+	}
+	buffer->kptr = (void *)buffer_data->kptr;
+
+	page = 0;
+	phys_addr = buffer_data->dma_handle;
+	while (page < pages) {
+		if (trace_physical_pages)
+			pr_info("%s phys %llx\n",
+				 __func__, (unsigned long long)phys_addr);
+		buffer_data->addrs[page++] = phys_addr;
+		phys_addr += PAGE_SIZE;
+	}
+
+	buffer->priv = buffer_data;
+
+	pr_debug("%s buffer %d kptr %p phys %#llx size %zu\n", __func__,
+		 buffer->id, buffer->kptr,
+		 (unsigned long long)buffer_data->addrs[0], size);
+	return 0;
+}
+
+static void coherent_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	dma_free_coherent(buffer_data->dev, buffer_data->size,
+			  buffer_data->kptr, buffer_data->dma_handle);
+	kfree(buffer_data->addrs);
+	kfree(buffer_data);
+}
+
+static int coherent_heap_map_um(struct heap *heap, struct buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	unsigned long pfn = *buffer_data->addrs >> PAGE_SHIFT;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
+		 __func__, __LINE__,
+		 vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return remap_pfn_range(vma, vma->vm_start, pfn,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static int coherent_heap_map_km(struct heap *heap, struct buffer *buffer)
+{
+	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer, buffer->kptr);
+
+	return 0;
+}
+
+static int coherent_heap_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer, buffer->kptr);
+
+	return 0;
+}
+
+static int coherent_heap_get_page_array(struct heap *heap,
+					struct buffer *buffer,
+					uint64_t **addrs)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	*addrs = buffer_data->addrs;
+	return 0;
+}
+
+static void coherent_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops coherent_heap_ops = {
+	.alloc = coherent_heap_alloc,
+	.import = NULL,
+	.free = coherent_heap_free,
+	.map_um = coherent_heap_map_um,
+	.unmap_um = NULL,
+	.map_km = coherent_heap_map_km,
+	.unmap_km = coherent_heap_unmap_km,
+	.get_sg_table = NULL,
+	.get_page_array = coherent_heap_get_page_array,
+	.sync_cpu_to_dev = NULL,
+	.sync_dev_to_cpu = NULL,
+	.set_offset = NULL,
+	.destroy = coherent_heap_destroy,
+};
+
+int img_mem_coherent_init(const struct heap_config *config, struct heap *heap)
+{
+	pr_debug("%s gfp:%x\n", __func__,
+		 config->options.coherent.gfp_flags);
+
+	heap->ops = &coherent_heap_ops;
+
+	return 0;
+}
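
The only tunable for this heap is the GFP mask forwarded verbatim to
dma_alloc_coherent() in coherent_heap_alloc(). A minimal sketch, assuming
the heap_config layout from img_mem_man.h; GFP_DMA32 is just an example of
constraining allocations for a device with 32-bit addressing:

#include <linux/gfp.h>

static void example_fill_coherent_config(struct heap_config *cfg)
{
	cfg->options.coherent.gfp_flags = GFP_KERNEL | GFP_DMA32;
}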

+ 502 - 0
driver/img_mem/img_mem_dmabuf.c

@@ -0,0 +1,502 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_dmabuf.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+/* this condition is actually true for kernels < 4.4.100 */
+#ifndef PHYS_PFN
+#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))
+#endif
+
+static int trace_physical_pages;
+static int trace_mmap_fault;
+
+struct buffer_data {
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	enum img_mem_attr mattr;  /* memory attributes */
+	struct vm_area_struct *mapped_vma;
+};
+
+static int dmabuf_heap_import(struct device *device, struct heap *heap,
+						size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+						struct buffer *buffer)
+{
+	struct buffer_data *data;
+	int ret;
+	int buf_fd = (int)buf_hnd;
+
+	pr_debug("%s:%d buffer %d (0x%p) buf_fd %d\n", __func__, __LINE__,
+		buffer->id, buffer, buf_fd);
+
+	data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dma_buf = dma_buf_get(buf_fd);
+	if (IS_ERR_OR_NULL(data->dma_buf)) {
+		pr_err("%s dma_buf_get fd %d\n", __func__, buf_fd);
+		ret = -EINVAL;
+		goto dma_buf_get_failed;
+	}
+	pr_debug("%s:%d buffer %d dma_buf %p\n", __func__, __LINE__,
+		buffer->id, data->dma_buf);
+
+	data->attach = dma_buf_attach(data->dma_buf, device);
+	if (IS_ERR(data->attach)) {
+		pr_err("%s dma_buf_attach fd %d\n", __func__, buf_fd);
+		ret = -EINVAL;
+		goto dma_buf_attach_failed;
+	}
+
+	data->sgt = dma_buf_map_attachment(data->attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(data->sgt)) {
+		pr_err("%s dma_buf_map_attachment fd %d\n", __func__, buf_fd);
+		ret = -EINVAL;
+		goto dma_buf_map_failed;
+	}
+
+	if (trace_physical_pages) {
+		struct scatterlist *sgl = data->sgt->sgl;
+
+		while (sgl) {
+			pr_info("%s:%d phys %#llx length %d (dma_addr:%#llx len:%d)\n",
+				__func__, __LINE__,
+				(unsigned long long)sg_phys(sgl), sgl->length,
+				sg_dma_address(sgl), sg_dma_len(sgl));
+			sgl = sg_next(sgl);
+		}
+	}
+
+	data->mattr = attr;
+	data->mapped_vma = NULL;
+	buffer->priv = data;
+	return 0;
+
+dma_buf_map_failed:
+	dma_buf_detach(data->dma_buf, data->attach);
+dma_buf_attach_failed:
+	dma_buf_put(data->dma_buf);
+dma_buf_get_failed:
+	kfree(data);
+	return ret;
+}
+
+static void dmabuf_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (buffer->kptr) {
+		struct dma_buf *dma_buf = data->dma_buf;
+
+		dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+						0 /* start */,
+						buffer->actual_size,
+#endif
+						DMA_BIDIRECTIONAL);
+
+		dma_buf_vunmap(dma_buf, buffer->kptr);
+		buffer->kptr = NULL;
+	}
+
+	if (data->mapped_vma)
+		data->mapped_vma->vm_private_data = NULL;
+
+	dma_buf_unmap_attachment(data->attach, data->sgt, DMA_BIDIRECTIONAL);
+	dma_buf_detach(data->dma_buf, data->attach);
+	dma_buf_put(data->dma_buf);
+	kfree(data);
+}
+
+static void dmabuf_mmap_open(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		enum dma_data_direction dma_dir;
+		if (vma->vm_flags & VM_WRITE)
+			dma_dir = DMA_TO_DEVICE;
+		else
+			dma_dir = DMA_FROM_DEVICE;
+
+		/* User will read the buffer so invalidate D-cache */
+		dma_buf_begin_cpu_access(data->dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+							0 /* start */,
+							buffer->actual_size,
+#endif
+							dma_dir);
+	}
+	data->mapped_vma = vma;
+}
+
+static void dmabuf_mmap_close(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *data;
+
+	if (!buffer)
+		return;
+
+	data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		/* User may have written to the buffer so flush D-cache */
+		dma_buf_end_cpu_access(data->dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+							0 /* start */,
+							buffer->actual_size,
+#endif
+							DMA_TO_DEVICE);
+	}
+
+	data->mapped_vma = NULL;
+}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static vm_fault_t dmabuf_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static int dmabuf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#endif
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *data = buffer->priv;
+	struct sg_table *sgt = data->sgt;
+	struct scatterlist *sgl;
+	pgoff_t curr_offset;
+	dma_addr_t phys = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	unsigned long addr = vmf->address;
+#else
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+#endif
+
+	if (trace_mmap_fault) {
+		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+				__func__, __LINE__, buffer->id, buffer, vma);
+		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
+			__func__, __LINE__,
+			vma->vm_start,
+			vma->vm_end,
+			vma->vm_end - vma->vm_start);
+	}
+
+	curr_offset = addr - vma->vm_start;
+
+	sgl = sgt->sgl;
+	while (sgl) {
+		phys = sg_phys(sgl);
+		if (curr_offset < sgl->length)
+			break;
+		curr_offset -= sgl->length;
+		sgl = sg_next(sgl);
+	}
+	phys += curr_offset; /* advance to the faulting offset within the current block */
+	if (trace_mmap_fault)
+		pr_info("%s:%d vmf pgoff:%#lx vmf addr:%lx phys:%#llx\n",
+			__func__, __LINE__, vmf->pgoff, addr,
+			(unsigned long long)phys);
+
+	{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+		unsigned long pfn = PHYS_PFN(phys);
+#else
+		pfn_t pfn = {
+			.val = PHYS_PFN(phys)
+		};
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		return vmf_insert_mixed(vma, addr, pfn);
+#else
+		{
+			int err = vm_insert_mixed(vma, addr, pfn);
+			switch (err) {
+			case 0:
+			case -EAGAIN:
+			case -ERESTARTSYS:
+			case -EINTR:
+			case -EBUSY:
+				return VM_FAULT_NOPAGE;
+			case -ENOMEM:
+				return VM_FAULT_OOM;
+			}
+
+			return VM_FAULT_SIGBUS;
+		}
+#endif
+	}
+}
+
+/* The vma ops->fault handler is used to track user space mappings
+ * (inspired by other gpu/drm drivers from the kernel source tree)
+ * so that the dma_sync_* ops can be called properly when a mapping is
+ * destroyed (when the user calls the munmap syscall).
+ * The vma flags are used to choose the correct dma mapping direction.
+ * By default the DMA_BIDIRECTIONAL mapping type is used (kernel space only).
+ * The above allows us to do automatic cache flushing/invalidation.
+ *
+ * Examples (an illustrative user-space flow follows the ops table below):
+ *  mmap() -> .open -> invalidate buffer cache
+ *  .. read content from buffer
+ *  unmap() -> .close -> do nothing
+ *
+ *  mmap() -> .open -> do nothing
+ *  .. write content to buffer
+ *  unmap() -> .close -> flush buffer cache
+ */
+static struct vm_operations_struct dmabuf_heap_mmap_vm_ops = {
+	.open = dmabuf_mmap_open,
+	.close = dmabuf_mmap_close,
+	.fault = dmabuf_mmap_fault,
+};
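+
+/* Illustrative user-space flow exercising the handlers above (a sketch,
+ * not part of the driver; the device node name is hypothetical):
+ *
+ *   int fd = open("/dev/vha0", O_RDWR);
+ *   void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ *                           // -> .open: begin_cpu_access(DMA_FROM_DEVICE)
+ *   memcpy(out, p, size);   // read device-produced data through the mapping
+ *   munmap(p, size);        // -> .close: end_cpu_access(DMA_TO_DEVICE)
+ */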
+
+static int dmabuf_heap_map_um(struct heap *heap, struct buffer *buffer,
+				struct vm_area_struct *vma)
+{
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
+		__func__, __LINE__,
+		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	/* CACHED by default */
+	if (data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else if (data->mattr & IMG_MEM_ATTR_UNCACHED)
+		WARN_ONCE(1, "Uncached not allowed");
+	/*vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);*/
+
+	vma->vm_ops = &dmabuf_heap_mmap_vm_ops;
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_private_data = buffer;
+	vma->vm_pgoff = 0;
+
+	dmabuf_mmap_open(vma);
+
+	return 0;
+}
+
+static int dmabuf_heap_map_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct dma_buf *dma_buf = data->dma_buf;
+	int ret;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (buffer->kptr) {
+		pr_warn("%s called for already mapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+							0 /* start */,
+							buffer->actual_size,
+#endif
+							DMA_BIDIRECTIONAL);
+	if (ret) {
+		pr_err("%s begin_cpu_access failed for bufid %d\n", __func__, buffer->id);
+		return ret;
+	}
+
+	buffer->kptr = dma_buf_vmap(dma_buf);
+	if (!buffer->kptr) {
+		pr_err("%s dma_buf_vmap failed!\n", __func__);
+		/* balance the successful begin_cpu_access above */
+		dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+						0 /* start */,
+						buffer->actual_size,
+#endif
+						DMA_BIDIRECTIONAL);
+		return -EFAULT;
+	}
+
+	pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+	return 0;
+}
+
+static int dmabuf_heap_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct dma_buf *dma_buf = data->dma_buf;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!buffer->kptr) {
+		pr_warn("%s called for unmapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+					0 /* start */,
+					buffer->actual_size,
+#endif
+					DMA_BIDIRECTIONAL);
+
+	dma_buf_vunmap(dma_buf, buffer->kptr);
+
+	pr_debug("%s:%d buffer %d kunmap from 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+	buffer->kptr = NULL;
+
+	return 0;
+}
+
+static int dmabuf_get_sg_table(struct heap *heap, struct buffer *buffer,
+						struct sg_table **sg_table, bool *use_sg_dma)
+{
+	struct buffer_data *data = buffer->priv;
+
+	*sg_table = data->sgt;
+	*use_sg_dma = heap->options.dmabuf.use_sg_dma;
+	return 0;
+}
+
+static void dmabuf_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct dma_buf *dma_buf = data->dma_buf;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+						0 /* start */,
+						buffer->actual_size,
+#endif
+						DMA_TO_DEVICE);
+
+	}
+}
+
+static void dmabuf_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+	struct dma_buf *dma_buf = data->dma_buf;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+							0 /* start */,
+							buffer->actual_size,
+#endif
+							DMA_FROM_DEVICE);
+	}
+}
+
+static void dmabuf_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops dmabuf_heap_ops = {
+	.alloc = NULL,
+	.import = dmabuf_heap_import,
+	.free = dmabuf_heap_free,
+	.map_um = dmabuf_heap_map_um,
+	.unmap_um = NULL,
+	.map_km = dmabuf_heap_map_km,
+	.unmap_km = dmabuf_heap_unmap_km,
+	.get_sg_table = dmabuf_get_sg_table,
+	.get_page_array = NULL,
+	.sync_cpu_to_dev = dmabuf_sync_cpu_to_dev,
+	.sync_dev_to_cpu = dmabuf_sync_dev_to_cpu,
+	.set_offset = NULL,
+	.destroy = dmabuf_heap_destroy,
+};
+
+int img_mem_dmabuf_init(const struct heap_config *heap_cfg, struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	heap->ops = &dmabuf_heap_ops;
+
+	return 0;
+}
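+
+/* A minimal registration sketch (illustrative caller code; only .type is
+ * set here, so options.dmabuf.use_sg_dma keeps its zero default):
+ *
+ *   struct heap_config cfg = {
+ *       .type = IMG_MEM_HEAP_TYPE_DMABUF,
+ *   };
+ *   int heap_id;
+ *
+ *   if (img_mem_add_heap(&cfg, &heap_id))  // dispatches to img_mem_dmabuf_init()
+ *       return -EINVAL;
+ */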
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 266 - 0
driver/img_mem/img_mem_ion.c

@@ -0,0 +1,266 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_ion.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <ion.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+static int trace_physical_pages;
+
+struct buffer_data {
+	struct ion_client *client;
+	struct ion_handle *handle;
+	struct sg_table *sgt;
+};
+
+static int ion_heap_import(struct device *device, struct heap *heap,
+				size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+				struct buffer *buffer)
+{
+	struct buffer_data *data;
+	int ret;
+	int buf_fd = (int)buf_hnd;
+
+	pr_debug("%s:%d buffer %d (0x%p) buf_fd %d\n", __func__, __LINE__,
+		 buffer->id, buffer, buf_fd);
+
+	data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	data->client = heap->priv;
+
+	data->handle = ion_import_dma_buf(data->client, buf_fd);
+	if (IS_ERR_OR_NULL(data->handle)) {
+		pr_err("%s ion_import_dma_buf fd %d\n", __func__, buf_fd);
+		ret = -EINVAL;
+		goto ion_import_dma_buf_failed;
+	}
+	pr_debug("%s:%d buffer %d ion_handle %p\n", __func__, __LINE__,
+		 buffer->id, data->handle);
+
+	data->sgt = ion_sg_table(data->client, data->handle);
+	if (IS_ERR(data->sgt)) {
+		pr_err("%s ion_sg_table fd %d\n", __func__, buf_fd);
+		ret = -EINVAL;
+		goto ion_sg_table_failed;
+	}
+
+	if (trace_physical_pages) {
+		struct scatterlist *sgl = data->sgt->sgl;
+
+		while (sgl) {
+			pr_info("%s:%d phys %#llx length %d\n",
+				 __func__, __LINE__,
+				 (unsigned long long)sg_phys(sgl), sgl->length);
+			sgl = sg_next(sgl);
+		}
+	}
+
+	buffer->priv = data;
+	return 0;
+
+ion_sg_table_failed:
+	ion_free(data->client, data->handle);
+ion_import_dma_buf_failed:
+	kfree(data);
+	return ret;
+}
+
+static void ion_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	if (buffer->kptr)
+		ion_unmap_kernel(data->client, data->handle);
+
+	ion_free(data->client, data->handle);
+	kfree(data);
+}
+
+static int ion_heap_map_um(struct heap *heap, struct buffer *buffer,
+			   struct vm_area_struct *vma)
+{
+	struct buffer_data *data = buffer->priv;
+	struct scatterlist *sgl;
+	unsigned long addr;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
+		 __func__, __LINE__,
+		 vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	sgl = data->sgt->sgl;
+	addr = vma->vm_start;
+	while (sgl) {
+		dma_addr_t phys = sg_phys(sgl); /* sg_dma_address ? */
+		unsigned long pfn = phys >> PAGE_SHIFT;
+		unsigned int len = sgl->length;
+		int ret;
+
+		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
+		if (ret)
+			return ret;
+
+		addr += len;
+		sgl = sg_next(sgl);
+	}
+
+	return 0;
+}
+
+static int ion_heap_map_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	if (buffer->kptr) {
+		pr_warn("%s called for already mapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	buffer->kptr = ion_map_kernel(data->client, data->handle);
+	if (!buffer->kptr) {
+		pr_err("%s ion_map_kernel failed!\n", __func__);
+		return -EFAULT;
+	}
+
+	pr_debug("%s:%d buffer %d map to 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer->kptr);
+	return 0;
+}
+
+static int ion_heap_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		 buffer->id, buffer);
+
+	if (!buffer->kptr) {
+		pr_warn("%s called for unmapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	ion_unmap_kernel(data->client, data->handle);
+
+	pr_debug("%s:%d buffer %d unmap from 0x%p\n", __func__, __LINE__,
+		 buffer->id, buffer->kptr);
+	buffer->kptr = NULL;
+
+	return 0;
+}
+
+static int ion_heap_get_sg_table(struct heap *heap, struct buffer *buffer,
+				 struct sg_table **sg_table, bool *use_sg_dma)
+{
+	struct buffer_data *data = buffer->priv;
+
+	*sg_table = data->sgt;
+	*use_sg_dma = false;
+	return 0;
+}
+
+static void ion_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops ion_heap_ops = {
+	.alloc = NULL,
+	.import = ion_heap_import,
+	.free = ion_heap_free,
+	.map_um = ion_heap_map_um,
+	.unmap_um = NULL,
+	.map_km = ion_heap_map_km,
+	.unmap_km = ion_heap_unmap_km,
+	.get_sg_table = ion_heap_get_sg_table,
+	.get_page_array = NULL,
+	.sync_cpu_to_dev = NULL,
+	.sync_dev_to_cpu = NULL,
+	.set_offset = NULL,
+	.destroy = ion_heap_destroy,
+};
+
+int img_mem_ion_init(const struct heap_config *heap_cfg, struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	if (!heap_cfg->options.ion.client) {
+		pr_err("%s no ion client defined\n", __func__);
+		return -EINVAL;
+	}
+
+	heap->ops = &ion_heap_ops;
+	heap->priv = heap_cfg->options.ion.client;
+
+	return 0;
+}
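+
+/* Registration sketch (illustrative; "my_ion_client" is a hypothetical
+ * struct ion_client obtained elsewhere via the kernel's ion API):
+ *
+ *   struct heap_config cfg = {
+ *       .type = IMG_MEM_HEAP_TYPE_ION,
+ *       .options.ion.client = my_ion_client,
+ *   };
+ *   int heap_id;
+ *
+ *   img_mem_add_heap(&cfg, &heap_id);  // dispatches to img_mem_ion_init()
+ */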
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 2666 - 0
driver/img_mem/img_mem_man.c

@@ -0,0 +1,2666 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include <img_mem_man.h>
+#include <vha_drv_common.h>
+#include <mmu.h>
+#include <heap.h>
+
+#include "img_mem_man_priv.h"
+
+/* Maximum number of processes */
+#define MAX_PROC_CTX 1000
+
+/* Minimum page size (4KB) bits. */
+#define MIN_PAGE_SIZE_BITS 12
+
+struct mem_man {
+	struct idr heaps;
+	struct idr mem_ctxs;
+	struct mutex mutex;
+
+	unsigned cache_usage;
+};
+/* defined like this so it is easier to convert to a function argument later */
+static struct mem_man mem_man_data;
+
+/* wrapper struct for imgmmu_page */
+struct mmu_page {
+	struct buffer *buffer;
+	struct imgmmu_page page;
+	unsigned char type;
+	bool bypass_addr_trans;
+	bool use_parity;
+};
+
+static bool trace_physical_pages;
+module_param(trace_physical_pages, bool, 0444);
+MODULE_PARM_DESC(trace_physical_pages,
+		"Enables tracing of physical pages being mapped into MMU");
+static bool cache_sync = true;
+module_param(cache_sync, bool, 0444);
+MODULE_PARM_DESC(cache_sync,
+				"cache sync mode: 0-no sync; 1-force sync (even if hw provides coherency);");
+
+/*
+ * memory heaps
+ */
+static char *get_heap_name(enum img_mem_heap_type type)
+{
+	switch (type) {
+	case IMG_MEM_HEAP_TYPE_UNIFIED:
+		return "unified";
+	case IMG_MEM_HEAP_TYPE_CARVEOUT:
+		return "carveout";
+	case IMG_MEM_HEAP_TYPE_ION:
+		return "ion";
+	case IMG_MEM_HEAP_TYPE_DMABUF:
+		return "dmabuf";
+	case IMG_MEM_HEAP_TYPE_COHERENT:
+		return "coherent";
+	case IMG_MEM_HEAP_TYPE_ANONYMOUS:
+		return "anonymous";
+	case IMG_MEM_HEAP_TYPE_OCM:
+		return "ocm";
+	default:
+		WARN_ON(type);
+		return "unknown";
+	}
+}
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	int (*init_fn)(const struct heap_config *heap_cfg, struct heap *heap);
+	int ret;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	switch (heap_cfg->type) {
+	case IMG_MEM_HEAP_TYPE_UNIFIED:
+		init_fn = img_mem_unified_init;
+		break;
+	case IMG_MEM_HEAP_TYPE_COHERENT:
+		init_fn = img_mem_coherent_init;
+		break;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	case IMG_MEM_HEAP_TYPE_DMABUF:
+		init_fn = img_mem_dmabuf_init;
+		break;
+#endif
+#ifdef ION_SUPPORTED
+#ifdef CONFIG_ION
+	case IMG_MEM_HEAP_TYPE_ION:
+		init_fn = img_mem_ion_init;
+		break;
+#endif
+#endif
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	case IMG_MEM_HEAP_TYPE_CARVEOUT:
+		init_fn = img_mem_carveout_init;
+		break;
+#endif
+	case IMG_MEM_HEAP_TYPE_ANONYMOUS:
+		init_fn = img_mem_anonymous_init;
+		break;
+	case IMG_MEM_HEAP_TYPE_OCM:
+		init_fn = img_mem_ocm_init;
+		break;
+	default:
+		pr_err("%s: heap type %d unknown\n", __func__, heap_cfg->type);
+		return -EINVAL;
+	}
+
+	heap = kmalloc(sizeof(struct heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+
+	ret = mutex_lock_interruptible(&mem_man->mutex);
+	if (ret)
+		goto lock_failed;
+
+	ret = idr_alloc(&mem_man->heaps, heap, IMG_MEM_MAN_MIN_HEAP,
+			IMG_MEM_MAN_MAX_HEAP, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: idr_alloc failed\n", __func__);
+		goto alloc_id_failed;
+	}
+
+	heap->id = ret;
+	heap->type = heap_cfg->type;
+	heap->options = heap_cfg->options;
+	heap->to_dev_addr = heap_cfg->to_dev_addr;
+	heap->to_host_addr = heap_cfg->to_host_addr;
+	heap->priv = NULL;
+	heap->cache_sync = true;
+	heap->alt_cache_attr = heap_cfg->cache_attr;
+
+	ret = init_fn(heap_cfg, heap);
+	if (ret) {
+		pr_err("%s: heap init failed\n", __func__);
+		goto heap_init_failed;
+	}
+
+	*heap_id = heap->id;
+	mutex_unlock(&mem_man->mutex);
+
+	pr_debug("%s created heap %d type %d (%s)\n",
+		__func__, *heap_id, heap_cfg->type, get_heap_name(heap->type));
+	return 0;
+
+heap_init_failed:
+	idr_remove(&mem_man->heaps, heap->id);
+alloc_id_failed:
+	mutex_unlock(&mem_man->mutex);
+lock_failed:
+	kfree(heap);
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_add_heap);
+
+static void _img_mem_del_heap(struct heap *heap)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	pr_debug("%s heap %d 0x%p\n", __func__, heap->id, heap);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops->destroy)
+		heap->ops->destroy(heap);
+
+	idr_remove(&mem_man->heaps, heap->id);
+}
+
+void img_mem_del_heap(int heap_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+
+	pr_debug("%s:%d heap %d\n", __func__, __LINE__, heap_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	heap = idr_find(&mem_man->heaps, heap_id);
+	if (!heap) {
+		pr_warn("%s heap %d not found!\n", __func__, heap_id);
+		mutex_unlock(&mem_man->mutex);
+		return;
+	}
+
+	_img_mem_del_heap(heap);
+
+	mutex_unlock(&mem_man->mutex);
+
+	kfree(heap);
+}
+EXPORT_SYMBOL(img_mem_del_heap);
+
+int img_mem_get_heap_info(int heap_id, uint8_t *type, uint32_t *attrs)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+
+	pr_debug("%s:%d heap %d\n", __func__, __LINE__, heap_id);
+
+	if (heap_id < IMG_MEM_MAN_MIN_HEAP || heap_id > IMG_MEM_MAN_MAX_HEAP) {
+		pr_err("%s heap %d does not match internal constraints <%u - %u>!\n",
+				__func__, heap_id, IMG_MEM_MAN_MIN_HEAP, IMG_MEM_MAN_MAX_HEAP);
+		return -EINVAL;
+	}
+	mutex_lock(&mem_man->mutex);
+
+	heap = idr_find(&mem_man->heaps, heap_id);
+	if (!heap) {
+		pr_debug("%s heap %d not found!\n", __func__, heap_id);
+		mutex_unlock(&mem_man->mutex);
+		return -ENOENT;
+	}
+
+	*type = heap->type;
+
+	*attrs = 0;
+	if (heap->ops->import)
+		*attrs |= IMG_MEM_HEAP_ATTR_IMPORT;
+	if (heap->ops->export)
+		*attrs |= IMG_MEM_HEAP_ATTR_EXPORT;
+	if (heap->ops->alloc && !heap->ops->import)
+		*attrs |= IMG_MEM_HEAP_ATTR_INTERNAL;
+	if (heap->type == IMG_MEM_HEAP_TYPE_OCM)
+		*attrs = IMG_MEM_HEAP_ATTR_SEALED;
+
+	/* User attributes */
+	*attrs |= heap->options.ocm.hattr;
+
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mem_get_heap_info);
+
+/*
+ * related to process context (contains SYSMEM heap's functionality in general)
+ */
+int img_mem_create_proc_ctx(struct mem_ctx **new_ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mem_ctx *ctx;
+	int ret = 0;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	ctx = kzalloc(sizeof(struct mem_ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	idr_init(&ctx->buffers);
+	INIT_LIST_HEAD(&ctx->mmu_ctxs);
+
+	mutex_lock(&mem_man->mutex);
+	ret = idr_alloc(&mem_man->mem_ctxs, ctx, 0, MAX_PROC_CTX,
+			GFP_KERNEL);
+	if (ret < 0) {
+		mutex_unlock(&mem_man->mutex);
+		pr_err("%s: idr_alloc failed\n", __func__);
+		goto idr_alloc_failed;
+	}
+	/* Assign id to the newly created context. */
+	ctx->id = ret;
+	mutex_unlock(&mem_man->mutex);
+	pr_debug("%s id:%d\n", __func__, ctx->id);
+
+	*new_ctx = ctx;
+	return 0;
+
+idr_alloc_failed:
+	kfree(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_create_proc_ctx);
+
+static void _img_mem_free(struct buffer *buffer);
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping);
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx);
+
+static void _img_mem_destroy_proc_ctx(struct mem_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int buf_id;
+
+	pr_debug("%s:%d id:%d\n", __func__, __LINE__, ctx->id);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	/* free derelict mmu contexts */
+	while (!list_empty(&ctx->mmu_ctxs)) {
+		struct mmu_ctx *mc;
+
+		mc = list_first_entry(&ctx->mmu_ctxs,
+							struct mmu_ctx, mem_ctx_entry);
+		pr_warn("%s: found derelict mmu context %p\n", __func__, mc);
+		_img_mmu_ctx_destroy(mc);
+		kfree(mc);
+	}
+
+	/* free derelict buffers */
+	buf_id = IMG_MEM_MAN_MIN_BUFFER;
+	buffer = idr_get_next(&ctx->buffers, &buf_id);
+	while (buffer) {
+		pr_warn("%s: found derelict buffer %d\n", __func__, buf_id);
+		_img_mem_free(buffer);
+		buf_id = IMG_MEM_MAN_MIN_BUFFER;
+		buffer = idr_get_next(&ctx->buffers, &buf_id);
+	}
+
+	idr_destroy(&ctx->buffers);
+	idr_remove(&mem_man->mem_ctxs, ctx->id);
+}
+
+void img_mem_destroy_proc_ctx(struct mem_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	mutex_lock(&mem_man->mutex);
+	_img_mem_destroy_proc_ctx(ctx);
+	mutex_unlock(&mem_man->mutex);
+
+	kfree(ctx);
+}
+EXPORT_SYMBOL(img_mem_destroy_proc_ctx);
+
+static int _img_mem_alloc(struct device *device, struct mem_ctx *ctx,
+				struct heap *heap, size_t size,
+				enum img_mem_attr attr, struct buffer **buffer_new)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+	/* Allocations for MMU pages are still 4k so CPU page size is enough */
+	size_t align = attr & IMG_MEM_ATTR_MMU ?
+		imgmmu_get_cpu_page_size() : IMGMMU_GET_MAX_PAGE_SIZE();
+
+	pr_debug("%s heap %p '%s' ctx %p size %zu\n", __func__,
+			heap, get_heap_name(heap->type), ctx, size);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (size == 0) {
+		pr_err("%s: buffer size is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (heap->ops == NULL || heap->ops->alloc == NULL) {
+		pr_err("%s: no alloc function in heap %d!\n",
+					__func__, heap->id);
+		return -EINVAL;
+	}
+
+	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	ret = idr_alloc(&ctx->buffers, buffer,
+			(IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
+				IMG_MEM_MAN_MIN_BUFFER,
+			(IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
+				IMG_MEM_MAN_MAX_BUFFER,
+			GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: idr_alloc failed\n", __func__);
+		goto idr_alloc_failed;
+	}
+
+	buffer->id = ret;
+	buffer->request_size = size;
+	buffer->actual_size = ((size + align - 1) /
+			align) * align;
+	buffer->device = device;
+	buffer->mem_ctx = ctx;
+	buffer->heap = heap;
+	INIT_LIST_HEAD(&buffer->mappings);
+	buffer->kptr = NULL;
+	buffer->priv = NULL;
+
+	/* Check if heap has been registered using an alternative cache attributes */
+	if (heap->alt_cache_attr &&
+			(heap->alt_cache_attr != (attr & IMG_MEM_ATTR_CACHE_MASK))) {
+		pr_debug("%s heap %d changing cache attributes from %x to %x\n",
+			__func__, heap->id, attr & IMG_MEM_ATTR_CACHE_MASK,
+			heap->alt_cache_attr);
+		attr &= ~IMG_MEM_ATTR_CACHE_MASK;
+		attr |= heap->alt_cache_attr;
+	}
+
+	ret = heap->ops->alloc(device, heap, buffer->actual_size, attr, buffer);
+	if (ret) {
+		pr_err("%s: heap %d alloc failed\n", __func__, heap->id);
+		goto heap_alloc_failed;
+	}
+
+	if (heap->type != IMG_MEM_HEAP_TYPE_OCM) {
+		__img_pdump_printf(device, "-- Allocating zeroed buffer id:%d size:%zu\n",
+				buffer->id, buffer->actual_size);
+		__img_pdump_printf(device, "CALLOC "_PMEM_":BLOCK_%d %#zx %#zx 0x0\n",
+				buffer->id, buffer->actual_size, align);
+	}
+
+	ctx->mem_usage_curr += buffer->actual_size;
+	if (ctx->mem_usage_curr > ctx->mem_usage_max)
+		ctx->mem_usage_max = ctx->mem_usage_curr;
+
+	*buffer_new = buffer;
+
+	pr_debug("%s heap %p ctx %p created buffer %d (%p) actual_size %zu\n",
+		__func__, heap, ctx, buffer->id, buffer, buffer->actual_size);
+	return 0;
+
+heap_alloc_failed:
+	idr_remove(&ctx->buffers, buffer->id);
+idr_alloc_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int img_mem_alloc(struct device *device, struct mem_ctx *ctx, int heap_id,
+			size_t size, enum img_mem_attr attr, int *buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	struct buffer *buffer;
+	int ret;
+
+	pr_debug("%s heap %d ctx %p size %zu\n", __func__, heap_id, ctx, size);
+
+	ret = mutex_lock_interruptible(&mem_man->mutex);
+	if (ret)
+		return ret;
+
+	heap = idr_find(&mem_man->heaps, heap_id);
+	if (!heap) {
+		pr_err("%s: heap id %d not found\n", __func__, heap_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_alloc(device, ctx, heap, size, attr, &buffer);
+	if (ret) {
+		mutex_unlock(&mem_man->mutex);
+		return ret;
+	}
+
+	*buf_id = buffer->id;
+	mutex_unlock(&mem_man->mutex);
+
+	pr_debug("%s heap %d ctx %p created buffer %d (%p) size %zu\n",
+		__func__, heap_id, ctx, *buf_id, buffer, size);
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_alloc);
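+
+/* Illustrative buffer lifecycle built on the exported API (a sketch; error
+ * handling omitted, "dev" and "heap_id" assumed set up by the caller, and
+ * the size/attribute values are arbitrary):
+ *
+ *   struct mem_ctx *ctx;
+ *   int buf_id;
+ *
+ *   img_mem_create_proc_ctx(&ctx);
+ *   img_mem_alloc(dev, ctx, heap_id, 4096, IMG_MEM_ATTR_WRITECOMBINE, &buf_id);
+ *   img_mem_map_km(ctx, buf_id);
+ *   memset(img_mem_get_kptr(ctx, buf_id), 0, 4096);
+ *   img_mem_unmap_km(ctx, buf_id);
+ *   img_mem_free(ctx, buf_id);
+ *   img_mem_destroy_proc_ctx(ctx);
+ */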
+
+static int _img_mem_import(struct device *device,
+				struct mem_ctx *ctx, struct heap *heap,
+				size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+				struct buffer **buffer_new)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+	size_t align = IMGMMU_GET_MAX_PAGE_SIZE();
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (size == 0) {
+		pr_err("%s: buffer size is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (heap->ops == NULL || heap->ops->import == NULL) {
+		pr_err("%s: no import function in heap %d!\n",
+					__func__, heap->id);
+		return -EINVAL;
+	}
+
+	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	ret = idr_alloc(&ctx->buffers, buffer,
+			(IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
+				IMG_MEM_MAN_MIN_BUFFER,
+			(IMG_MEM_MAN_MAX_BUFFER * ctx->id) +
+				IMG_MEM_MAN_MAX_BUFFER,
+			GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: idr_alloc failed\n", __func__);
+		goto idr_alloc_failed;
+	}
+
+	buffer->id = ret;
+	buffer->request_size = size;
+	buffer->actual_size = ((size + align - 1) /
+			align) * align;
+	buffer->device = device;
+	buffer->mem_ctx = ctx;
+	buffer->heap = heap;
+	INIT_LIST_HEAD(&buffer->mappings);
+	buffer->kptr = NULL;
+	buffer->priv = NULL;
+
+	/* If the MMU page size is bigger than the CPU page size
+	 * we need an extra check against the requested size:
+	 * the aligned size must not exceed the requested size
+	 * by more than a CPU page, otherwise the HW could try
+	 * to access non-existing pages. E.g. with a 16k MMU page
+	 * and 4k CPU pages, a 5k request aligns up to 16k and the
+	 * 11k of slack would span unbacked CPU pages. */
+	if (buffer->actual_size - buffer->request_size >
+			imgmmu_get_cpu_page_size()) {
+		pr_err("%s: original buffer size is not MMU page size aligned!\n",
+				__func__);
+		ret = -EINVAL;
+		goto heap_import_failed;
+	}
+
+	/* Check if heap has been registered using an alternative cache attributes */
+	if (heap->alt_cache_attr &&
+			(heap->alt_cache_attr != (attr & IMG_MEM_ATTR_CACHE_MASK))) {
+		pr_debug("%s heap %d changing cache attributes from %x to %x\n",
+			__func__, heap->id, attr & IMG_MEM_ATTR_CACHE_MASK,
+			heap->alt_cache_attr);
+		attr &= ~IMG_MEM_ATTR_CACHE_MASK;
+		attr |= heap->alt_cache_attr;
+	}
+
+	ret = heap->ops->import(device, heap, buffer->actual_size, attr,
+				buf_hnd, buffer);
+	if (ret) {
+		pr_err("%s: heap %d import failed\n", __func__, heap->id);
+		goto heap_import_failed;
+	}
+
+	__img_pdump_printf(device, "-- Allocating zeroed buffer id:%d size:%zu for imported data\n",
+			buffer->id, buffer->actual_size);
+	__img_pdump_printf(device, "CALLOC "_PMEM_":BLOCK_%d %#zx %#zx 0x0\n",
+			buffer->id, buffer->actual_size, align);
+
+	ctx->mem_usage_curr += buffer->actual_size;
+	if (ctx->mem_usage_curr > ctx->mem_usage_max)
+		ctx->mem_usage_max = ctx->mem_usage_curr;
+
+	*buffer_new = buffer;
+	return 0;
+
+heap_import_failed:
+	idr_remove(&ctx->buffers, buffer->id);
+idr_alloc_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int img_mem_import(struct device *device, struct mem_ctx *ctx, int heap_id,
+			size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+			int *buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	struct buffer *buffer;
+	int ret;
+
+	pr_debug("%s heap %d ctx %p hnd %#llx\n", __func__, heap_id, ctx, buf_hnd);
+
+	ret = mutex_lock_interruptible(&mem_man->mutex);
+	if (ret)
+		return ret;
+
+	heap = idr_find(&mem_man->heaps, heap_id);
+	if (!heap) {
+		pr_err("%s: heap id %d not found\n", __func__, heap_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_import(device, ctx, heap, size, attr, buf_hnd, &buffer);
+	if (ret) {
+		mutex_unlock(&mem_man->mutex);
+		return ret;
+	}
+
+	*buf_id = buffer->id;
+	mutex_unlock(&mem_man->mutex);
+
+	pr_info("%s buf_hnd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
+		buf_hnd, heap_id, get_heap_name(heap->type), *buf_id, size);
+	pr_debug("%s heap %d ctx %p created buffer %d (%p) size %zu\n",
+		__func__, heap_id, ctx, *buf_id, buffer, size);
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_import);
+
+static int _img_mem_export(struct device *device,
+				struct mem_ctx *ctx, struct heap *heap,
+				size_t size, enum img_mem_attr attr,
+				struct buffer *buffer, uint64_t *buf_hnd)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (size > buffer->actual_size) {
+		pr_err("%s: buffer size (%zu) bigger than actual size (%zu)\n",
+				__func__, size, buffer->actual_size);
+		return -EINVAL;
+	}
+
+	if (heap->ops == NULL || heap->ops->export == NULL) {
+		pr_err("%s: no export function in heap %d!\n",
+					__func__, heap->id);
+		return -EINVAL;
+	}
+
+	ret = heap->ops->export(device, heap, buffer->actual_size, attr,
+				buffer, buf_hnd);
+	if (ret) {
+		pr_err("%s: heap %d export failed\n", __func__, heap->id);
+		return -EFAULT;
+	}
+
+	return ret;
+}
+
+int img_mem_export(struct device *device, struct mem_ctx *ctx, int buf_id,
+			size_t size, enum img_mem_attr attr, uint64_t *buf_hnd)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	struct buffer *buffer;
+	int ret;
+
+	pr_debug("%s ctx %p buffer id %d\n", __func__, ctx, buf_id);
+
+	ret = mutex_lock_interruptible(&mem_man->mutex);
+	if (ret)
+		return ret;
+
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	heap = buffer->heap;
+
+	ret = _img_mem_export(device, ctx, heap, size, attr, buffer, buf_hnd);
+	if (ret) {
+		mutex_unlock(&mem_man->mutex);
+		return ret;
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	pr_info("%s buf_hnd %#llx heap %d (%s) buffer %d size %zu\n", __func__,
+		*buf_hnd, heap->id, get_heap_name(heap->type), buf_id, size);
+	pr_debug("%s heap %d ctx %p exported buffer %d (%p) size %zu\n",
+		__func__, heap->id, ctx, buf_id, buffer, size);
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_export);
+
+static void _img_mem_free(struct buffer *buffer)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap = buffer->heap;
+	struct mem_ctx *ctx = buffer->mem_ctx;
+
+	pr_debug("%s buffer 0x%p\n", __func__, buffer);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops == NULL || heap->ops->free == NULL) {
+		pr_err("%s: no free function in heap %d!\n",
+				__func__, heap->id);
+		return;
+	}
+
+	while (!list_empty(&buffer->mappings)) {
+		struct mmu_ctx_mapping *map;
+
+		map = list_first_entry(&buffer->mappings,
+							struct mmu_ctx_mapping, buffer_entry);
+		pr_debug("%s: found mapping for buffer %d (size %zu)\n",
+			__func__, map->buffer->id, map->buffer->actual_size);
+		_img_mmu_unmap(map);
+		kfree(map);
+	}
+
+	heap->ops->free(heap, buffer);
+	if (ctx->mem_usage_curr >= buffer->actual_size)
+		ctx->mem_usage_curr -= buffer->actual_size;
+	else
+		WARN_ON(1);
+
+	idr_remove(&ctx->buffers, buffer->id);
+
+	if (heap->type != IMG_MEM_HEAP_TYPE_OCM) {
+		__img_pdump_printf(buffer->device, "-- Freeing buffer id:%d  size:%zu\n",
+				buffer->id, buffer->actual_size);
+		__img_pdump_printf(buffer->device, "FREE "_PMEM_":BLOCK_%d\n", buffer->id);
+	}
+	kfree(buffer);
+}
+
+void img_mem_free(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return;
+	}
+
+	_img_mem_free(buffer);
+
+	mutex_unlock(&mem_man->mutex);
+}
+EXPORT_SYMBOL(img_mem_free);
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+/*
+ * dma_fence ops
+ */
+static const char *_img_mem_sync_get_driver_name(struct dma_fence *f)
+{
+	return "buf_sync";
+}
+static const char *_img_mem_sync_get_timeline_name(struct dma_fence *f)
+{
+	return "buf_timeline";
+}
+static bool _img_mem_sync_enable_signaling(struct dma_fence *f)
+{
+	return true;
+}
+static void _img_mem_sync_release(struct dma_fence *fence)
+{
+	dma_fence_free(fence);
+}
+static struct dma_fence_ops dma_fence_ops = {
+	.get_driver_name = _img_mem_sync_get_driver_name,
+	.get_timeline_name = _img_mem_sync_get_timeline_name,
+	.enable_signaling = _img_mem_sync_enable_signaling,
+	.release = _img_mem_sync_release,
+	.wait = dma_fence_default_wait
+};
+
+struct dma_fence * img_mem_add_fence(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return NULL;
+	}
+
+	if (buffer->fence) {
+		pr_err("%s: fence for buffer id %d already allocated and not freed \n",
+					__func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return NULL;
+	}
+
+	buffer->fence = kmalloc(sizeof(struct buffer_fence), GFP_KERNEL);
+	if (!buffer->fence) {
+		pr_err("%s: cannot allocate fence for buffer id %d\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return NULL;
+	}
+
+	spin_lock_init(&buffer->fence->lock);
+	dma_fence_init(&buffer->fence->fence,
+					&dma_fence_ops,
+					&buffer->fence->lock,
+					dma_fence_context_alloc(1),
+					1);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return &buffer->fence->fence;
+}
+EXPORT_SYMBOL(img_mem_add_fence);
+
+void img_mem_remove_fence(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct dma_fence *fence = NULL;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return;
+	}
+
+	if (buffer->fence) {
+		fence = &buffer->fence->fence;
+		buffer->fence = NULL;
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	if (fence)
+		dma_fence_signal(fence);
+}
+EXPORT_SYMBOL(img_mem_remove_fence);
+
+int img_mem_signal_fence(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct dma_fence *fence = NULL;
+	int ret = -1;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -1;
+	}
+	if (buffer->fence) {
+		fence = &buffer->fence->fence;
+		buffer->fence = NULL;
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	if (fence)
+		ret = dma_fence_signal(fence);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_signal_fence);
+#endif
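+
+/* Sketch of the intended fence flow (illustrative caller code; only
+ * meaningful when KERNEL_DMA_FENCE_SUPPORT is defined):
+ *
+ *   producer: struct dma_fence *f = img_mem_add_fence(ctx, buf_id);
+ *             ... hardware writes into the buffer ...
+ *             img_mem_signal_fence(ctx, buf_id);  // or img_mem_remove_fence()
+ *   consumer: dma_fence_wait(f, true);
+ */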
+
+static void _img_mem_sync_device_to_cpu(struct buffer *buffer, bool force);
+
+int img_mem_map_um(struct mem_ctx *ctx, int buf_id, struct vm_area_struct *vma)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct heap *heap;
+	int ret;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+	pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
+
+	heap = buffer->heap;
+	if (heap->ops == NULL || heap->ops->map_um == NULL) {
+		pr_err("%s: no map_um in heap %d!\n", __func__, heap->id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = heap->ops->map_um(heap, buffer, vma);
+	/* Always invalidate the buffer when it is mapped into UM for reading */
+	if (!ret && (vma->vm_flags & VM_READ) && !(vma->vm_flags & VM_WRITE))
+		_img_mem_sync_device_to_cpu(buffer, false);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_map_um);
+
+int img_mem_unmap_um(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct heap *heap;
+	int ret;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+	pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
+
+	heap = buffer->heap;
+	if (heap->ops == NULL || heap->ops->unmap_um == NULL) {
+		pr_err("%s: no map_um in heap %d!\n", __func__, heap->id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = heap->ops->unmap_um(heap, buffer);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_unmap_um);
+
+static int _img_mem_map_km(struct buffer *buffer)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap = buffer->heap;
+
+	pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops == NULL || heap->ops->map_km == NULL) {
+		pr_err("%s: no map_km in heap %d!\n", __func__, heap->id);
+		return -EINVAL;
+	}
+
+	return heap->ops->map_km(heap, buffer);
+}
+
+int img_mem_map_km(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_map_km(buffer);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_map_km);
+
+static int _img_mem_unmap_km(struct buffer *buffer)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap = buffer->heap;
+
+	pr_debug("%s:%d buffer 0x%p\n", __func__, __LINE__, buffer);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops == NULL || heap->ops->unmap_km == NULL) {
+		pr_err("%s: no unmap_km in heap %d!\n", __func__, heap->id);
+		return -EINVAL;
+	}
+
+	return heap->ops->unmap_km(heap, buffer);
+}
+
+int img_mem_unmap_km(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_unmap_km(buffer);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mem_unmap_km);
+
+uint64_t *img_mem_get_page_array(struct mem_ctx *mem_ctx, int buf_id)
+{
+	struct buffer *buffer;
+	struct heap *heap;
+	struct mem_man *mem_man = &mem_man_data;
+	uint64_t *addrs = NULL;
+	int ret;
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&mem_ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n",
+				__func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return NULL;
+	}
+
+	heap = buffer->heap;
+	if (heap && heap->ops && heap->ops->get_page_array) {
+		ret = heap->ops->get_page_array(heap, buffer, &addrs);
+		if (ret || addrs == NULL) {
+			pr_err("%s: no page array for heap %d buffer %d\n",
+						__func__, heap->id, buffer->id);
+		}
+	} else
+		pr_err("%s: heap %d does not support page arrays\n",
+					__func__, heap->id);
+	mutex_unlock(&mem_man->mutex);
+	return addrs;
+}
+EXPORT_SYMBOL(img_mem_get_page_array);
+
+/* gets physical address of a single page at given offset */
+uint64_t img_mem_get_single_page(struct mem_ctx *mem_ctx, int buf_id,
+		unsigned int offset)
+{
+	struct buffer *buffer;
+	struct heap *heap;
+	struct mem_man *mem_man = &mem_man_data;
+	int ret;
+	uint64_t addr = 0;
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&mem_ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -1;
+	}
+
+	heap = buffer->heap;
+	if (!heap) {
+		pr_err("%s: buffer %d does not point any heap it belongs to!\n",
+				__func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -1;
+	}
+
+	if (heap->ops && heap->ops->get_sg_table) {
+		struct sg_table *sgt;
+		struct scatterlist *sgl;
+		int offs = offset;
+		bool use_sg_dma = false;
+
+		ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no sg_table!\n",
+						__func__, heap->id, buffer->id);
+			mutex_unlock(&mem_man->mutex);
+			return -1;
+		}
+		sgl = sgt->sgl;
+		while (sgl) {
+			if (use_sg_dma)
+				offs -= sg_dma_len(sgl);
+			else
+				offs -= sgl->length;
+
+			if (offs <= 0)
+				break;
+			sgl = sg_next(sgl);
+		}
+		if (!sgl) {
+			pr_err("%s: heap %d buffer %d wrong offset %d!\n",
+					__func__, heap->id, buffer->id, offset);
+			mutex_unlock(&mem_man->mutex);
+			return -1;
+		}
+
+		if (use_sg_dma)
+			addr = sg_dma_address(sgl);
+		else
+			addr = sg_phys(sgl);
+
+	} else if (heap->ops && heap->ops->get_page_array) {
+		uint64_t *addrs;
+		int page_idx = offset / PAGE_SIZE;
+
+		ret = heap->ops->get_page_array(heap, buffer, &addrs);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no page array!\n",
+						__func__, heap->id, buffer->id);
+			return -1;
+		}
+
+		if (offset > buffer->actual_size) {
+			pr_err("%s: heap %d buffer %d wrong offset %d!\n",
+						__func__, heap->id, buffer->id, offset);
+			mutex_unlock(&mem_man->mutex);
+			return -1;
+		}
+		addr = addrs[page_idx];
+	}
+
+	mutex_unlock(&mem_man->mutex);
+	return addr;
+}
+EXPORT_SYMBOL(img_mem_get_single_page);
+
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	void *kptr;
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return NULL;
+	}
+	kptr = buffer->kptr;
+	mutex_unlock(&mem_man->mutex);
+	return kptr;
+}
+EXPORT_SYMBOL(img_mem_get_kptr);
+
+phys_addr_t img_mem_get_dev_addr(struct mem_ctx *mem_ctx, int buf_id,
+		phys_addr_t addr)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct heap *heap;
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&mem_ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return addr;
+	}
+	heap = buffer->heap;
+	if (heap->to_dev_addr)
+		addr = heap->to_dev_addr(&heap->options, addr);
+
+	mutex_unlock(&mem_man->mutex);
+	return addr;
+}
+EXPORT_SYMBOL(img_mem_get_dev_addr);
+
+int img_mmu_init_cache(struct mmu_ctx *mmu_ctx,	unsigned long cache_phys_start,
+		uint32_t cache_size)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct pdump_descr *pdump = vha_pdump_dev_get_drvdata(mmu_ctx->device);
+
+	mutex_lock(&mem_man->mutex);
+
+	mmu_ctx->cache_phys_start = cache_phys_start;
+	mmu_ctx->cache_size = cache_size;
+
+	if (img_pdump_enabled(pdump) && cache_size && !mem_man->cache_usage) {
+		__img_pdump_printf(mmu_ctx->device, "-- Allocating img mem cache buffer size:%u\n", cache_size);
+		__img_pdump_printf(mmu_ctx->device, "CALLOC :OCM:BLOCK_CACHE %#x %#zx 0x0\n",
+			cache_size, IMGMMU_GET_MAX_PAGE_SIZE());
+	}
+
+	mem_man->cache_usage++;
+
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_init_cache);
+
+int img_mmu_clear_cache(struct mmu_ctx *mmu_ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct pdump_descr *pdump = vha_pdump_dev_get_drvdata(mmu_ctx->device);
+
+	mutex_lock(&mem_man->mutex);
+
+	if (mem_man->cache_usage)
+		mem_man->cache_usage--;
+
+	if (img_pdump_enabled(pdump) && mmu_ctx->cache_size && !mem_man->cache_usage) {
+		__img_pdump_printf(mmu_ctx->device, "-- Freeing img mem cache buffer size:%u\n",
+				mmu_ctx->cache_size);
+		__img_pdump_printf(mmu_ctx->device, "FREE :OCM:BLOCK_CACHE\n");
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_clear_cache);
+
+int img_mmu_move_pg_to_cache(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		int buf_id, uint64_t virt_addr, uint32_t page_size, uint32_t page_idx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	struct mmu_ctx_mapping *mapping;
+	int ret = -EINVAL;
+
+	if (page_size != imgmmu_get_page_size()) {
+		pr_err("%s: page sizes does not match!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!mmu_ctx->mmu_cat) {
+		pr_err("%s: trying to move pages with mmu disabled!\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&mem_ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	if (buffer->actual_size <= page_idx * imgmmu_get_page_size()) {
+		pr_err("%s: trying to remap out of the buffer boundaries!\n", __func__);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(mapping, &buffer->mappings, buffer_entry) {
+		if (mapping->virt_addr == virt_addr) {
+			if (mapping->cache_offset + imgmmu_get_page_size() <= mmu_ctx->cache_size) {
+				__img_pdump_printf(buffer->device, "-- Move page to CACHE\n");
+				ret = imgmmu_cat_override_phys_addr(mmu_ctx->mmu_cat,
+					mapping->virt_addr + page_idx * imgmmu_get_page_size(),
+					mmu_ctx->cache_phys_start + mapping->cache_offset);
+
+				mapping->cache_offset += imgmmu_get_page_size();
+			}
+			break;
+		}
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_mmu_move_pg_to_cache);
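+
+/* Usage sketch (illustrative values): remap the first page of an already
+ * mapped buffer into the cache region registered via img_mmu_init_cache():
+ *
+ *   img_mmu_init_cache(mmu_ctx, ocm_phys_start, ocm_size);
+ *   img_mmu_move_pg_to_cache(mmu_ctx, mem_ctx, buf_id, virt_addr,
+ *                            imgmmu_get_page_size(), 0);
+ */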
+
+static void _img_mem_sync_cpu_to_device(struct buffer *buffer, bool force)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap = buffer->heap;
+
+	if (!cache_sync) {
+		pr_debug("%s:%d buffer %d size %zu cache synchronization disabled!\n",
+			__func__, __LINE__, buffer->id, buffer->actual_size);
+		return;
+	}
+
+	pr_debug("%s:%d buffer %d size %zu kptr %p cache(%d:%d)\n",
+			__func__, __LINE__, buffer->id, buffer->actual_size,
+			buffer->kptr, force, heap->cache_sync);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops && heap->ops->sync_cpu_to_dev &&
+			(force || heap->cache_sync))
+		heap->ops->sync_cpu_to_dev(heap, buffer);
+
+#ifdef CONFIG_ARM
+	dmb();
+#else
+	/* Put memory barrier */
+	mb();
+#endif
+}
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mem_sync_cpu_to_device(buffer, false);
+
+	mutex_unlock(&mem_man->mutex);
+	return 0;
+}
+EXPORT_SYMBOL(img_mem_sync_cpu_to_device);
+
+static void _img_mem_sync_device_to_cpu(struct buffer *buffer, bool force)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap = buffer->heap;
+
+	if (!cache_sync) {
+		pr_debug("%s:%d buffer %d size %zu cache synchronization disabled!\n",
+			__func__, __LINE__, buffer->id, buffer->actual_size);
+		return;
+	}
+
+	pr_debug("%s:%d buffer %d size %zu kptr %p cache(%d:%d)\n",
+			__func__, __LINE__, buffer->id, buffer->actual_size,
+			buffer->kptr, force, heap->cache_sync);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (heap->ops && heap->ops->sync_dev_to_cpu &&
+			(force || heap->cache_sync))
+		heap->ops->sync_dev_to_cpu(heap, buffer);
+}
+
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mem_sync_device_to_cpu(buffer, false);
+
+	mutex_unlock(&mem_man->mutex);
+	return 0;
+}
+EXPORT_SYMBOL(img_mem_sync_device_to_cpu);
+
+int img_mem_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mutex_lock(&mem_man->mutex);
+	if (max)
+		*max = ctx->mem_usage_max;
+	if (curr)
+		*curr = ctx->mem_usage_curr;
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mem_get_usage);
+
+int img_mmu_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mutex_lock(&mem_man->mutex);
+	if (max)
+		*max = ctx->mmu_usage_max;
+	if (curr)
+		*curr = ctx->mmu_usage_curr;
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_get_usage);
+
+static int img_mmu_cache_get_offset(struct mem_ctx *mem_ctx,
+	unsigned long addr, unsigned int *offset)
+{
+	struct mmu_ctx *mmu_ctx;
+
+	list_for_each_entry(mmu_ctx, &mem_ctx->mmu_ctxs, mem_ctx_entry) {
+		if (addr >= mmu_ctx->cache_phys_start &&
+				addr < mmu_ctx->cache_phys_start + mmu_ctx->cache_size) {
+			*offset = addr - mmu_ctx->cache_phys_start;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/*
+ * related to stream MMU context (contains IMGMMU functionality in general)
+ */
+
+static int imgmmu_find_buffer(struct mem_ctx *ctx, uint64_t addr,
+						int *buffer_id, unsigned int *buffer_offset)
+{
+	struct heap *heap;
+	struct buffer *buffer;
+	int buf_id;
+	unsigned int buf_offset;
+	int ret;
+
+	for (buf_id = *buffer_id;
+			((buffer) = idr_get_next(&ctx->buffers, &buf_id)) != NULL; ++buf_id) {
+		heap = buffer->heap;
+		if (heap->ops && heap->ops->get_sg_table) {
+			struct sg_table *sgt;
+			struct scatterlist *sgl;
+			bool use_sg_dma = false;
+
+			ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
+			if (ret) {
+				pr_err("%s: heap %d buffer %d no sg_table!\n",
+							__func__, heap->id, buffer->id);
+				return -EINVAL;
+			}
+
+			if (buffer->pcache.last_sgl) {
+				sgl = buffer->pcache.last_sgl;
+				buf_offset = buffer->pcache.last_offset;
+			} else {
+				sgl = sgt->sgl;
+				buf_offset = 0;
+			}
+
+			while (sgl) {
+				phys_addr_t phys = use_sg_dma ?
+						sg_dma_address(sgl) : sg_phys(sgl);
+				unsigned int len = use_sg_dma ?
+						sg_dma_len(sgl) : sgl->length;
+#if 0
+					pr_err("%s: phys %llx len:%d addr:%llx\n",
+							__func__, phys,  len, addr);
+#endif
+				if (phys == addr) {
+#if 0
+					pr_err("%s: match @addr:%llx buf:%d offs:%d len:%d\n",
+						__func__, addr, buffer->id, buf_offset, len);
+#endif
+					*buffer_id = buffer->id;
+					*buffer_offset = buf_offset;
+					return 0;
+				}
+				buffer->pcache.last_offset = buf_offset += len;
+				buffer->pcache.last_sgl = sgl = sg_next(sgl);
+			}
+		} else if (heap->ops && heap->ops->get_page_array) {
+			uint64_t *addrs;
+			int page_idx;
+
+			ret = heap->ops->get_page_array(heap, buffer, &addrs);
+			if (ret) {
+				pr_err("%s: heap %d buffer %d no page_array!\n",
+							__func__, heap->id, buffer->id);
+				return -EINVAL;
+			}
+
+			if (buffer->pcache.last_sgl) {
+				page_idx = buffer->pcache.last_idx;
+				buf_offset = buffer->pcache.last_offset;
+			} else {
+				page_idx = 0;
+				buf_offset = 0;
+			}
+
+			while (buf_offset < buffer->actual_size) {
+				if (addrs[page_idx] == addr) {
+					*buffer_id = buffer->id;
+					*buffer_offset = buf_offset;
+					return 0;
+				}
+				buffer->pcache.last_idx = page_idx++;
+				buffer->pcache.last_offset = buf_offset += PAGE_SIZE;
+			}
+		} else {
+			pr_err("%s: heap %d buffer %d no phys addrs found!\n",
+						__func__, heap->id, buffer->id);
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static struct imgmmu_page *_page_alloc(void *arg, unsigned char type)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx *mmu_ctx = arg;
+	struct mmu_page *page;
+	struct buffer *buffer;
+	struct heap *heap;
+	int ret;
+
+	pr_debug("%s:%d arg %p\n", __func__, __LINE__, arg);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	page = kzalloc(sizeof(struct mmu_page), GFP_KERNEL);
+	if (!page)
+		return NULL;
+
+	__img_pdump_printf(mmu_ctx->device, "-- Allocating MMU page for %s\n",
+			type == IMGMMU_PTYPE_PC ? "PC" :
+			type == IMGMMU_PTYPE_PD ? "PD" :
+			type == IMGMMU_PTYPE_PT ? "PT" :
+			"???");
+	ret = _img_mem_alloc(mmu_ctx->device, mmu_ctx->mem_ctx, mmu_ctx->heap,
+					imgmmu_get_cpu_page_size(), mmu_ctx->config.alloc_attr, &buffer);
+	if (ret) {
+		pr_err("%s: img_mem_alloc failed (%d)\n", __func__, ret);
+		goto free_page;
+	}
+
+	ret = _img_mem_map_km(buffer);
+	if (ret) {
+		pr_err("%s: img_mem_map_km failed (%d)\n", __func__, ret);
+		goto free_buffer;
+	}
+
+	WARN_ON(!type);
+	page->type = type;
+	page->buffer = buffer;
+	page->page.cpu_addr = (uintptr_t)buffer->kptr;
+	memset((void *)page->page.cpu_addr, 0, imgmmu_get_cpu_page_size());
+	if (type == IMGMMU_PTYPE_PT)
+		page->use_parity = mmu_ctx->config.use_pte_parity;
+
+	heap = buffer->heap;
+	if (heap->ops && heap->ops->get_sg_table) {
+		struct sg_table *sgt;
+		bool use_sg_dma = false;
+
+		ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no sg_table!\n",
+						__func__, heap->id, buffer->id);
+			ret = -EINVAL;
+			goto free_buffer;
+		}
+
+		if (use_sg_dma)
+			page->page.phys_addr = sg_dma_address(sgt->sgl);
+		else
+			page->page.phys_addr = sg_phys(sgt->sgl);
+	} else if (heap->ops && heap->ops->get_page_array) {
+		uint64_t *addrs;
+
+		ret = heap->ops->get_page_array(heap, buffer, &addrs);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no page array!\n",
+						__func__, heap->id, buffer->id);
+			ret = -EINVAL;
+			goto free_buffer;
+		}
+		page->page.phys_addr = *addrs; /* we allocated a single page */
+	} else {
+		pr_err("%s: heap %d buffer %d no get_sg or get_page_array!\n",
+					__func__, heap->id, buffer->id);
+		ret = -EINVAL;
+		goto free_buffer;
+	}
+
+	mmu_ctx->mem_ctx->mmu_usage_curr += buffer->actual_size;
+	if (mmu_ctx->mem_ctx->mmu_usage_curr > mmu_ctx->mem_ctx->mmu_usage_max)
+		mmu_ctx->mem_ctx->mmu_usage_max = mmu_ctx->mem_ctx->mmu_usage_curr;
+
+	pr_debug("%s:%d virt addr %#lx type:%d\n", __func__, __LINE__,
+		page->page.cpu_addr, type);
+	pr_debug("%s:%d phys addr %#llx\n", __func__, __LINE__,
+		page->page.phys_addr);
+	return &page->page;
+
+free_buffer:
+	_img_mem_free(buffer);
+free_page:
+	kfree(page);
+	return NULL;
+}
+
+static void _page_free(struct imgmmu_page *arg)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_page *page;
+
+	page = container_of(arg, struct mmu_page, page);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	pr_debug("%s:%d buffer %u\n", __func__, __LINE__, page->buffer->id);
+
+	if (page->buffer->mem_ctx->mmu_usage_curr >= page->buffer->actual_size)
+		page->buffer->mem_ctx->mmu_usage_curr -= page->buffer->actual_size;
+	else
+		WARN_ON(1);
+
+	_img_mem_free(page->buffer);
+	kfree(page);
+}
+
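+/* Emits a pdump command stream that computes the PTE parity bit on the
+ * pdump player side, mirroring img_mem_calc_parity() below: $0 holds the
+ * PTE value under construction (see the callers in _page_write()), while
+ * $1/$2 are used as scratch registers for the XOR folding. */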
+static inline void __pdump_apply_parity(struct device *dev, uint64_t virt,
+		const char *block, unsigned int offset)
+{
+	uint8_t bits;
+	/* XOR the 32 bit <paddr,vaddr> pair */
+	__img_pdump_printf(dev,
+			"WRW "_PMEM_":$1 %#llx -- Calculate parity bit\n"
+			"WRW "_PMEM_":$2 %s:%#x\n"
+			"SHR "_PMEM_":$2 "_PMEM_":$2 %d\n"
+			"XOR "_PMEM_":$1 "_PMEM_":$1 "_PMEM_":$2\n",
+			virt >> MIN_PAGE_SIZE_BITS, block, offset, MIN_PAGE_SIZE_BITS);
+	for (bits = 16; bits >= 1; bits >>= 1)
+		/* XOR 'bits' pair of previous result */
+		__img_pdump_printf(dev,
+				"AND "_PMEM_":$2 "_PMEM_":$1 %#x\n"
+				"SHR "_PMEM_":$1 "_PMEM_":$1 %d\n"
+				"XOR "_PMEM_":$1 "_PMEM_":$1 "_PMEM_":$2\n",
+				(1<<bits)-1, bits);
+	/* Apply parity bit */
+	__img_pdump_printf(dev,
+			"SHL "_PMEM_":$1 "_PMEM_":$1 %d\n"
+			"OR  "_PMEM_":$0 "_PMEM_":$0 "_PMEM_":$1 -- Apply parity\n",
+			imgmmu_get_pte_parity_shift());
+}
+
+static void _page_write(struct imgmmu_page *page,
+						unsigned int offset, uint64_t entry,
+						unsigned int flags, void *priv)
+{
+	uint64_t *mem64 = (uint64_t *)page->cpu_addr;
+	uint32_t *mem32 = (uint32_t *)mem64;
+	struct mmu_page *mmu_page;
+	struct heap *heap = NULL;
+	struct buffer *buf = (struct buffer*)priv;
+	struct pdump_descr* pdump;
+	uint32_t entry_shift = 0;
+	uint64_t cache_bits = 0;
+	uint64_t address = entry & IMG_MMU_PHY_ADDR_MASK;
+	uint64_t virt = page->virt_base;
+	uint64_t paddr = entry;
+
+	mmu_page = container_of(page, struct mmu_page, page);
+	virt += (1ULL << imgmmu_get_entry_shift(mmu_page->type)) * offset;
+
+	if (mmu_page->type == IMGMMU_PTYPE_PC ||
+		mmu_page->type == IMGMMU_PTYPE_PD)
+		heap = mmu_page->buffer->heap;
+	else {
+		/* PT entries point to a buffer which may have been allocated
+		 * from a different heap than the one used for MMU allocations */
+		if (buf)
+			heap = buf->heap;
+	}
+
+	mmu_page->bypass_addr_trans = (flags & IMGMMU_BYPASS_ADDR_TRANS ? true : false);
+
+	/* Mask MMU flags */
+	flags &= IMG_MMU_ENTRY_FLAGS_MASK;
+
+	/* Skip translation when flags are zero (assuming the address is invalid)
+	 * or when the page is being remapped to on-chip RAM */
+	if (flags && heap && heap->to_dev_addr &&
+			!mmu_page->bypass_addr_trans)
+		paddr = heap->to_dev_addr(&heap->options, paddr);
+
+	WARN(paddr & ~dma_get_mask(mmu_page->buffer->device),
+			"%s: Physical address is out of dma mask, "\
+			"Set proper dma mask to avoid cache problems, ", __func__);
+
+	WARN(paddr & ~IMG_MMU_PHY_ADDR_MASK,
+		"%s: Physical address exceeds hardware capabilities, "\
+		"MMU mapping will be likely invalid", __func__);
+	paddr &= IMG_MMU_PHY_ADDR_MASK;
+
+	if (trace_physical_pages && flags) {
+		if (mmu_page->type == IMGMMU_PTYPE_PC)
+			pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:PC\n",
+					__func__, buf ? buf->mem_ctx->id : -1, offset, paddr, flags);
+		else
+			pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:%s virt:%llx\n",
+					__func__, buf ? buf->mem_ctx->id : -1, offset, paddr, flags,
+					mmu_page->type == IMGMMU_PTYPE_PD ? "PD" :
+					mmu_page->type == IMGMMU_PTYPE_PT ? "PT" :
+					"???", virt);
+	}
+
+	if (mmu_page->type == IMGMMU_PTYPE_PC) {
+		/* PD base is page aligned (12 bits); PCE keeps 4 flag bits, so shift by 12-4=8 */
+		entry_shift = 8;
+		/* This is 32 bit entry */
+		mem32[offset] = (paddr >> entry_shift) | flags;
+	} else if (mmu_page->type == IMGMMU_PTYPE_PD) {
+		/* PT base is page aligned (12 bits); PDE keeps 12 flag bits, so no extra shift */
+		entry_shift = 0;
+		/* This is 64 bit entry */
+		mem64[offset] = paddr | flags;
+	} else if (mmu_page->type == IMGMMU_PTYPE_PT) {
+		/* Page base is page aligned (12 bits); PTE keeps 12 flag bits, so no extra shift */
+		entry_shift = 0;
+		cache_bits = imgmmu_get_pte_cache_bits(entry);
+		/* This is 64 bit entry */
+		mem64[offset] = cache_bits | paddr | flags;
+		if (flags && mmu_page->use_parity) {
+			uint64_t par_pair = (virt >> MIN_PAGE_SIZE_BITS) |
+					((paddr >> MIN_PAGE_SIZE_BITS) << (sizeof(uint32_t)*8));
+			bool par_bit = img_mem_calc_parity(par_pair);
+			if (par_bit)
+				imgmmu_set_pte_parity(&mem64[offset]);
+
+			if (trace_physical_pages)
+				pr_info("%s: [%llx]: %s\n", __func__, mem64[offset],
+						par_bit ? "odd parity" : "even parity");
+		}
+	}
+
+	pdump = vha_pdump_dev_get_drvdata(mmu_page->buffer->device);
+	if (img_pdump_enabled(pdump) && flags) {
+		/* skip when flags are zero, assuming address is invalid */
+		int buffer_id = 0;
+		unsigned int buffer_offset = 0;
+		int ret;
+
+		if (mmu_page->bypass_addr_trans) {
+			ret = img_mmu_cache_get_offset(mmu_page->buffer->mem_ctx, address,
+				&buffer_offset);
+			if (ret) {
+				pr_info("PDUMP: Can't find in cache %#llx\n", address);
+			} else {
+				/* Cache addresses are only applicable for PT entries */
+				WARN_ON(mmu_page->type != IMGMMU_PTYPE_PT);
+				__img_pdump_printf(mmu_page->buffer->device,
+						"WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:%#x\n"
+						"OR  "_PMEM_":$0 "_PMEM_":$0 %d\n",
+						buffer_offset, flags);
+
+				if (mmu_page->use_parity) {
+					const char block[] = ":OCM:BLOCK_CACHE";
+					__pdump_apply_parity(mmu_page->buffer->device, virt, block, buffer_offset);
+				}
+
+				if (cache_bits)
+					__img_pdump_printf(mmu_page->buffer->device,
+						"OR  "_PMEM_":$0 "_PMEM_":$0 %#llx\n",
+						cache_bits);
+				__img_pdump_printf(mmu_page->buffer->device,
+					"WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PTE\n",
+					mmu_page->buffer->id, offset * sizeof(*mem64));
+			}
+		} else {
+			if (mmu_page->type == IMGMMU_PTYPE_PT && buf)
+				buffer_id = buf->id;
+
+			ret = imgmmu_find_buffer(mmu_page->buffer->mem_ctx, address,
+						&buffer_id, &buffer_offset);
+			if (ret) {
+				pr_info("PDUMP: Can't find %#llx\n", address);
+			} else if (mmu_page->type == IMGMMU_PTYPE_PC) {
+				__img_pdump_printf(mmu_page->buffer->device,
+					"WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:%#x\n"
+					"SHR "_PMEM_":$0 "_PMEM_":$0 %d\n"
+					"OR  "_PMEM_":$0 "_PMEM_":$0 %d\n"
+					"WRW "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PCE\n",
+					buffer_id, buffer_offset,
+					entry_shift, flags,
+					mmu_page->buffer->id, offset * sizeof(*mem32));
+			} else {
+				if (mmu_page->type == IMGMMU_PTYPE_PD) {
+					__img_pdump_printf(mmu_page->buffer->device,
+						"WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:%#x\n"
+						"OR  "_PMEM_":$0 "_PMEM_":$0 %d\n"
+						"WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PDE\n",
+						buffer_id, buffer_offset, flags,
+						mmu_page->buffer->id, offset * sizeof(*mem64));
+				} else if (mmu_page->type == IMGMMU_PTYPE_PT) {
+					char block[25];
+
+					if (heap->type == IMG_MEM_HEAP_TYPE_OCM)
+						snprintf(block, sizeof(block), ":OCM:BLOCK_CACHE");
+					else
+						snprintf(block, sizeof(block), ""_PMEM_":BLOCK_%d",
+								buffer_id);
+
+					__img_pdump_printf(mmu_page->buffer->device,
+						"WRW "_PMEM_":$0 %s:%#x\n"
+						"OR  "_PMEM_":$0 "_PMEM_":$0 %d\n",
+						block, buffer_offset, flags);
+
+					if (mmu_page->use_parity)
+						__pdump_apply_parity(mmu_page->buffer->device, virt, block, buffer_offset);
+
+					if (cache_bits)
+						__img_pdump_printf(mmu_page->buffer->device,
+							"OR  "_PMEM_":$0 "_PMEM_":$0 %#llx\n",
+							cache_bits);
+					__img_pdump_printf(mmu_page->buffer->device,
+						"WRW64 "_PMEM_":BLOCK_%d:%#zx "_PMEM_":$0 -- PTE\n",
+						mmu_page->buffer->id, offset * sizeof(*mem64));
+				}
+			}
+		}
+	}
+}
+
+static uint64_t _page_read(struct imgmmu_page *page,
+					unsigned int offset, void *priv,
+					unsigned int *flags)
+{
+	uint64_t *mem64 = (uint64_t *)page->cpu_addr;
+	uint32_t *mem32 = (uint32_t *)mem64;
+	struct mmu_page *mmu_page;
+	struct heap *heap = NULL;
+	uint32_t entry_shift = 0;
+	uint64_t entry = 0;
+	uint64_t paddr;
+	uint64_t virt = page->virt_base;
+	struct buffer *buf = (struct buffer*)priv;
+
+	mmu_page = container_of(page, struct mmu_page, page);
+	virt += (1ULL << imgmmu_get_entry_shift(mmu_page->type)) * offset;
+
+	if (mmu_page->type == IMGMMU_PTYPE_PC ||
+		mmu_page->type == IMGMMU_PTYPE_PD)
+		heap = mmu_page->buffer->heap;
+	else {
+		/* PT entries point to a buffer which may have been allocated
+		 * from a different heap than the one used for MMU allocations */
+		if (buf)
+			heap = buf->heap;
+	}
+
+	if (mmu_page->type == IMGMMU_PTYPE_PC) {
+		/* PD base is page aligned (12 bits); PCE keeps 4 flag bits, so shift by 12-4=8 */
+		entry_shift = 8;
+		/* This is 32 bit entry */
+		entry = mem32[offset];
+	} else if (mmu_page->type == IMGMMU_PTYPE_PD) {
+		/* PT base is page aligned (12 bits); PDE keeps 12 flag bits, so no extra shift */
+		entry_shift = 0;
+		/* This is 64 bit entry */
+		entry = mem64[offset];
+	} else if (mmu_page->type == IMGMMU_PTYPE_PT) {
+		/* Page base is page aligned (12 bits); PTE keeps 12 flag bits, so no extra shift */
+		entry_shift = 0;
+		/* This is 64 bit entry */
+		entry = mem64[offset];
+	}
+
+	*flags = entry & IMG_MMU_ENTRY_FLAGS_MASK;
+	paddr = entry & ~IMG_MMU_ENTRY_FLAGS_MASK;
+
+	if (mmu_page->type == IMGMMU_PTYPE_PT) {
+		/* Mask parity and special cache bits */
+		paddr &= ~(1ULL<<imgmmu_get_pte_parity_shift());
+		paddr &= ~imgmmu_get_pte_cache_bits(entry);
+	}
+
+	paddr <<= entry_shift;
+
+	/* Check if physical address set in PTE is within correct range */
+	if (paddr & ~IMG_MMU_PHY_ADDR_MASK) {
+		pr_err("%s: mmu page entry (%llx) corruption detected (phys)!\n",
+				__func__, paddr);
+		*flags = IMG_MMU_ENTRY_FLAGS_MASK;
+		goto exit;
+	}
+
+	/* Check parity */
+	if (*flags && mmu_page->type == IMGMMU_PTYPE_PT &&
+			mmu_page->use_parity) {
+		uint64_t par_pair = (virt >> MIN_PAGE_SIZE_BITS) |
+				((paddr >> MIN_PAGE_SIZE_BITS) << (sizeof(uint32_t)*8));
+		bool par_bit = img_mem_calc_parity(par_pair);
+
+		if (trace_physical_pages)
+			pr_info("%s: [%llx]: %s\n", __func__, entry,
+					par_bit ? "odd parity" : "even parity");
+		if ((entry >> imgmmu_get_pte_parity_shift()) != par_bit) {
+			pr_err("%s: mmu page entry (%llx) corruption detected (parity)!\n",
+					__func__, entry);
+			*flags = IMG_MMU_ENTRY_FLAGS_MASK;
+			goto exit;
+		}
+	}
+
+	/* skip translation when flags are zero, assuming address is invalid */
+	if (*flags && heap && heap->to_host_addr &&
+			!mmu_page->bypass_addr_trans)
+		paddr = heap->to_host_addr(&heap->options, paddr);
+
+	/* Check if physical address matches dma mask */
+	if (paddr & ~dma_get_mask(mmu_page->buffer->device)) {
+		pr_err("%s: mmu page entry (%llx) physical address is out of dma mask!\n"
+				"Set proper dma mask to avoid cache problems\n",
+				__func__, paddr);
+		*flags = IMG_MMU_ENTRY_FLAGS_MASK;
+		goto exit;
+	}
+
+	/* Sanity check for MMU flags - different on each level */
+	if (((mmu_page->type == IMGMMU_PTYPE_PC ||
+		mmu_page->type == IMGMMU_PTYPE_PD) &&
+			(*flags & ~IMG_MMU_PTE_FLAG_VALID)) ||
+		(mmu_page->type == IMGMMU_PTYPE_PT &&
+			(*flags & ~(IMG_MMU_PTE_FLAG_VALID|IMG_MMU_PTE_FLAG_READ_ONLY)))) {
+		pr_err("%s: mmu page entry corruption detected (flags)!\n",
+				__func__);
+		*flags = IMG_MMU_ENTRY_FLAGS_MASK;
+	}
+
+exit:
+	if (trace_physical_pages && *flags) {
+		if (mmu_page->type == IMGMMU_PTYPE_PC)
+			pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:PC\n",
+					__func__, buf ? buf->mem_ctx->id : -1, offset, paddr, *flags);
+		else
+			pr_info("%s: sid:%d off %#03x paddr %#016llx flags %#x type:%s virt:%llx\n",
+					__func__, buf ? buf->mem_ctx->id : -1, offset, paddr, *flags,
+					mmu_page->type == IMGMMU_PTYPE_PD ? "PD" :
+					mmu_page->type == IMGMMU_PTYPE_PT ? "PT" :
+					"???", virt);
+	}
+
+	return paddr;
+}
+
+static void _update_page(struct imgmmu_page *arg)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_page *page;
+
+	if (trace_physical_pages)
+		pr_debug("%s\n", __func__);
+
+	page = container_of(arg, struct mmu_page, page);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	_img_mem_sync_cpu_to_device(page->buffer, true);
+}
+
+int img_mmu_ctx_create(struct device *device, const struct mmu_config *config,
+					struct mem_ctx *mem_ctx, int heap_id,
+					int (*callback_fn)(enum img_mmu_callback_type type,
+						int buf_id, void *data),
+					void *callback_data, struct mmu_ctx **mmu_ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	static struct imgmmu_info info = {
+		.page_alloc = _page_alloc,
+		.page_free = _page_free,
+		.page_write = _page_write,
+		.page_read = _page_read,
+		.page_update = _update_page,
+	};
+	struct mmu_ctx *ctx;
+	int res;
+
+	if (((config->addr_width < imgmmu_get_virt_size()) ||
+		(config->addr_width < imgmmu_get_phys_size())) &&
+			!config->bypass_hw) {
+		pr_err("%s: invalid addr_width (%d)!\n",
+					__func__, config->addr_width);
+		return -EINVAL;
+	}
+
+	ctx = kzalloc(sizeof(struct mmu_ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->device = device;
+	ctx->mem_ctx = mem_ctx;
+	memcpy(&ctx->config, config, sizeof(struct mmu_config));
+
+	imgmmu_set_page_size(config->page_size);
+
+	mutex_lock(&mem_man->mutex);
+
+	ctx->heap = idr_find(&mem_man->heaps, heap_id);
+	if (!ctx->heap) {
+		pr_err("%s: invalid heap_id (%d)!\n", __func__, heap_id);
+		mutex_unlock(&mem_man->mutex);
+		kfree(ctx);
+		return -EINVAL;
+	}
+
+	/* Apply offset when needed */
+	if (ctx->heap->ops->set_offset) {
+		if (ctx->heap->ops->set_offset(ctx->heap, config->bypass_offset)) {
+			pr_err("%s: failed to set offset %zu heap_id (%d)!\n",
+					__func__, config->bypass_offset, heap_id);
+			mutex_unlock(&mem_man->mutex);
+			kfree(ctx);
+			return -EINVAL;
+		}
+		pr_debug("%s adding %lx offset bytes to heap %d type %d (%s)\n",
+				__func__, config->bypass_offset, ctx->heap->id,
+				ctx->heap->type, get_heap_name(ctx->heap->type));
+	}
+
+	info.ctx = ctx;
+	/* If we are in bypass mode, do not populate hw structures */
+	if (!config->bypass_hw) {
+		ctx->mmu_cat = imgmmu_cat_create(&info, &res);
+		if (res) {
+			pr_err("%s: catalogue create failed (%d)!\n",
+					__func__, res);
+			mutex_unlock(&mem_man->mutex);
+			kfree(ctx);
+			return -EFAULT;
+		}
+	} else
+		pr_debug("%s imgmmu_cat_create bypass!\n", __func__);
+
+	list_add(&ctx->mem_ctx_entry, &mem_ctx->mmu_ctxs);
+	INIT_LIST_HEAD(&ctx->mappings);
+
+	ctx->callback_fn = callback_fn;
+	ctx->callback_data = callback_data;
+
+	ctx->id = mem_ctx->id;
+
+	*mmu_ctx = ctx;
+
+	mutex_unlock(&mem_man->mutex);
+
+	return ctx->id;
+}
+EXPORT_SYMBOL(img_mmu_ctx_create);
+
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	int res;
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	while (!list_empty(&ctx->mappings)) {
+		struct mmu_ctx_mapping *map;
+
+		map = list_first_entry(&ctx->mappings,
+							struct mmu_ctx_mapping, mmu_ctx_entry);
+		pr_info("%s: found mapped buffer %d (size %zu)\n",
+			__func__, map->buffer->id, map->buffer->request_size);
+		_img_mmu_unmap(map);
+		kfree(map);
+	}
+
+	if (!ctx->config.bypass_hw) {
+		res = imgmmu_cat_destroy(ctx->mmu_cat);
+		if (res)
+			pr_err("imgmmu_cat_destroy failed (%d)!\n", res);
+	} else
+		pr_debug("%s imgmmu_cat_destroy bypass!\n", __func__);
+
+	list_del(&ctx->mem_ctx_entry);
+}
+
+void img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mutex_lock(&mem_man->mutex);
+	_img_mmu_ctx_destroy(ctx);
+	mutex_unlock(&mem_man->mutex);
+
+	kfree(ctx);
+}
+EXPORT_SYMBOL(img_mmu_ctx_destroy);
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id,
+		uint64_t virt_addr, unsigned int map_flags)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx_mapping *mapping;
+	struct imgmmu_halloc heap_alloc;
+	struct buffer *buffer;
+	struct heap *heap;
+	int res = 0;
+	int ret;
+
+	pr_debug("%s buffer %d virt_addr %#llx\n",
+			__func__, buf_id, virt_addr);
+
+	mapping = kzalloc(sizeof(struct mmu_ctx_mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mutex_lock(&mem_man->mutex);
+	buffer = idr_find(&mem_ctx->buffers, buf_id);
+	if (!buffer) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		ret = -EINVAL;
+		goto error;
+	}
+	pr_debug("%s buffer %d 0x%p size %zu virt_addr %#llx\n", __func__,
+		buf_id, buffer, buffer->request_size, virt_addr);
+	/* Store MMU mapping flags */
+	buffer->map_flags = map_flags;
+
+	heap_alloc.vaddr = virt_addr;
+	heap_alloc.size = buffer->actual_size;
+
+	mapping->mmu_ctx = mmu_ctx;
+	mapping->buffer = buffer;
+	mapping->virt_addr = virt_addr;
+
+	if (!mmu_ctx->config.bypass_hw)
+		__img_pdump_printf(buffer->device, "-- Mapping "_PMEM_":BLOCK_%d @ 0x%llx\n",
+				buf_id, virt_addr);
+
+	heap = buffer->heap;
+	if (heap->ops && heap->ops->get_sg_table) {
+		struct sg_table *sgt;
+		bool use_sg_dma = false;
+
+		ret = heap->ops->get_sg_table(heap, buffer, &sgt, &use_sg_dma);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no sg_table!\n",
+						__func__, heap->id, buffer->id);
+			goto error;
+		}
+
+		if (!mmu_ctx->config.bypass_hw)
+			mapping->map = imgmmu_cat_map_sg(
+						mmu_ctx->mmu_cat,
+						sgt->sgl,
+						use_sg_dma,
+						&heap_alloc,
+						map_flags, buffer,
+						&res);
+		else
+			pr_debug("%s imgmmu_cat_map_sg bypass!\n", __func__);
+	} else if (heap->ops && heap->ops->get_page_array) {
+		uint64_t *addrs;
+
+		ret = heap->ops->get_page_array(heap, buffer, &addrs);
+		if (ret) {
+			pr_err("%s: heap %d buffer %d no page array!\n",
+						__func__, heap->id, buffer->id);
+			goto error;
+		}
+
+		if (!mmu_ctx->config.bypass_hw)
+			mapping->map = imgmmu_cat_map_arr(
+						mmu_ctx->mmu_cat,
+						addrs,
+						&heap_alloc,
+						map_flags, buffer,
+						&res);
+		else
+			pr_debug("%s imgmmu_cat_map_arr bypass!\n", __func__);
+	} else {
+		pr_err("%s: heap %d buffer %d no get_sg or get_page_array!\n",
+					__func__, heap->id, buffer->id);
+		ret = -EINVAL;
+		goto error;
+	}
+	if (res) {
+		pr_err("imgmmu_cat_map failed (%d)!\n", res);
+		ret = -EFAULT;
+		goto error;
+	}
+
+	list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
+	list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
+
+	if (mmu_ctx->callback_fn && !mmu_ctx->config.bypass_hw) {
+		ret = mmu_ctx->callback_fn(IMG_MMU_CALLBACK_MAP, buffer->id,
+						mmu_ctx->callback_data);
+		if (ret)
+			pr_err("%s: imgmmu map callback failed!\n", __func__);
+	}
+	mutex_unlock(&mem_man->mutex);
+	return ret;
+
+error:
+	mutex_unlock(&mem_man->mutex);
+	kfree(mapping);
+	return ret;
+}
+EXPORT_SYMBOL(img_mmu_map);
+
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx *ctx = mapping->mmu_ctx;
+	int res;
+
+	pr_debug("%s:%d unmapping %p buffer %d\n",
+		__func__, __LINE__, mapping, mapping->buffer->id);
+
+	WARN_ON(!mutex_is_locked(&mem_man->mutex));
+
+	if (!ctx->config.bypass_hw) {
+		res = imgmmu_cat_unmap(mapping->map);
+		if (res)
+			pr_warn("imgmmu_cat_unmap failed (%d)!\n", res);
+	} else
+		pr_debug("%s imgmmu_cat_unmap bypass!\n", __func__);
+
+	list_del(&mapping->mmu_ctx_entry);
+	list_del(&mapping->buffer_entry);
+
+	if (ctx->callback_fn && !ctx->config.bypass_hw)
+		ctx->callback_fn(IMG_MMU_CALLBACK_UNMAP, mapping->buffer->id,
+				ctx->callback_data);
+}
+
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx_mapping *mapping;
+	struct list_head *lst;
+
+	pr_debug("%s:%d buffer %d\n", __func__, __LINE__, buf_id);
+
+	mutex_lock(&mem_man->mutex);
+
+	mapping = NULL;
+	list_for_each(lst, &mmu_ctx->mappings) {
+		struct mmu_ctx_mapping *m;
+
+		m = list_entry(lst, struct mmu_ctx_mapping, mmu_ctx_entry);
+		if (m->buffer->id == buf_id) {
+			mapping = m;
+			break;
+		}
+	}
+
+	if (!mapping) {
+		pr_err("%s: buffer id %d not found\n", __func__, buf_id);
+		mutex_unlock(&mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mmu_unmap(mapping);
+
+	mutex_unlock(&mem_man->mutex);
+	kfree(mapping);
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_unmap);
+
+int img_mmu_get_conf(size_t *page_size, size_t *virt_size)
+{
+	if (page_size)
+		*page_size = imgmmu_get_page_size();
+	if (virt_size)
+		*virt_size = imgmmu_get_virt_size();
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_get_conf);
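+
+/* Illustrative sketch only (not built): the typical life cycle of an MMU
+ * context as exposed by this file. The 0x40000000 device-virtual address,
+ * the 40-bit address width and the error handling are example assumptions,
+ * not values mandated by the driver. */
+#if 0
+static int example_mmu_lifecycle(struct device *dev, struct mem_ctx *mem_ctx,
+				int heap_id, int buf_id)
+{
+	struct mmu_config config = {
+		.addr_width = 40,	/* example: must cover virt & phys sizes */
+		.page_size = PAGE_SIZE,
+	};
+	struct mmu_ctx *mmu_ctx;
+	unsigned int pc_reg;
+	int pc_bufid;
+	int ret;
+
+	ret = img_mmu_ctx_create(dev, &config, mem_ctx, heap_id,
+			NULL, NULL, &mmu_ctx);
+	if (ret < 0)
+		return ret;
+
+	/* Map an existing buffer at a caller chosen device virtual address */
+	ret = img_mmu_map(mmu_ctx, mem_ctx, buf_id, 0x40000000ULL,
+			IMG_MMU_PTE_FLAG_VALID);
+	if (ret)
+		goto out_destroy;
+
+	/* PFN of the page catalogue, to be programmed into the MMU register */
+	ret = img_mmu_get_pc(mmu_ctx, &pc_reg, &pc_bufid);
+
+	img_mmu_unmap(mmu_ctx, mem_ctx, buf_id);
+out_destroy:
+	img_mmu_ctx_destroy(mmu_ctx);
+	return ret;
+}
+#endif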
+
+int img_mmu_get_pc(const struct mmu_ctx *ctx, unsigned int *pc_reg, int *bufid)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct imgmmu_page *page = NULL;
+	phys_addr_t addr = 0ULL;
+
+	mutex_lock(&mem_man->mutex);
+	*pc_reg = 0;
+
+	if (!ctx->config.bypass_hw) {
+		struct mmu_page *mmu_page;
+
+		page = imgmmu_cat_get_page(ctx->mmu_cat);
+		if (!page) {
+			mutex_unlock(&mem_man->mutex);
+			return -EINVAL;
+		}
+
+		mmu_page = container_of(page, struct mmu_page, page);
+		*bufid = mmu_page->buffer->id;
+
+		addr = page->phys_addr;
+		if (ctx->heap->to_dev_addr)
+			addr = ctx->heap->to_dev_addr(
+					&ctx->heap->options,
+					addr);
+
+		/* This is PFN of Page Catalogue phy address */
+		*pc_reg = (unsigned int)(addr >>= IMG_MMU_PC_ADDR_SHIFT);
+
+		pr_debug("%s: addr %#llx pc %#llx bufid %d\n", __func__,
+				page->phys_addr, addr, *bufid);
+	} else
+		pr_debug("%s imgmmu_cat_get_page bypass!\n", __func__);
+
+	mutex_unlock(&mem_man->mutex);
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_get_pc);
+
+phys_addr_t img_mmu_get_paddr(const struct mmu_ctx *ctx,
+				uint64_t vaddr, uint8_t *flags)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	uint64_t entry = 0;
+	phys_addr_t paddr = 0;
+
+	*flags = 0;
+	mutex_lock(&mem_man->mutex);
+
+	entry = imgmmu_cat_get_pte(ctx->mmu_cat, vaddr);
+	if (entry != ~0ULL) {
+		*flags = entry & IMG_MMU_ENTRY_FLAGS_MASK;
+		paddr = entry & ~IMG_MMU_ENTRY_FLAGS_MASK;
+	}
+
+	mutex_unlock(&mem_man->mutex);
+
+	return paddr;
+}
+EXPORT_SYMBOL(img_mmu_get_paddr);
+
+/*
+ * Wrapper functions for virtual address allocator
+ */
+int img_mmu_vaa_create(struct device *device,
+		uint32_t base, size_t size, struct mmu_vaa **vaa)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_vaa *ctx;
+	int ret = 0;
+
+	if (!size)
+		return -EINVAL;
+
+	ctx = kzalloc(sizeof(struct mmu_vaa), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mutex_lock(&mem_man->mutex);
+
+	ctx->heap = imgmmu_hcreate(base, imgmmu_get_page_size(),
+			size, true, &ret);
+	if (ret) {
+		pr_err("%s: imgmmu_hcreate failed (%d)!\n", __func__, ret);
+		kfree(ctx);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	INIT_LIST_HEAD(&ctx->entries);
+	ctx->device = device;
+	*vaa = ctx;
+
+exit:
+	mutex_unlock(&mem_man->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(img_mmu_vaa_create);
+
+int img_mmu_vaa_destroy(struct mmu_vaa *vaa)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	if (!vaa)
+		return -EINVAL;
+
+	mutex_lock(&mem_man->mutex);
+
+	while (!list_empty(&vaa->entries)) {
+		struct vaa_entry *entry;
+
+		entry = list_first_entry(&vaa->entries,
+			struct vaa_entry, mmu_vaa_entry);
+		if (imgmmu_hfree(entry->alloc)) {
+			pr_err("%s: imgmmu_hfree failed!\n",
+				__func__);
+			WARN_ON(1);
+		}
+		list_del(&entry->mmu_vaa_entry);
+		kfree(entry);
+	}
+
+	if (imgmmu_hdestroy(vaa->heap)) {
+		pr_err("%s: imgmmu_hdestroy failed!\n", __func__);
+		/* If some attachments are still active */
+		WARN_ON(1);
+	}
+	kfree(vaa);
+
+	mutex_unlock(&mem_man->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(img_mmu_vaa_destroy);
+
+int img_mmu_vaa_alloc(struct mmu_vaa *vaa, size_t size, uint32_t *addr)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct imgmmu_halloc *alloc;
+	struct vaa_entry *entry;
+	int ret = 0;
+
+	if (!vaa || !addr || !size)
+		return -EINVAL;
+
+	entry = kzalloc(sizeof(struct vaa_entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	mutex_lock(&mem_man->mutex);
+
+	alloc = imgmmu_hallocate(vaa->heap, size, &ret);
+	if (!alloc || ret) {
+		pr_err("%s: imgmmu_hallocate failed (%zu)!\n",
+			__func__, size);
+		kfree(entry);
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	entry->alloc = alloc;
+	list_add(&entry->mmu_vaa_entry, &vaa->entries);
+
+	*addr = alloc->vaddr;
+
+exit:
+	mutex_unlock(&mem_man->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(img_mmu_vaa_alloc);
+
+int img_mmu_vaa_free(struct mmu_vaa *vaa, uint32_t addr, size_t size)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct vaa_entry *entry = NULL, *iter;
+	int ret = 0;
+
+	if (!vaa || !size)
+		return -EINVAL;
+
+	mutex_lock(&mem_man->mutex);
+
+	/* Look up the allocation matching both address and size */
+	list_for_each_entry(iter, &vaa->entries, mmu_vaa_entry) {
+		if (iter->alloc->vaddr == addr &&
+			iter->alloc->size == size) {
+			entry = iter;
+			break;
+		}
+	}
+
+	if (!entry) {
+		pr_err("%s: allocation not found (0x%x:%zu)!\n",
+				__func__, addr, size);
+		ret = -EINVAL;
+		goto exit;
+	}
+	if (imgmmu_hfree(entry->alloc)) {
+		pr_err("%s: imgmmu_hfree failed (0x%x:%zu)!\n",
+			__func__, addr, size);
+		ret = -EFAULT;
+		goto exit;
+	}
+	list_del(&entry->mmu_vaa_entry);
+	kfree(entry);
+
+exit:
+	mutex_unlock(&mem_man->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(img_mmu_vaa_free);
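+
+/* Illustrative sketch only (not built): reserving and releasing a range
+ * with the virtual address allocator wrappers above. The base address and
+ * sizes are example assumptions. */
+#if 0
+static int example_vaa_usage(struct device *dev)
+{
+	struct mmu_vaa *vaa;
+	uint32_t addr;
+	int ret;
+
+	/* Manage a 1MB device-virtual window starting at 0x40000000 */
+	ret = img_mmu_vaa_create(dev, 0x40000000, 0x100000, &vaa);
+	if (ret)
+		return ret;
+
+	ret = img_mmu_vaa_alloc(vaa, 4 * PAGE_SIZE, &addr);
+	if (!ret)
+		/* Free must be called with the exact address and size */
+		ret = img_mmu_vaa_free(vaa, addr, 4 * PAGE_SIZE);
+
+	img_mmu_vaa_destroy(vaa);
+	return ret;
+}
+#endif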
+
+/* Parity look-up table for 8-bit values */
+static const unsigned int _parity_lut_[256] =
+			{0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			0, 1, 1, 0, 1, 0, 0, 1,
+			1, 0, 0, 1, 0, 1, 1, 0};
+
+bool img_mem_calc_parity(unsigned long long input)
+{
+	int bits;
+
+	/* Fold the 64-bit value in halves with XOR until only the low
+	 * 8 bits carry the parity information */
+	for (bits = 32; bits >= 8; bits >>= 1)
+		input = input ^ (input >> bits);
+
+	return _parity_lut_[input & 0xff] ? true : false;
+}
+EXPORT_SYMBOL(img_mem_calc_parity);
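+
+/* Illustrative sketch only (not built): how _page_write()/_page_read()
+ * build the 64-bit pair fed to the parity calculation - virtual PFN in
+ * the low 32 bits, physical PFN in the high 32 bits. */
+#if 0
+static bool example_pte_parity(uint64_t virt, uint64_t paddr)
+{
+	uint64_t par_pair = (virt >> MIN_PAGE_SIZE_BITS) |
+			((paddr >> MIN_PAGE_SIZE_BITS) << (sizeof(uint32_t)*8));
+
+	/* Returns true when the pair contains an odd number of set bits */
+	return img_mem_calc_parity(par_pair);
+}
+#endif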
+
+/*
+ * Initialisation
+ */
+static int __init img_mem_init(void)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	idr_init(&mem_man->heaps);
+	idr_init(&mem_man->mem_ctxs);
+	mutex_init(&mem_man->mutex);
+	mem_man->cache_usage = 0;
+
+	return 0;
+}
+
+static void __exit img_mem_exit(void)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	struct mem_ctx *ctx;
+	int heap_id;
+	int ctx_id;
+
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	/* keeps mutex checks (WARN_ON) happy, this will never actually wait */
+	mutex_lock(&mem_man->mutex);
+
+	ctx_id = 0;
+	ctx = idr_get_next(&mem_man->mem_ctxs, &ctx_id);
+	while (ctx) {
+		pr_warn("%s derelict memory context %p!\n", __func__, ctx);
+		_img_mem_destroy_proc_ctx(ctx);
+		kfree(ctx);
+		ctx_id = 0;
+		ctx = idr_get_next(&mem_man->mem_ctxs, &ctx_id);
+	}
+
+	heap_id = IMG_MEM_MAN_MIN_HEAP;
+	heap = idr_get_next(&mem_man->heaps, &heap_id);
+	while (heap) {
+		pr_warn("%s derelict heap %d!\n", __func__, heap_id);
+		_img_mem_del_heap(heap);
+		kfree(heap);
+		heap_id = IMG_MEM_MAN_MIN_HEAP;
+		heap = idr_get_next(&mem_man->heaps, &heap_id);
+	}
+	idr_destroy(&mem_man->heaps);
+	idr_destroy(&mem_man->mem_ctxs);
+
+	mutex_unlock(&mem_man->mutex);
+
+	mutex_destroy(&mem_man->mutex);
+}
+
+module_init(img_mem_init);
+module_exit(img_mem_exit);
+
+MODULE_LICENSE("GPL");
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 213 - 0
driver/img_mem/img_mem_man_priv.h

@@ -0,0 +1,213 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_man_priv.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef IMG_MEM_MAN_PRIV_H
+#define IMG_MEM_MAN_PRIV_H
+
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+
+#include <img_mem_man.h>
+
+/* Memory context : one per process */
+struct mem_ctx {
+	unsigned id;
+	struct idr buffers;
+	struct list_head mmu_ctxs;
+	/* Used to track memory usage for all buffers and
+	 * separately for MMU page tables only */
+	size_t mem_usage_max;
+	size_t mem_usage_curr;
+	size_t mmu_usage_max;
+	size_t mmu_usage_curr;
+};
+
+/* An MMU mapping of a buffer */
+struct mmu_ctx_mapping {
+	struct mmu_ctx *mmu_ctx;
+	struct buffer *buffer;
+	struct imgmmu_map *map;
+	uint64_t virt_addr;
+	uint64_t cache_offset;
+	struct list_head mmu_ctx_entry; /* Entry in <mmu_ctx:mappings> */
+	struct list_head buffer_entry; /* Entry in <buffer:mappings> */
+};
+
+/* mmu context : one per session */
+struct mmu_ctx {
+	unsigned id;
+	struct device *device;
+	struct mmu_config config;
+	struct mem_ctx *mem_ctx; /* for memory allocations */
+	struct heap *heap; /* for memory allocations */
+	struct imgmmu_cat *mmu_cat;
+	struct list_head mappings; /* contains <struct mmu_ctx_mapping> */
+	struct list_head mem_ctx_entry; /* Entry in <mem_ctx:mmu_ctxs> */
+	int (*callback_fn)(enum img_mmu_callback_type type, int buf_id,
+					void *data);
+	void *callback_data;
+	unsigned long cache_phys_start;
+	uint32_t cache_size;
+};
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+struct buffer_fence {
+	struct dma_fence fence;
+	spinlock_t lock;
+};
+#endif
+
+/* Pdump cache info */
+struct buffer_pcache {
+	unsigned int last_offset;
+	struct scatterlist *last_sgl;
+	int last_idx;
+};
+
+/* buffer : valid in the context of a mem_ctx */
+struct buffer {
+	int id; /* Generated in <mem_ctx:buffers> */
+	size_t request_size;
+	size_t actual_size;
+	struct device *device;
+	struct mem_ctx *mem_ctx;
+	struct heap *heap;
+	struct list_head mappings; /* contains <struct mmu_ctx_mapping> */
+	void *kptr;
+	void *priv;
+	unsigned map_flags;
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+	struct buffer_fence *fence;
+#endif
+	struct buffer_pcache pcache;
+};
+
+/* vaa_entry : represents single entry in mmu_vaa */
+struct vaa_entry {
+	struct imgmmu_halloc *alloc;
+	struct list_head mmu_vaa_entry; /* links to mmu_vaa */
+};
+
+/* mmu vaa : one per session */
+struct mmu_vaa {
+	struct device *device;
+	struct imgmmu_heap *heap;
+	struct list_head entries; /* contains <struct vaa_entry> */
+};
+
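+/* Heap operations. Most entries are optional and callers check for NULL
+ * before use (the OCM heap in img_mem_ocm.c below implements only a minimal
+ * subset). Buffers that are mapped into the device MMU need the heap to
+ * provide either get_sg_table or get_page_array so their physical pages
+ * can be located. */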
+struct heap_ops {
+	int (*alloc)(struct device *device, struct heap *heap,
+				 size_t size, enum img_mem_attr attr,
+				 struct buffer *buffer);
+	int (*import)(struct device *device, struct heap *heap,
+					size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
+					struct buffer *buffer);
+	int (*export)(struct device *device, struct heap *heap,
+					size_t size, enum img_mem_attr attr, struct buffer *buffer,
+					uint64_t *buf_hnd);
+	void (*free)(struct heap *heap, struct buffer *buffer);
+	int (*map_um)(struct heap *heap, struct buffer *buffer,
+					struct vm_area_struct *vma);
+	int (*unmap_um)(struct heap *heap, struct buffer *buffer);
+	int (*map_km)(struct heap *heap, struct buffer *buffer);
+	int (*unmap_km)(struct heap *heap, struct buffer *buffer);
+	int (*get_sg_table)(struct heap *heap, struct buffer *buffer,
+					struct sg_table **sg_table, bool *use_sg_dma);
+	int (*get_page_array)(struct heap *heap, struct buffer *buffer,
+						uint64_t **addrs);
+	void (*sync_cpu_to_dev)(struct heap *heap, struct buffer *buffer);
+	void (*sync_dev_to_cpu)(struct heap *heap, struct buffer *buffer);
+	int (*set_offset)(struct heap *heap, size_t offs);
+	void (*destroy)(struct heap *heap);
+};
+
+struct heap {
+	int id; /* Generated in <mem_man:heaps> */
+	enum img_mem_heap_type type;
+	struct heap_ops *ops;
+	union heap_options options;
+	phys_addr_t (*to_dev_addr)(union heap_options *opts, phys_addr_t addr);
+	phys_addr_t (*to_host_addr)(union heap_options *opts, phys_addr_t addr);
+	bool cache_sync;
+	enum img_mem_attr alt_cache_attr;
+	void *priv;
+};
+
+int img_mem_unified_init(const struct heap_config *config, struct heap *heap);
+int img_mem_coherent_init(const struct heap_config *config, struct heap *heap);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+int img_mem_dmabuf_init(const struct heap_config *config, struct heap *heap);
+#endif
+
+#ifdef CONFIG_ION
+int img_mem_ion_init(const struct heap_config *config, struct heap *heap);
+#endif
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+int img_mem_carveout_init(const struct heap_config *config, struct heap *heap);
+#endif
+
+int img_mem_anonymous_init(const struct heap_config *config, struct heap *heap);
+
+int img_mem_ocm_init(const struct heap_config *config, struct heap *heap);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+typedef int vm_fault_t;
+#endif
+
+#endif /* IMG_MEM_MAN_PRIV_H */
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 173 - 0
driver/img_mem/img_mem_ocm.c

@@ -0,0 +1,173 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_ocm.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+static int trace_physical_pages;
+
+struct buffer_data {
+	uint64_t *addrs; /* array of physical addresses, upcast to 64-bit */
+	enum img_mem_attr mattr;  /* memory attributes */
+};
+
+static int ocm_heap_alloc(struct device *device, struct heap *heap,
+			size_t size, enum img_mem_attr attr,
+			struct buffer *buffer)
+{
+	struct buffer_data *buffer_data;
+	phys_addr_t phys_addr;
+	size_t pages, page;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	if (size > heap->options.ocm.size) {
+		pr_err("%s requested size bigger than ocm size !\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	buffer_data = kzalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!buffer_data)
+		return -ENOMEM;
+
+	pages = size / PAGE_SIZE;
+	buffer_data->addrs = kmalloc_array(pages, sizeof(uint64_t), GFP_KERNEL);
+	if (!buffer_data->addrs) {
+		kfree(buffer_data);
+		return -ENOMEM;
+	}
+
+	buffer_data->mattr = attr;
+
+	phys_addr = heap->options.ocm.phys;
+
+	page = 0;
+	while (page < pages) {
+		if (trace_physical_pages)
+			pr_info("%s phys %llx\n",
+				__func__, (unsigned long long)phys_addr);
+		buffer_data->addrs[page++] = phys_addr;
+		phys_addr += PAGE_SIZE;
+	}
+
+	buffer->priv = buffer_data;
+
+	pr_debug("%s buffer %d phys %#llx size %zu attrs %x\n", __func__,
+		buffer->id,
+		(unsigned long long)buffer_data->addrs[0],
+		size,
+		attr);
+	return 0;
+}
+
+static void ocm_heap_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	kfree(buffer_data->addrs);
+	kfree(buffer_data);
+}
+
+static int ocm_heap_get_page_array(struct heap *heap,
+				struct buffer *buffer,
+				uint64_t **addrs)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	*addrs = buffer_data->addrs;
+	return 0;
+}
+
+static void ocm_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops ocm_heap_ops = {
+	.alloc = ocm_heap_alloc,
+	.import = NULL,
+	.free = ocm_heap_free,
+	.map_um = NULL,
+	.unmap_um = NULL,
+	.map_km = NULL,
+	.unmap_km = NULL,
+	.get_sg_table = NULL,
+	.get_page_array = ocm_heap_get_page_array,
+	.sync_cpu_to_dev = NULL,
+	.sync_dev_to_cpu = NULL,
+	.set_offset = NULL,
+	.destroy = ocm_heap_destroy,
+};
+
+int img_mem_ocm_init(const struct heap_config *heap_cfg, struct heap *heap)
+{
+	pr_debug("%s phys:%#llx size:%zu attrs:%#x\n", __func__,
+		 (unsigned long long)heap->options.ocm.phys,
+		 heap->options.ocm.size, heap->options.ocm.hattr);
+
+	heap->ops = &ocm_heap_ops;
+	heap->priv = NULL;
+
+	return 0;
+}
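+
+/* Illustrative sketch only (not built): how an OCM heap description might be
+ * filled before it is registered with the memory manager. The heap_config
+ * field layout is assumed here to mirror struct heap (img_mem_man_priv.h);
+ * the base address, size and attribute are example values. */
+#if 0
+static const struct heap_config example_ocm_cfg = {
+	.type = IMG_MEM_HEAP_TYPE_OCM,
+	.options.ocm = {
+		.phys = 0x80000000,	/* example on-chip RAM base */
+		.size = 256 * 1024,	/* example on-chip RAM size */
+		.hattr = IMG_MEM_ATTR_UNCACHED,	/* example attribute */
+	},
+};
+#endif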
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 1002 - 0
driver/img_mem/img_mem_unified.c

@@ -0,0 +1,1002 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_unified.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/version.h>
+#ifdef CONFIG_X86
+#include <asm/cacheflush.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+#include <linux/set_memory.h>
+#endif
+#endif
+
+#include <img_mem_man.h>
+#include "img_mem_man_priv.h"
+
+static int trace_physical_pages;
+static int trace_mmap_fault;
+
+struct buffer_data {
+	struct sg_table *sgt;
+	enum img_mem_attr mattr;  /* memory attributes */
+	enum dma_data_direction dma_dir;
+	struct vm_area_struct *mapped_vma;
+	/* exporter via dmabuf */
+	struct dma_buf *dma_buf;
+	bool exported;
+};
+
+static void set_page_cache(struct page *page,
+		enum img_mem_attr attr)
+{
+#ifdef CONFIG_X86
+	if (attr & IMG_MEM_ATTR_UNCACHED)
+		set_memory_uc((unsigned long)page_address(page), 1);
+	else if (attr & IMG_MEM_ATTR_WRITECOMBINE)
+		set_memory_wc((unsigned long)page_address(page), 1);
+	else if (attr & IMG_MEM_ATTR_CACHED)
+		set_memory_wb((unsigned long)page_address(page), 1);
+#endif
+}
+
+/*
+ * dmabuf wrapper ops
+ */
+static struct sg_table *unified_map_dmabuf(struct dma_buf_attachment *attach,
+		enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+	struct buffer_data *buffer_data;
+	struct sg_table *sgt;
+	struct scatterlist *src, *dst;
+	int ret, i;
+
+	if (!buffer)
+		return NULL;
+
+	pr_debug("%s:%d client:%p buffer %d (0x%p)\n", __func__, __LINE__,
+			attach->dev, buffer->id, buffer);
+	buffer_data = buffer->priv;
+
+	/* Copy sgt so that we make an independent mapping */
+	sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (sgt == NULL)
+		return NULL;
+
+	ret = sg_alloc_table(sgt, buffer_data->sgt->orig_nents, GFP_KERNEL);
+	if (ret)
+		goto err_free;
+
+	src = buffer_data->sgt->sgl;
+	dst = sgt->sgl;
+	for (i = 0; i < buffer_data->sgt->orig_nents; ++i) {
+		sg_set_page(dst, sg_page(src), src->length, src->offset);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	ret = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (ret <= 0) {
+		pr_err("%s dma_map_sg failed!\n", __func__);
+		goto err_free_sgt;
+	}
+	sgt->nents = ret;
+
+	return sgt;
+
+err_free_sgt:
+	sg_free_table(sgt);
+err_free:
+	kfree(sgt);
+	return NULL;
+}
+
+static void unified_unmap_dmabuf(struct dma_buf_attachment *attach,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	struct buffer *buffer = attach->dmabuf->priv;
+
+	pr_debug("%s:%d client:%p buffer %d (0x%p)\n", __func__, __LINE__,
+			attach->dev, buffer ? buffer->id : -1, buffer);
+
+	dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+/* Called when the ref counter reaches zero */
+static void unified_release_dmabuf(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+
+	if (!buffer)
+		return;
+
+	buffer_data = buffer->priv;
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+			buffer->id, buffer);
+	if (!buffer_data)
+		return;
+
+	buffer_data->exported = false;
+}
+
+static void unified_dma_map(struct buffer *buffer);
+static void unified_dma_unmap(struct buffer *buffer);
+
+static int unified_begin_cpu_access_dmabuf(struct dma_buf *buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+					size_t start, size_t len,
+#endif
+					enum dma_data_direction direction)
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+	struct sg_table *sgt;
+
+	if (!buffer) {
+		/* Buffer may have been released, exit silently */
+		return 0;
+	}
+
+	buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+			buffer->id, buffer);
+
+	buffer_data->dma_dir = direction;
+	unified_dma_map(buffer);
+
+	sgt = buffer_data->sgt;
+	dma_sync_sg_for_cpu(buffer->device, sgt->sgl, sgt->orig_nents,
+						direction);
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+static void unified_end_cpu_access_dmabuf(struct dma_buf *buf,
+					size_t start, size_t len,
+					enum dma_data_direction direction)
+#else
+static int unified_end_cpu_access_dmabuf(struct dma_buf *buf,
+					enum dma_data_direction direction)
+#endif
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+	struct sg_table *sgt;
+
+	if (!buffer) {
+		/* Buffer may have been released, exit silently */
+		return 0;
+	}
+
+	buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+			buffer->id, buffer);
+
+	sgt = buffer_data->sgt;
+	dma_sync_sg_for_device(buffer->device, sgt->sgl, sgt->orig_nents,
+					direction);
+
+	unified_dma_unmap(buffer);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+	return 0;
+#endif
+}
+
+/* Called on file descriptor mmap */
+static int unified_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	struct buffer *buffer = buf->priv;
+	struct buffer_data *buffer_data;
+	struct scatterlist *sgl;
+	unsigned long addr;
+
+	if (!buffer)
+		return -EINVAL;
+
+	buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %#lx\n",
+		__func__, __LINE__,
+		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	sgl = buffer_data->sgt->sgl;
+	addr = vma->vm_start;
+	while (sgl) {
+		dma_addr_t phys = sg_phys(sgl);
+		unsigned long pfn = phys >> PAGE_SHIFT;
+		unsigned int len = sgl->length;
+		int ret;
+
+		if (vma->vm_end < (addr + len)) {
+			unsigned long size = vma->vm_end - addr;
+			pr_debug("%s:%d buffer %d (0x%p) truncating len=%#x to size=%#lx\n",
+				__func__, __LINE__,
+				buffer->id, buffer, len, size);
+			WARN(round_up(size, PAGE_SIZE) != size,
+				"VMA size %#lx not page aligned\n", size);
+			len = size;
+			if (!len) /* VM space is smaller than allocation */
+				break;
+		}
+
+		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
+		if (ret)
+			return ret;
+
+		addr += len;
+		sgl = sg_next(sgl);
+	}
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+static void *unified_kmap_dmabuf(struct dma_buf *buf, unsigned long page)
+{
+	pr_err("%s not supported\n", __func__);
+	return NULL;
+}
+#endif
+
+static int unified_map_km(struct heap *heap, struct buffer *buffer);
+static int unified_unmap_km(struct heap *heap, struct buffer *buffer);
+
+static void *unified_vmap_dmabuf(struct dma_buf *buf)
+{
+	struct buffer *buffer = buf->priv;
+	struct heap *heap;
+
+	if (!buffer)
+		return NULL;
+
+	heap = buffer->heap;
+
+	if (unified_map_km(heap, buffer))
+		return NULL;
+
+	pr_debug("%s:%d buffer %d kptr 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+
+	return buffer->kptr;
+}
+
+static void unified_vunmap_dmabuf(struct dma_buf *buf, void *kptr)
+{
+	struct buffer *buffer = buf->priv;
+	struct heap *heap;
+
+	if (!buffer)
+		return;
+
+	heap = buffer->heap;
+
+	pr_debug("%s:%d buffer %d kptr 0x%p (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer->kptr, kptr);
+
+	if (buffer->kptr == kptr)
+		unified_unmap_km(heap, buffer);
+}
+
+static const struct dma_buf_ops unified_dmabuf_ops = {
+	.map_dma_buf = unified_map_dmabuf,
+	.unmap_dma_buf = unified_unmap_dmabuf,
+	.release = unified_release_dmabuf,
+	.begin_cpu_access = unified_begin_cpu_access_dmabuf,
+	.end_cpu_access = unified_end_cpu_access_dmabuf,
+	.mmap = unified_mmap_dmabuf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+	.kmap_atomic = unified_kmap_dmabuf,
+	.kmap = unified_kmap_dmabuf,
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
+	.map_atomic = unified_kmap_dmabuf,
+	.map = unified_kmap_dmabuf,
+#endif
+#endif
+	.vmap = unified_vmap_dmabuf,
+	.vunmap = unified_vunmap_dmabuf,
+};
+
+static int unified_export(struct device *device, struct heap *heap,
+						 size_t size, enum img_mem_attr attr,
+						 struct buffer *buffer, uint64_t* buf_hnd)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct dma_buf *dma_buf;
+	int ret, fd;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+#endif
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!buffer_data)
+		/* Nothing to export ? */
+		return -ENOMEM;
+
+	if (buffer_data->exported) {
+		pr_err("%s: already exported!\n", __func__);
+		return -EBUSY;
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+	dma_buf = dma_buf_export(buffer, &unified_dmabuf_ops,
+			size, O_RDWR);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
+	dma_buf = dma_buf_export(buffer, &unified_dmabuf_ops,
+			size, O_RDWR, NULL);
+#else
+	exp_info.ops = &unified_dmabuf_ops;
+	exp_info.size = size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+	exp_info.resv = NULL;
+	dma_buf = dma_buf_export(&exp_info);
+#endif
+	if (IS_ERR(dma_buf)) {
+		pr_err("%s:dma_buf_export failed\n", __func__);
+		ret = PTR_ERR(dma_buf);
+		return ret;
+	}
+
+	get_dma_buf(dma_buf);
+	fd = dma_buf_fd(dma_buf, 0);
+	if (fd < 0) {
+		pr_err("%s: dma_buf_fd failed\n", __func__);
+		dma_buf_put(dma_buf);
+		return -EFAULT;
+	}
+	buffer_data->dma_buf = dma_buf;
+	buffer_data->exported = true;
+	*buf_hnd = (uint64_t)fd;
+
+	return 0;
+}
+
+static int unified_alloc(struct device *device, struct heap *heap,
+			size_t size, enum img_mem_attr attr,
+			struct buffer *buffer)
+{
+	struct buffer_data *buffer_data;
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	struct page *page, *tmp_page;
+	struct list_head pages_list;
+	int pages = 0;
+	int ret;
+	int min_order = heap->options.unified.min_order;
+	int max_order = heap->options.unified.max_order;
+
+	if (min_order == 0)
+		min_order = IMG_MIN_ALLOC_ORDER_DEFAULT;
+
+	if (max_order == 0)
+		max_order = IMG_MAX_ALLOC_ORDER_DEFAULT;
+
+	pr_debug("%s:%d buffer %d (0x%p) size:%zu attr:%x\n", __func__, __LINE__,
+		buffer->id, buffer, size, attr);
+
+	/* Allocations for MMU pages are still 4k so CPU page size is enough */
+	if (attr & IMG_MEM_ATTR_MMU)
+		min_order = get_order(size);
+
+	if (min_order > max_order) {
+		pr_err("min_alloc_order > max_alloc_order !\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&pages_list);
+
+	while ((long)size > 0) {
+		int order;
+
+		page = NULL;
+		/* Fit the buffer size starting from the biggest order.
+		 * When the system has run out of chunks of a given order,
+		 * fall back to smaller ones, down to the min_order constraint */
+		for (order = max_order; order >= min_order; order--) {
+			int page_order;
+
+			/* Skip orders bigger than the remaining size,
+			 * unless already down at min_order */
+			if (size < (PAGE_SIZE << order) && (order > min_order))
+				continue;
+
+			page = alloc_pages(heap->options.unified.gfp_type |
+					__GFP_COMP | __GFP_NOWARN, order);
+			if (!page)
+				continue;
+
+			page_order = compound_order(page);
+			if (trace_physical_pages)
+				pr_info("%s:%d phys %#llx size %lu page_address %p order:%d\n",
+					__func__, __LINE__,
+					(unsigned long long)page_to_phys(page),
+					PAGE_SIZE << page_order, page_address(page), page_order);
+
+			/* The code below is just a sanity check that
+			 * the dma streaming api is going to work with this device */
+			if (!(attr & IMG_MEM_ATTR_UNCACHED)) {
+				/*
+				 * dma_map_page() is probably going to fail if
+				 * alloc flags are GFP_HIGHMEM, since it is not
+				 * mapped to CPU. Hopefully, this will never happen
+				 * because memory of this sort cannot be used
+				 * for DMA anyway. To check if this is the case,
+				 * build with debug, set trace_physical_pages=1
+				 * and check if page_address printed above is NULL
+				 */
+				dma_addr_t dma_addr = dma_map_page(device,
+						page, 0, PAGE_SIZE << page_order, DMA_BIDIRECTIONAL);
+				if (dma_mapping_error(device, dma_addr)) {
+					__free_page(page);
+					pr_err("%s dma_map_page failed!\n", __func__);
+					ret = -EIO;
+					goto alloc_pages_failed;
+				}
+				dma_unmap_page(device, dma_addr,
+						PAGE_SIZE << page_order, DMA_BIDIRECTIONAL);
+			}
+			/* Record the max order taking the info
+			 * from the page we have just found */
+			max_order = page_order;
+			break;
+		}
+
+		if (!page) {
+			pr_err("%s alloc_pages failed!\n", __func__);
+			ret = -ENOMEM;
+			goto alloc_pages_failed;
+		}
+		size -= PAGE_SIZE << max_order;
+
+		/* Split pages back to order 0 ->
+		 * this is required to properly map into UM */
+		if (max_order) {
+			struct page *end = page + (1 << max_order);
+
+			split_page(page, max_order);
+			while (page < end) {
+				list_add_tail(&page->lru, &pages_list);
+				pages++;
+				/* There should not be any mapping attached to the page
+				 * at this point, but clear it just for sanity.
+				 * This is a workaround for kernel 4.15 & "split" pages. */
+				page->mapping = NULL;
+				page++;
+			}
+		} else {
+			list_add_tail(&page->lru, &pages_list);
+			pages++;
+		}
+	}
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto alloc_pages_failed;
+	}
+
+	ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+	if (ret)
+		goto sg_alloc_table_failed;
+
+	sgl = sgt->sgl;
+	list_for_each_entry_safe(page, tmp_page, &pages_list, lru) {
+		sg_set_page(sgl, page, PAGE_SIZE, 0);
+		set_page_cache(page, attr);
+		sgl = sg_next(sgl);
+		list_del(&page->lru);
+	}
+
+	pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
+		buffer->id, sgt->orig_nents);
+
+	buffer_data = kzalloc(sizeof(struct buffer_data), GFP_KERNEL);
+	if (!buffer_data) {
+		ret = -ENOMEM;
+		goto alloc_buffer_data_failed;
+	}
+
+	buffer->priv = buffer_data;
+	buffer_data->sgt = sgt;
+	buffer_data->mattr = attr;
+	buffer_data->dma_dir = DMA_NONE;
+	buffer_data->mapped_vma = NULL;
+
+	return 0;
+
+alloc_buffer_data_failed:
+	sg_free_table(sgt);
+sg_alloc_table_failed:
+	kfree(sgt);
+alloc_pages_failed:
+	list_for_each_entry_safe(page, tmp_page, &pages_list, lru) {
+		set_page_cache(page, IMG_MEM_ATTR_CACHED);
+		__free_page(page);
+	}
+	return ret;
+}
+
+static void unified_dma_map(struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	int ret = 0;
+
+	if (buffer_data->dma_dir == DMA_NONE)
+		buffer_data->dma_dir = DMA_BIDIRECTIONAL;
+
+	ret = dma_map_sg(buffer->device, sgt->sgl, sgt->orig_nents,
+			buffer_data->dma_dir);
+	if (ret <= 0) {
+		pr_err("%s dma_map_sg failed!\n", __func__);
+		buffer_data->dma_dir = DMA_NONE;
+		return;
+	}
+	pr_debug("%s:%d buffer %d orig_nents %d nents %d\n", __func__, __LINE__,
+				buffer->id, sgt->orig_nents, ret);
+	sgt->nents = ret;
+}
+
+static void unified_dma_unmap(struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+
+	if (buffer_data->dma_dir == DMA_NONE)
+		return;
+
+	dma_unmap_sg(buffer->device, sgt->sgl,
+			sgt->orig_nents, buffer_data->dma_dir);
+	buffer_data->dma_dir = DMA_NONE;
+
+	pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
+				buffer->id, sgt->orig_nents);
+}
+
+static void unified_free(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	struct scatterlist *sgl;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	/* If user forgot to unmap, free dma mapping anyway */
+	unified_dma_unmap(buffer);
+
+	if (buffer_data->dma_buf) {
+		/* Clear the back-pointer before dropping the reference:
+		 * dma_buf_put() may free the dma_buf */
+		buffer_data->dma_buf->priv = NULL;
+		dma_buf_put(buffer_data->dma_buf);
+	}
+
+	if (buffer->kptr) {
+		pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
+		vunmap(buffer->kptr);
+	}
+
+	if (buffer_data->mapped_vma)
+		buffer_data->mapped_vma->vm_private_data = NULL;
+
+	sgl = sgt->sgl;
+	while (sgl) {
+		struct page *page = sg_page(sgl);
+		if (page) {
+			set_page_cache(page, IMG_MEM_ATTR_CACHED);
+			__free_page(page);
+		}
+		sgl = sg_next(sgl);
+	}
+	sg_free_table(sgt);
+	kfree(sgt);
+	kfree(buffer_data);
+}
+
+static void unified_mmap_open(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+
+	buffer_data->mapped_vma = vma;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		if (vma->vm_flags & VM_WRITE)
+			buffer_data->dma_dir = DMA_TO_DEVICE;
+		else
+			buffer_data->dma_dir = DMA_FROM_DEVICE;
+
+		unified_dma_map(buffer);
+
+		/* User will read the buffer so invalidate D-cache */
+		if (buffer_data->dma_dir == DMA_FROM_DEVICE)
+			dma_sync_sg_for_cpu(buffer->device,
+					sgt->sgl,
+					sgt->orig_nents,
+					DMA_FROM_DEVICE);
+	}
+}
+
+static void unified_mmap_close(struct vm_area_struct *vma)
+{
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data;
+	struct sg_table *sgt;
+
+	if (!buffer)
+		return;
+
+	buffer_data = buffer->priv;
+	sgt = buffer_data->sgt;
+
+	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+			__func__, __LINE__, buffer->id, buffer, vma);
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
+		/* User may have written to the buffer so flush D-cache */
+		if (buffer_data->dma_dir == DMA_TO_DEVICE) {
+			dma_sync_sg_for_device(buffer->device,
+					sgt->sgl,
+					sgt->orig_nents,
+					DMA_TO_DEVICE);
+			dma_sync_sg_for_cpu(buffer->device,
+					sgt->sgl,
+					sgt->orig_nents,
+					DMA_FROM_DEVICE);
+		}
+
+		unified_dma_unmap(buffer);
+	}
+
+	buffer_data->mapped_vma = NULL;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+static vm_fault_t unified_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+#else
+static int unified_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#endif
+	struct buffer *buffer = vma->vm_private_data;
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	struct scatterlist *sgl;
+	struct page *page = NULL;
+	int err;
+	unsigned long addr;
+
+	if (trace_mmap_fault) {
+		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
+				__func__, __LINE__, buffer->id, buffer, vma);
+		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
+			__func__, __LINE__,
+			vma->vm_start, vma->vm_end,
+			vma->vm_end - vma->vm_start);
+	}
+
+	sgl = sgt->sgl;
+	addr = vma->vm_start;
+	while (sgl && addr < vma->vm_end) {
+		page = sg_page(sgl);
+		if (!page) {
+			pr_err("%s:%d no page!\n", __func__, __LINE__);
+			return VM_FAULT_SIGBUS;
+		}
+		if (trace_mmap_fault)
+			pr_info("%s:%d vmf addr %lx page_address:%p phys:%#llx\n",
+				__func__, __LINE__, addr, page,
+				(unsigned long long)page_to_phys(page));
+
+		err = vm_insert_page(vma, addr, page);
+		switch (err) {
+		case 0:
+		case -EAGAIN:
+		case -ERESTARTSYS:
+		case -EINTR:
+		case -EBUSY:
+			break; /* pass through */
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		default:
+			return VM_FAULT_SIGBUS;
+		}
+
+		addr += sgl->length;
+		sgl = sg_next(sgl);
+	}
+
+	return VM_FAULT_NOPAGE;
+}
+
+/* vma ops->fault handler is used to track user space mappings
+ * (inspired by other gpu/drm drivers from the kernel source tree)
+ * to properly call dma_sync_* ops when the mapping is destroyed
+ * (when user calls unmap syscall).
+ * vma flags are used to choose the correct dma mapping direction.
+ * By default the DMA_BIDIRECTIONAL mapping type is used (kernel space only).
+ * The above allows us to do automatic cache flushing/invalidation.
+ *
+ * Examples:
+ *  mmap() -> .open -> invalidate buffer cache
+ *  .. read content from buffer
+ *  unmap() -> .close -> do nothing
+ *
+ *  mmap() -> .open -> do nothing
+ *  .. write content to buffer
+ *  unmap() -> .close -> flush buffer cache
+ */
+static struct vm_operations_struct unified_mmap_vm_ops = {
+	.open = unified_mmap_open,
+	.close = unified_mmap_close,
+	.fault = unified_mmap_fault,
+};
+
+static int unified_map_um(struct heap *heap, struct buffer *buffer,
+				struct vm_area_struct *vma)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
+		__func__, __LINE__,
+		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
+
+	/* Warn when attempting to create a dma mapping
+	 * while one already exists */
+	WARN_ON(buffer_data->dma_dir != DMA_NONE);
+
+	/* CACHED by default */
+	if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	vma->vm_ops = &unified_mmap_vm_ops;
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_private_data = buffer;
+	vma->vm_pgoff = 0;
+
+	unified_mmap_open(vma);
+
+	return 0;
+}
+
+static int unified_map_km(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+	struct scatterlist *sgl = sgt->sgl;
+	unsigned int num_pages = sg_nents(sgl);
+	struct page **pages;
+	pgprot_t prot;
+	int i;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (buffer->kptr) {
+		pr_warn("%s called for already mapped buffer %d\n",
+			__func__, buffer->id);
+		return 0;
+	}
+
+	/*
+	 * Use vmalloc to avoid the kmalloc limit, where the maximum
+	 * possible allocation is 4 MiB. That cap would otherwise bound the
+	 * size of the buffer that can be mapped:
+	 * 4194304 / sizeof(struct page *) = 524288 page pointers,
+	 * i.e. 524288 4k pages, which represents ~2.1 GB.
+	 */
+	pages = vmalloc(num_pages * sizeof(struct page *));
+	if (!pages) {
+		pr_err("%s failed to allocate memory for pages\n", __func__);
+		return -ENOMEM;
+	}
+
+	prot = PAGE_KERNEL;
+	/* CACHED by default */
+	if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
+		prot = pgprot_noncached(prot);
+
+	/* Make dma mapping before mapping into kernel */
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
+		unified_dma_map(buffer);
+
+	i = 0;
+	while (sgl) {
+		pages[i++] = sg_page(sgl);
+		sgl = sg_next(sgl);
+	}
+
+	buffer->kptr = vmap(pages, num_pages, VM_MAP, prot);
+	vfree(pages);
+	if (!buffer->kptr) {
+		pr_err("%s vmap failed!\n", __func__);
+		/* undo the dma mapping made above */
+		unified_dma_unmap(buffer);
+		return -EFAULT;
+	}
+
+	pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+
+	return 0;
+}
+
+static int unified_unmap_km(struct heap *heap, struct buffer *buffer)
+{
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!buffer->kptr) {
+		pr_warn("%s called for already unmapped buffer %d\n",
+			__func__, buffer->id);
+		return -EFAULT;
+	}
+
+	unified_dma_unmap(buffer);
+
+	pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
+	vunmap(buffer->kptr);
+	buffer->kptr = NULL;
+
+	return 0;
+}
+
+static int unified_get_sg_table(struct heap *heap, struct buffer *buffer,
+				struct sg_table **sg_table, bool *use_sg_dma)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	if (!buffer_data)
+		return -EINVAL;
+
+	*sg_table = buffer_data->sgt;
+	*use_sg_dma = false;
+	return 0;
+}
+
+static void unified_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED) &&
+			buffer_data->dma_dir != DMA_NONE) {
+		dma_sync_sg_for_device(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_TO_DEVICE);
+		dma_sync_sg_for_cpu(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_FROM_DEVICE);
+	}
+}
+
+static void unified_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+	struct buffer_data *buffer_data = buffer->priv;
+	struct sg_table *sgt = buffer_data->sgt;
+
+	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED) &&
+			buffer_data->dma_dir != DMA_NONE)
+		dma_sync_sg_for_cpu(buffer->device,
+				sgt->sgl,
+				sgt->orig_nents,
+				DMA_FROM_DEVICE);
+}
+
+static void unified_heap_destroy(struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+}
+
+static struct heap_ops unified_heap_ops = {
+	.export = unified_export,
+	.alloc = unified_alloc,
+	.import = NULL,
+	.free = unified_free,
+	.map_um = unified_map_um,
+	.unmap_um = NULL, /* we are using vma ops to detect unmap event */
+	.map_km = unified_map_km,
+	.unmap_km = unified_unmap_km,
+	.get_sg_table = unified_get_sg_table,
+	.get_page_array = NULL,
+	.sync_cpu_to_dev = unified_sync_cpu_to_dev,
+	.sync_dev_to_cpu = unified_sync_dev_to_cpu,
+	.set_offset = NULL,
+	.destroy = unified_heap_destroy,
+};
+
+int img_mem_unified_init(const struct heap_config *heap_cfg, struct heap *heap)
+{
+	pr_debug("%s:%d\n", __func__, __LINE__);
+
+	heap->ops = &unified_heap_ops;
+
+	return 0;
+}
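+
+/*
+ * Minimal call-flow sketch (hypothetical caller; the real dispatch is
+ * done by the memory manager through struct heap_ops, presumably in
+ * img_mem_man.c). Shown only to illustrate how the ops table above is
+ * consumed:
+ *
+ *	img_mem_unified_init(cfg, heap);
+ *	heap->ops->alloc(dev, heap, size, attr, buffer);
+ *	heap->ops->map_km(heap, buffer);     (buffer->kptr is now valid)
+ *	...
+ *	heap->ops->unmap_km(heap, buffer);
+ *	heap->ops->free(heap, buffer);
+ */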
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 189 - 0
driver/img_mem/img_pdump.c

@@ -0,0 +1,189 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <stdarg.h>
+
+#include <img_mem_man.h>
+#include <vha_drv_common.h>
+
+/*
+ * create a pdump buffer
+ * Pdump buffers are currently identified by a hard-coded number:
+ * from PDUMP_TXT up to PDUMP_MAX.
+ * The buffer is allocated using vmalloc, because it might be several MBytes.
+ *
+ * If size==0, the buffer can be used for SAB, but not for LDB or TXT
+ * (in other words, no memory will be allocated,
+ * but it will still have a 'length').
+ */
+struct pdump_buf *img_pdump_create(struct pdump_descr* pdump, uint32_t pdump_num, size_t size)
+{
+	struct pdump_buf *pbuf = &pdump->pbufs[pdump_num];
+
+	if (pdump_num >= PDUMP_MAX) {
+		pr_err("%s: invalid pdump number:%d\n", __func__, pdump_num);
+		return NULL;
+	}
+	if (pbuf->ptr != NULL) {
+		pr_err("%s: pdump %d already created\n", __func__, pdump_num);
+		return NULL;
+	}
+
+	pbuf->size = size;
+	pbuf->len = 0;
+	if (size == 0)
+		return pbuf;
+
+	pbuf->ptr  = vmalloc(size);
+	pr_debug("%s %d buffer %p size:%zu!\n", __func__,
+			pdump_num, pbuf->ptr, size);
+	if (pbuf->ptr == NULL) {
+		pr_err("%s: failed to create pdump %d\n", __func__, pdump_num);
+		return NULL;
+	}
+	return pbuf;
+}
+EXPORT_SYMBOL(img_pdump_create);
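+
+/*
+ * Illustrative usage sketch (hypothetical caller, not code from this
+ * file): create a TXT buffer of a couple of MBytes, then append to it
+ * with img_pdump_write():
+ *
+ *	struct pdump_buf *pbuf;
+ *
+ *	pbuf = img_pdump_create(pdump, PDUMP_TXT, 2 * 1024 * 1024);
+ *	if (!pbuf)
+ *		return -ENOMEM;
+ *	img_pdump_write(pdump, PDUMP_TXT, data, len);
+ */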
+
+/*
+ * append binary data to one of the pdump buffers
+ */
+int img_pdump_write(struct pdump_descr* pdump, uint32_t pdump_num, const void *ptr, size_t size)
+{
+	struct pdump_buf *pbuf = &pdump->pbufs[pdump_num];
+	int ret = 0;
+
+	if (pdump_num >= PDUMP_MAX || ptr == NULL || pbuf->ptr == NULL)
+		return -EINVAL;
+
+	mutex_lock(&pdump->lock);
+	if (pbuf->len + size > pbuf->size)
+		size = pbuf->size - pbuf->len;
+
+	if (!size) {
+		pr_err("%s: no space left in the pdump %d buffer!\n",
+				__func__, pdump_num);
+		ret = -ENOSPC;
+		goto unlock;
+	}
+	pr_debug("%s %d buffer len:%zu size:%zu!\n", __func__,
+			pdump_num, size, pbuf->len);
+	memcpy(pbuf->ptr + pbuf->len, ptr, size);
+	pbuf->len += size;
+	pr_debug("%s end!\n", __func__);
+
+unlock:
+	mutex_unlock(&pdump->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(img_pdump_write);
+
+/*
+ * append a string to the TXT pdump buffer.
+ * returns the resulting length of the TXT buffer or an error.
+ */
+__printf(2, 3)
+int __img_pdump_printf(struct device* dev, const char *fmt, ...)
+{
+	struct pdump_descr* pdump = vha_pdump_dev_get_drvdata(dev);
+	struct pdump_buf *pbuf;
+	va_list ap;
+
+	BUG_ON(pdump == NULL);
+	pbuf = &pdump->pbufs[PDUMP_TXT];
+	if (pbuf->ptr == NULL)
+		return -EINVAL;
+
+	mutex_lock(&pdump->lock);
+	va_start(ap, fmt);
+	if (pbuf->len < pbuf->size) {
+#if defined(OSID)
+		/* Prepend OSID to pdump comments */
+		if (fmt[0] == '-' && fmt[1] == '-')
+			pbuf->len += sprintf(pbuf->ptr + pbuf->len,
+								 "-- (OS%d) ", OSID);
+#endif
+		pbuf->len += vsnprintf(pbuf->ptr + pbuf->len,
+							 pbuf->size - pbuf->len,
+							 fmt, ap);
+	}
+	/*
+	 * vsnprintf returns the number of bytes that WOULD have been printed
+	 */
+	pbuf->len = min(pbuf->size, pbuf->len);
+	va_end(ap);
+	mutex_unlock(&pdump->lock);
+
+	return pbuf->len;
+}
+EXPORT_SYMBOL(__img_pdump_printf);
+
+
+void img_pdump_destroy(struct pdump_descr* pdump)
+{
+	int i;
+
+	for (i = 0; i < PDUMP_MAX; i++) {
+		void *ptr = pdump->pbufs[i].ptr;
+
+		pdump->pbufs[i].ptr = NULL;
+		pr_debug("%s %d buffer %p!\n", __func__, i, ptr);
+		vfree(ptr);
+	}
+}
+EXPORT_SYMBOL(img_pdump_destroy);
+
+/*
+ * PDUMP generation is disabled until a PDUMP TXT buffer has been created
+ */
+bool img_pdump_enabled(struct pdump_descr* pdump)
+{
+	return pdump && pdump->pbufs[PDUMP_TXT].ptr != NULL;
+}
+EXPORT_SYMBOL(img_pdump_enabled);
+

+ 1449 - 0
driver/img_mem/imgmmu/imgmmu.c

@@ -0,0 +1,1449 @@
+/*!
+ *****************************************************************************
+ *
+ * @File         imgmmu.c
+ * @Description  Implementation of the MMU functions
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "mmulib/mmu.h"
+#include "mmulib/heap.h"	/* for struct imgmmu_halloc */
+
+/*-----------------------------------------------------------------------------
+ * Following elements are in the IMGMMU_lib_int module
+ *---------------------------------------------------------------------------*/
+
+/* access to MMU info and error printing function */
+#include "mmu_defs.h"
+
+#include <asm/page.h>
+
+static int pte_cache_mode;
+module_param(pte_cache_mode, int, 0444);
+MODULE_PARM_DESC(pte_cache_mode,
+    "PTE ax_cache signals. Acceptable values:<0-15>, refer to MMUv3 spec.");
+
+static bool pte_rb_check = true;
+module_param(pte_rb_check, bool, 0444);
+MODULE_PARM_DESC(pte_rb_check,
+    "Enables PTE read-back checks");
+
+/** variable page shift */
+static size_t g_mmupageshift = IMGMMU_PAGE_SHIFT;
+
+/* Page table index mask in virtual address - low bits */
+static uint64_t VIRT_PAGE_TBL_MASK(void) {
+	return ((((1ULL<<IMGMMU_CAT_SHIFT)-1) & ((1ULL<<IMGMMU_DIR_SHIFT)-1)) &
+			~(((1ULL<<g_mmupageshift)-1)));
+}
+
+/* Directory index mask in virtual address - middle bits */
+static const uint64_t VIRT_DIR_IDX_MASK
+	= (((1ULL<<IMGMMU_CAT_SHIFT)-1) & ~((1ULL<<IMGMMU_DIR_SHIFT)-1));
+
+/* Catalogue index mask in virtual address - high bits */
+static const uint64_t VIRT_CAT_IDX_MASK = (~((1ULL<<IMGMMU_CAT_SHIFT)-1));
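+
+/*
+ * Worked example (assuming, for illustration, IMGMMU_PAGE_SHIFT = 12,
+ * IMGMMU_DIR_SHIFT = 22 and IMGMMU_CAT_SHIFT = 32, i.e. 1024 entries
+ * per level with 4 KiB pages): the virtual address 0x123456000
+ * decomposes into
+ *	catalogue index  = vaddr >> 32             = 0x1
+ *	directory index  = (vaddr >> 22) & 0x3ff   = 0x08d
+ *	page-table index = (vaddr >> 12) & 0x3ff   = 0x056
+ */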
+
+
+/*
+ * Catalogue entry in the MMU - contains up to 1024 directory mappings
+ */
+struct imgmmu_cat {
+	/* Physical page used for the catalogue entries */
+	struct imgmmu_page *page;
+	/* All the page directory structures in
+	 * a static array of pointers
+	 */
+	struct imgmmu_dir **dir_map;
+
+	/*
+	 * Functions used to manage page allocation,
+	 * freeing and writing
+	 */
+	struct imgmmu_info config;
+
+	/* number of mappings using this catalogue (PCEs) */
+	uint32_t nmap;
+};
+
+/* Directory entry in the MMU - contains several page mappings */
+struct imgmmu_dir {
+	/* associated catalogue */
+	struct imgmmu_cat *cat;
+	/* Physical page used for the directory entries */
+	struct imgmmu_page *page;
+	/* All the page table structures
+	 * in a static array of pointers */
+	struct imgmmu_pagetab **page_map;
+
+	/*
+	 * Functions used to manage page allocation,
+	 * freeing and writing
+	 */
+	struct imgmmu_info config;
+
+	/* number of mappings using this directory (PDEs) */
+	uint32_t nmap;
+};
+
+/* Mapping a virtual address range and some entries in a directory */
+struct imgmmu_dirmap {
+	struct list_head entry; /* Entry in <imgmmu_map:dir_maps> */
+	/* associated directory */
+	struct imgmmu_dir *dir;
+	/*
+	 * device virtual address range associated with this mapping - not
+	 * owned by the mapping
+	 */
+	struct imgmmu_halloc virt_mem;
+
+	/* flag used when allocating */
+	unsigned int flags;
+	/* number of entries mapped (PTEs) */
+	uint32_t entries;
+};
+
+/* Mapping a virtual address and catalogue entries */
+struct imgmmu_map {
+	struct list_head dir_maps; /* contains <struct imgmmu_dirmap> */
+	/*
+	 * device virtual address associated with this mapping - not
+	 * owned by the mapping
+	 */
+	struct imgmmu_halloc virt_mem;
+
+	/* number of entries mapped (PCEs) */
+	uint32_t entries;
+};
+
+/* One page Table of the directory */
+struct imgmmu_pagetab {
+	/* associated directory */
+	struct imgmmu_dir *dir;
+	/* page used to store this mapping in the MMU */
+	struct imgmmu_page *page;
+
+	/* number of valid entries in this page */
+	uint32_t valid_entries;
+};
+
+/*
+ * local functions
+ */
+
+#define MMU_LOG_TMP 256
+
+/*
+ *  Write to stderr (or KERN_ERR if in a kernel module)
+ */
+void _mmu_log(int err, const char *function, uint32_t line,
+	      const char *format, ...)
+{
+	char _message_[MMU_LOG_TMP];
+	va_list args;
+
+	va_start(args, format);
+
+	vsnprintf(_message_, MMU_LOG_TMP, format, args);
+
+	va_end(args);
+
+	if (err)
+		pr_err("ERROR: %s:%u %s", function, line, _message_);
+	else
+		/* info, debug, ... */
+		pr_info("%s:%u %s", function, line, _message_);
+}
+
+/*
+ * Destruction of a PageTable
+ *
+ * warning: Does not verify if pages are still valid or not
+ */
+static void mmu_pagetab_destroy(struct imgmmu_pagetab *pagetab)
+{
+	WARN_ON(pagetab->dir == NULL);
+	/* the function should be configured */
+	WARN_ON(pagetab->dir->config.page_free == NULL);
+	/* the physical page should still be here */
+	WARN_ON(pagetab->page == NULL);
+
+	mmu_log_dbg("Destroy page table (phys addr 0x%x)\n",
+		     pagetab->page->phys_addr);
+	pagetab->dir->config.page_free(pagetab->page);
+	pagetab->page = NULL;
+
+	kfree(pagetab);
+}
+
+/*
+ * Extract the catalogue index from a virtual address
+ */
+static uint16_t mmu_cat_entry(uint64_t vaddr)
+{
+	return (vaddr & VIRT_CAT_IDX_MASK) >>
+			  IMGMMU_CAT_SHIFT;
+}
+
+/*
+ * Extract the directory index from a virtual address
+ */
+static uint16_t mmu_dir_entry(uint64_t vaddr)
+{
+	return (vaddr & VIRT_DIR_IDX_MASK) >>
+			  IMGMMU_DIR_SHIFT;
+}
+
+/*
+ * Extract the page table index from a virtual address
+ */
+static uint16_t mmu_page_entry(uint64_t vaddr)
+{
+	return (vaddr & VIRT_PAGE_TBL_MASK())
+			  >> g_mmupageshift;
+}
+
+/*
+ * Create a page table
+ *
+ * return: a pointer to the new page table structure and 0 in res,
+ * or NULL in case of error with res set to:
+ *  -ENOMEM if internal structure allocation failed
+ *  -EFAULT if physical page allocation failed
+ */
+static struct imgmmu_pagetab *mmu_pagetab_create(struct imgmmu_dir *dir,
+		int *res)
+{
+	struct imgmmu_pagetab *tab = NULL;
+	uint32_t i;
+
+	WARN_ON(res == NULL);
+	WARN_ON(dir == NULL);
+	WARN_ON(dir->config.page_alloc == NULL);
+	WARN_ON(dir->config.page_write == NULL);
+
+	tab = kzalloc(sizeof(struct imgmmu_pagetab), GFP_KERNEL);
+	if (tab == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for page table\n",
+			     sizeof(struct imgmmu_pagetab));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	tab->dir = dir;
+
+	tab->page = dir->config.page_alloc(dir->config.ctx, IMGMMU_PTYPE_PT);
+	if (tab->page == NULL) {
+		mmu_log_err("failed to allocate Page Table physical page\n");
+		kfree(tab);
+		*res = -EFAULT;
+		return NULL;
+	}
+	mmu_log_dbg("Create page table (phys addr 0x%x 0x%x)\n",
+		     tab->page->phys_addr, tab->page->cpu_addr);
+
+	/* invalidate all pages */
+	for (i = 0; i < IMGMMU_N_PAGE; i++)
+		dir->config.page_write(tab->page, i, 0, MMU_FLAG_INVALID, NULL);
+
+	/*
+	 * when non-UMA need to update the device
+	 * memory after setting it to 0
+	 */
+	if (dir->config.page_update != NULL)
+		dir->config.page_update(tab->page);
+
+	*res = 0;
+	return tab;
+}
+
+/* Sets mapped pages as invalid for the given pagetab entry and range */
+static void mmu_pagetab_rollback(struct imgmmu_dir *dir,
+		unsigned int page_offs, unsigned int dir_offs,
+		uint32_t entry, uint32_t from, uint32_t to)
+{
+	while (entry > 1) {
+		if (from == 0) {
+			entry--;
+			from = to;
+		}
+		from--;
+
+		if (page_offs == 0) {
+			/* -1 is done just after */
+			page_offs = IMGMMU_N_PAGE;
+			WARN_ON(dir_offs == 0);
+			dir_offs--;
+		}
+
+		page_offs--;
+
+		/* it should have been used before */
+		WARN_ON(dir->page_map[dir_offs] == NULL);
+		dir->config.page_write(
+			dir->page_map[dir_offs]->page,
+			page_offs, 0, MMU_FLAG_INVALID, NULL);
+		dir->page_map[dir_offs]->valid_entries--;
+	}
+}
+
+/*-----------------------------------------------------------------------------
+ * End of the IMGMMU_lib_int module
+ *---------------------------------------------------------------------------*/
+
+/*
+ * public functions already have a group in mmu.h
+ */
+
+static size_t g_mmupagesize = IMGMMU_PAGE_SIZE;
+
+size_t imgmmu_get_page_size(void)
+{
+	return g_mmupagesize;
+}
+
+size_t imgmmu_get_entry_shift(unsigned char type)
+{
+	if (type == IMGMMU_PTYPE_PT)
+		return g_mmupageshift;
+	else if (type == IMGMMU_PTYPE_PD)
+		return IMGMMU_DIR_SHIFT;
+	else if (type == IMGMMU_PTYPE_PC)
+		return IMGMMU_CAT_SHIFT;
+	else
+		return 0;
+}
+
+int imgmmu_set_page_size(size_t pagesize)
+{
+	if (pagesize > imgmmu_get_cpu_page_size()) {
+		mmu_log_dbg("MMU page size: %zu is bigger than CPU page size (%zu)\
+				and will only work with physically contiguous memory!\n",
+			     pagesize, imgmmu_get_cpu_page_size());
+	}
+	// get_order uses CPU page size as a base
+	g_mmupageshift = IMGMMU_PAGE_SHIFT + get_order(pagesize);
+
+	g_mmupagesize = pagesize;
+
+	return 0;
+}
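+
+/*
+ * Worked example (assuming 4 KiB CPU pages, so IMGMMU_PAGE_SHIFT = 12):
+ * imgmmu_set_page_size(16384) gives get_order(16384) = 2, hence
+ * g_mmupageshift = 12 + 2 = 14, matching the 16 KiB MMU page size.
+ */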
+
+size_t imgmmu_get_phys_size(void)
+{
+	return IMGMMU_PHYS_SIZE;
+}
+
+size_t imgmmu_get_virt_size(void)
+{
+	return IMGMMU_VIRT_SIZE;
+}
+
+static size_t g_cpupagesize = PAGE_SIZE;
+
+size_t imgmmu_get_cpu_page_size(void)
+{
+	return g_cpupagesize;
+}
+
+int imgmmu_set_cpu_page_size(size_t pagesize)
+{
+	if (pagesize != PAGE_SIZE) {
+		mmu_log_err("trying to change CPU page size from %zu to %zu\n",
+			     PAGE_SIZE, pagesize);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/* Proper directory will be populated on the first mapping request */
+static struct imgmmu_dir *mmu_dir_create(const struct imgmmu_info *info,
+			int *res)
+{
+	struct imgmmu_dir *dir = NULL;
+	uint32_t i;
+
+	WARN_ON(res == NULL);
+
+	/* invalid information in the directory config:
+	   - invalid page allocator and dealloc (page write can be NULL)
+	   - invalid virtual address representation
+	   - invalid page size
+	   - invalid MMU size */
+	if (info == NULL || info->page_alloc == NULL ||
+	    info->page_free == NULL) {
+		mmu_log_err("invalid MMU configuration\n");
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	dir = kzalloc(sizeof(struct imgmmu_dir), GFP_KERNEL);
+	if (dir == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for directory\n",
+			     sizeof(struct imgmmu_dir));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	dir->page_map = kzalloc(
+			IMGMMU_N_TABLE * sizeof(struct imgmmu_pagetab *),
+			GFP_KERNEL);
+	if (dir->page_map == NULL) {
+		kfree(dir);
+		mmu_log_err("failed to allocate %zu bytes for directory\n",
+			     IMGMMU_N_TABLE * sizeof(struct imgmmu_pagetab *));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	memcpy(&dir->config, info, sizeof(struct imgmmu_info));
+	if (info->page_write == NULL ||
+			info->page_read == NULL) {
+		mmu_log_err("wrong configuration!\n");
+		kfree(dir->page_map);
+		kfree(dir);
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	dir->page = info->page_alloc(info->ctx, IMGMMU_PTYPE_PD);
+	if (dir->page == NULL) {
+		mmu_log_err("failed to allocate directory physical page\n");
+		kfree(dir->page_map);
+		kfree(dir);
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	mmu_log_dbg("create MMU directory (phys page 0x%x 0x%x)\n",
+		     dir->page->phys_addr, dir->page->cpu_addr);
+	/* now we have a valid imgmmu_dir structure */
+
+	/* invalidate all entries */
+	for (i = 0; i < IMGMMU_N_TABLE; i++)
+		dir->config.page_write(dir->page, i, 0, MMU_FLAG_INVALID, NULL);
+
+	/* when non-UMA need to update the device memory */
+	if (dir->config.page_update != NULL)
+		dir->config.page_update(dir->page);
+
+	*res = 0;
+	return dir;
+}
+
+struct imgmmu_cat *imgmmu_cat_create(const struct imgmmu_info *info,
+			int *res)
+{
+	struct imgmmu_cat *cat = NULL;
+	uint32_t i;
+
+	WARN_ON(res == NULL);
+
+	/* invalid information in the directory config:
+	   - invalid page allocator and dealloc (page write can be NULL)
+	 */
+	if (info == NULL || info->page_alloc == NULL ||
+	    info->page_free == NULL) {
+		mmu_log_err("invalid MMU configuration\n");
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	cat = kzalloc(sizeof(struct imgmmu_cat), GFP_KERNEL);
+	if (cat == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for catalogue\n",
+			     sizeof(struct imgmmu_cat));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	cat->dir_map = kzalloc(
+			IMGMMU_N_DIR * sizeof(struct imgmmu_dir *),
+			GFP_KERNEL);
+	if (cat->dir_map == NULL) {
+		kfree(cat);
+		mmu_log_err("failed to allocate %zu bytes for catalogue\n",
+			     IMGMMU_N_DIR * sizeof(struct imgmmu_dir *));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	memcpy(&cat->config, info, sizeof(struct imgmmu_info));
+	if (info->page_write == NULL ||
+			info->page_read == NULL) {
+		mmu_log_err("wrong configuration!\n");
+		kfree(cat->dir_map);
+		kfree(cat);
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	cat->page = info->page_alloc(info->ctx, IMGMMU_PTYPE_PC);
+	if (cat->page == NULL) {
+		mmu_log_err("failed to allocate catalogue physical page\n");
+		kfree(cat->dir_map);
+		kfree(cat);
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	mmu_log_dbg("create MMU catalogue (phys page 0x%x 0x%x)\n",
+		     cat->page->phys_addr, cat->page->cpu_addr);
+	/* now we have a valid imgmmu_cat structure */
+
+	/* invalidate all entries */
+	for (i = 0; i < IMGMMU_N_DIR; i++)
+		cat->config.page_write(cat->page, i, 0, MMU_FLAG_INVALID, NULL);
+
+	/* when non-UMA need to update the device memory */
+	if (cat->config.page_update != NULL)
+		cat->config.page_update(cat->page);
+
+	*res = 0;
+	return cat;
+}
+
+static int mmu_dir_destroy(struct imgmmu_dir *dir)
+{
+	uint32_t i;
+
+	if (dir == NULL) {
+		/* could be an assert */
+		mmu_log_err("dir is NULL\n");
+		return -EINVAL;
+	}
+
+	if (dir->nmap > 0)
+		/* mappings should have been destroyed! */
+		mmu_log_err("directory still has %u mapping attached to it\n",
+			     dir->nmap);
+
+	WARN_ON(dir->config.page_free == NULL);
+	WARN_ON(dir->page_map == NULL);
+
+	mmu_log_dbg("destroy MMU dir (phys page 0x%x)\n",
+		     dir->page->phys_addr);
+
+	/* first we destroy the directory entry */
+	dir->config.page_free(dir->page);
+	dir->page = NULL;
+
+	/* destroy every mapping that still exists */
+	for (i = 0; i < IMGMMU_N_TABLE; i++)
+		if (dir->page_map[i] != NULL) {
+			mmu_pagetab_destroy(dir->page_map[i]);
+			dir->page_map[i] = NULL;
+		}
+
+	kfree(dir->page_map);
+	kfree(dir);
+	return 0;
+}
+
+int imgmmu_cat_destroy(struct imgmmu_cat *cat)
+{
+	uint32_t i;
+
+	if (cat == NULL) {
+		/* could be an assert */
+		mmu_log_err("cat is NULL\n");
+		return -EINVAL;
+	}
+
+	if (cat->nmap > 0)
+		/* mappings should have been destroyed! */
+		mmu_log_err("catalogue still has %u mapping attached to it\n",
+			     cat->nmap);
+
+	WARN_ON(cat->config.page_free == NULL);
+	WARN_ON(cat->dir_map == NULL);
+
+	mmu_log_dbg("destroy MMU cat (phys page 0x%x)\n",
+		     cat->page->phys_addr);
+
+	/* first we destroy the catalogue entry */
+	cat->config.page_free(cat->page);
+	cat->page = NULL;
+
+	/* destroy every mapping that still exists */
+	for (i = 0; i < IMGMMU_N_DIR; i++)
+		if (cat->dir_map[i] != NULL) {
+			mmu_dir_destroy(cat->dir_map[i]);
+			cat->dir_map[i] = NULL;
+		}
+
+	kfree(cat->dir_map);
+	kfree(cat);
+	return 0;
+}
+
+struct imgmmu_page *imgmmu_cat_get_page(struct imgmmu_cat *cat)
+{
+	WARN_ON(cat == NULL);
+
+	return cat->page;
+}
+
+uint64_t imgmmu_cat_get_pte(struct imgmmu_cat *cat,
+					     uint64_t vaddr)
+{
+	uint16_t cat_entry = 0;
+	uint16_t dir_entry = 0;
+	uint16_t tab_entry = 0;
+	struct imgmmu_dir *dir;
+	struct imgmmu_pagetab *tab;
+	uint64_t addr;
+	unsigned flags;
+
+	if (vaddr & (imgmmu_get_page_size()-1))
+		return (uint64_t)-1;
+
+	WARN_ONCE(cat == NULL, "No MMU entries");
+	if (cat == NULL || cat->config.page_read == NULL)
+		return (uint64_t)-1;
+
+	cat_entry = mmu_cat_entry(vaddr);
+	dir_entry = mmu_dir_entry(vaddr);
+	tab_entry = mmu_page_entry(vaddr);
+
+	dir = cat->dir_map[cat_entry];
+	if (dir == NULL || dir->page_map[dir_entry] == NULL)
+		return (uint64_t)-1;
+
+	addr = cat->config.page_read(
+			cat->page, cat_entry, NULL, &flags);
+	/* Check consistency of PCE */
+	if (addr != dir->page->phys_addr) {
+		mmu_log_err("PCE entry inconsistent!\n");
+		return (uint64_t)-1;
+	}
+
+	tab = dir->page_map[dir_entry];
+	if (tab == NULL || dir->page == NULL)
+		return (uint64_t)-1;
+
+	addr = dir->config.page_read(
+			dir->page, dir_entry, NULL, &flags);
+	/* Check consistency of PDE */
+	if (addr != tab->page->phys_addr) {
+		mmu_log_err("PDE entry inconsistent!\n");
+		return (uint64_t)-1;
+	}
+
+	addr = dir->config.page_read(
+			tab->page, tab_entry, NULL, &flags);
+
+	return addr|flags;
+}
+
+uint64_t imgmmu_cat_override_phys_addr(struct imgmmu_cat *cat,
+				  uint64_t vaddr, uint64_t new_phys_addr)
+{
+	uint32_t cat_entry = 0;
+	uint32_t dir_entry = 0;
+	uint32_t tab_entry = 0;
+	struct imgmmu_dir *dir;
+	unsigned flags = 0;
+
+	WARN_ON(cat == NULL);
+	if (cat->config.page_read == NULL)
+		return (uint64_t)-1;
+
+	if (cat->config.page_write == NULL)
+		return (uint64_t)-1;
+
+	cat_entry = mmu_cat_entry(vaddr);
+	dir_entry = mmu_dir_entry(vaddr);
+	tab_entry = mmu_page_entry(vaddr);
+
+	dir = cat->dir_map[cat_entry];
+	WARN_ON(dir == NULL);
+	if (dir->page_map[dir_entry] == NULL)
+		return (uint64_t)-1;
+
+	(void)dir->config.page_read(
+		dir->page_map[dir_entry]->page, tab_entry, NULL, &flags);
+
+	if (!(flags & MMU_FLAG_VALID))
+		return (uint64_t)-1;
+
+	dir->config.page_write(
+			dir->page_map[dir_entry]->page,
+			tab_entry,
+			new_phys_addr | (uint64_t)pte_cache_mode << MMU_PTE_AXCACHE_SHIFT,
+			flags | IMGMMU_BYPASS_ADDR_TRANS, NULL);
+
+	return 0;
+}
+
+static struct imgmmu_dirmap *mmu_dir_map(struct imgmmu_dir *dir,
+				struct imgmmu_halloc *virt_mem,
+				unsigned int map_flag,
+				int(*phys_iter_next) (void *arg, uint64_t *next),
+				void *phys_iter_arg,
+				void *priv,
+				int *res)
+{
+	unsigned int first_dir = 0, first_page = 0;
+	unsigned int dir_offs = 0, page_offs = 0;
+	uint32_t entries = 0;
+	uint32_t i, d;
+	const uint32_t duplicate = imgmmu_get_cpu_page_size() < imgmmu_get_page_size() ?
+		1 : imgmmu_get_cpu_page_size() / imgmmu_get_page_size();
+	int ret = 0;
+	struct imgmmu_dirmap *map = NULL;
+
+	/* in non-UMA, updates on pages need to be done;
+	 * store the index of directory entry pages to update */
+	uint32_t *pages_to_update;
+	/* number of pages in pages_to_update
+	 * (will be at least 1 for the first_page to update) */
+	uint32_t num_pages_to_update = 0;
+	/* to know if we also need to update the directory page
+	 * (creation of new page) */
+	bool dir_update = false;
+
+	WARN_ON(res == NULL);
+	WARN_ON(dir == NULL);
+	WARN_ON(virt_mem == NULL);
+	/* otherwise PAGE_SIZE and MMU page size are not set properly! */
+	WARN_ON(duplicate == 0);
+
+	entries = virt_mem->size / IMGMMU_GET_MAX_PAGE_SIZE();
+	if (virt_mem->size % imgmmu_get_page_size() != 0 || entries == 0) {
+		mmu_log_err("invalid allocation size\n");
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	if ((map_flag & MMU_FLAG_VALID) != 0) {
+		mmu_log_err("valid flag (0x%x) is set in the flags 0x%x\n",
+			     MMU_FLAG_VALID, map_flag);
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	/* has to be dynamically allocated because it is bigger than 1k
+	 * (max stack in the kernel)
+	 * IMGMMU_N_TABLE is 1024 for 4096B pages,
+	 * that's a 4k allocation (1 page) */
+	pages_to_update = kzalloc(IMGMMU_N_TABLE * sizeof(uint32_t), GFP_KERNEL);
+	if (pages_to_update == NULL) {
+		mmu_log_err("Failed to allocate the update index table (%zu Bytes)\n",
+			     IMGMMU_N_TABLE * sizeof(uint32_t));
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	/* manage multiple page table mapping */
+
+	first_dir = mmu_dir_entry(virt_mem->vaddr);
+	first_page = mmu_page_entry(virt_mem->vaddr);
+
+	WARN_ON(first_dir > IMGMMU_N_TABLE);
+	WARN_ON(first_page > IMGMMU_N_PAGE);
+
+	/* verify that the pages that should be used are available */
+	dir_offs = first_dir;
+	page_offs = first_page;
+
+	/*
+	 * loop over the number of entries given by the CPU allocator;
+	 * the CPU page size can be larger than the MMU page size, in which
+	 * case entries need to be "duplicated", creating the extra
+	 * physical addresses at MMU-page offsets
+	 */
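+	/* e.g. (hypothetical sizes): with 16 KiB CPU pages and 4 KiB MMU
+	 * pages, duplicate = 4, so each physical address delivered by the
+	 * iterator is written out as 4 consecutive PTEs at 4 KiB offsets */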
+	for (i = 0; i < entries * duplicate; i++) {
+		if (page_offs >= IMGMMU_N_PAGE) {
+			WARN_ON(dir_offs > IMGMMU_N_TABLE);
+			dir_offs++;	/* move to next directory */
+			WARN_ON(dir_offs > IMGMMU_N_TABLE);
+			page_offs = 0;	/* using its first page */
+		}
+
+		/* if dir->page_map[dir_offs] == NULL not yet allocated it
+		   means all entries are available */
+		if (pte_rb_check &&
+				dir->page_map[dir_offs] != NULL) {
+			/*
+			 * inside a pagetable
+			 * verify that the required offset is invalid
+			 */
+			unsigned flags = 0;
+			(void)dir->config.page_read(
+					dir->page_map[dir_offs]->page, page_offs, priv, &flags);
+
+			if (flags & MMU_FLAG_VALID) {
+				mmu_log_err("PTE is currently in use\n");
+				ret = -EBUSY;
+				break;
+			}
+		}
+		/* PageTable struct exists */
+		page_offs++;
+	} /* for all needed entries */
+
+	/* this means one entry was not invalid or not enough pages were given */
+	if (ret != 0) {
+		/* message already printed */
+		*res = ret;
+		kfree(pages_to_update);
+		return NULL;
+	}
+
+	map = kzalloc(sizeof(struct imgmmu_dirmap), GFP_KERNEL);
+	if (map == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
+				sizeof(struct imgmmu_dirmap));
+		*res = -ENOMEM;
+		kfree(pages_to_update);
+		return NULL;
+	}
+	map->dir = dir;
+	map->virt_mem = *virt_mem;
+	map->flags = map_flag;
+
+	/* we now know that all pages are available */
+	dir_offs = first_dir;
+	page_offs = first_page;
+
+	pages_to_update[num_pages_to_update] = first_dir;
+	num_pages_to_update++;
+
+	for (i = 0; i < entries; i++) {
+		uint64_t curr_phy_addr;
+
+		if (phys_iter_next(phys_iter_arg, &curr_phy_addr) != 0) {
+			mmu_log_err("not enough entries in physical address array/sg list!\n");
+			kfree(map);
+			kfree(pages_to_update);
+			*res = -EFAULT;
+			return NULL;
+		}
+		if ((curr_phy_addr & (imgmmu_get_page_size()-1)) != 0) {
+			mmu_log_err("current physical address: %llx "
+					"is not aligned to MMU page size: %zu!\n",
+					curr_phy_addr, imgmmu_get_page_size());
+			kfree(map);
+			kfree(pages_to_update);
+			*res = -EFAULT;
+			return NULL;
+		}
+		for (d = 0; d < duplicate; d++) {
+			if (page_offs >= IMGMMU_N_PAGE) {
+				dir_offs++;	/* move to next directory */
+				page_offs = 0;	/* using its first page */
+
+				pages_to_update[num_pages_to_update] = dir_offs;
+				num_pages_to_update++;
+			}
+
+			/* this page table object does not exist yet, create it */
+			if (dir->page_map[dir_offs] == NULL) {
+				struct imgmmu_pagetab *pagetab;
+
+				pagetab = mmu_pagetab_create(dir, res);
+				dir->page_map[dir_offs] = pagetab;
+
+				if (dir->page_map[dir_offs] == NULL) {
+					mmu_log_err("failed to create a page table\n");
+
+					/* invalidate all already mapped pages
+					 * do not destroy the created pages */
+					mmu_pagetab_rollback(dir,
+							page_offs,
+							dir_offs,
+							i,
+							d,
+							duplicate);
+
+					kfree(map);
+					kfree(pages_to_update);
+					*res = -EFAULT;
+					return NULL;
+				}
+				pagetab->page->virt_base = (dir->page->virt_base  &
+						~(VIRT_PAGE_TBL_MASK())) +
+						((1<<IMGMMU_DIR_SHIFT) * dir_offs);
+
+				/*
+				 * make this page table valid
+				 * should be dir_offs
+				 */
+				dir->config.page_write(
+					dir->page,
+					dir_offs,
+					pagetab->page->phys_addr,
+					MMU_FLAG_VALID, NULL);
+				dir_update = true;
+			}
+
+			if (pte_rb_check) {
+				unsigned flags = 0;
+				(void)dir->config.page_read(
+						dir->page_map[dir_offs]->page, page_offs, priv, &flags);
+
+				if (flags & MMU_FLAG_VALID) {
+					mmu_log_err("PTE is currently in use (2)\n");
+					kfree(map);
+					kfree(pages_to_update);
+					*res = -EFAULT;
+					return NULL;
+				}
+			}
+			/*
+			 * map this particular page in the page table
+			 * use d*(MMU page size) to add additional entries
+			 * from the given  physical address with the correct
+			 * offset for the MMU
+			 */
+			dir->config.page_write(
+				dir->page_map[dir_offs]->page,
+				page_offs,
+				(curr_phy_addr + d * imgmmu_get_page_size()) |
+					(uint64_t)pte_cache_mode << MMU_PTE_AXCACHE_SHIFT,
+				map->flags | MMU_FLAG_VALID, priv);
+			dir->page_map[dir_offs]->valid_entries++;
+
+			if (pte_rb_check) {
+				unsigned flags = 0;
+
+				uint64_t phys = dir->config.page_read(
+						dir->page_map[dir_offs]->page, page_offs, priv, &flags);
+
+				if (flags != (map->flags | MMU_FLAG_VALID) ||
+						(phys != (curr_phy_addr + d * imgmmu_get_page_size())) ) {
+					mmu_log_err("PTE read back failed\n");
+					kfree(map);
+					kfree(pages_to_update);
+					*res = -EFAULT;
+					return NULL;
+				}
+			}
+
+			page_offs++;
+		} /* for duplicate */
+	} /* for entries */
+
+	map->entries = entries * duplicate;
+	/* one more mapping is related to this directory */
+	dir->nmap++;
+
+	/* if non UMA we need to update device memory */
+	if (dir->config.page_update != NULL) {
+		while (num_pages_to_update > 0) {
+			uint32_t idx = pages_to_update[num_pages_to_update - 1];
+			dir->config.page_update(
+				dir->page_map[idx]->page);
+			num_pages_to_update--;
+		}
+		if (dir_update == true)
+			dir->config.page_update(
+				dir->page);
+	}
+
+	*res = 0;
+	kfree(pages_to_update);
+	return map;
+}
+
+/*
+ * with physical address array
+ */
+
+struct linear_phys_iter {
+	uint64_t *array;
+	int idx;
+};
+
+static int linear_phys_iter_next(void *arg, uint64_t *next)
+{
+	struct linear_phys_iter *iter = arg;
+
+	int advance = imgmmu_get_cpu_page_size() < imgmmu_get_page_size() ?
+		imgmmu_get_page_size() / imgmmu_get_cpu_page_size() : 1;
+
+	*next = iter->array[iter->idx];	/* boundary check? */
+	iter->idx += advance;
+	return 0;
+}
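+
+/* e.g. (hypothetical sizes): with 4 KiB CPU pages and 16 KiB MMU pages,
+ * advance = 4, so each call consumes four consecutive array entries and
+ * only the first physical address of each group is returned */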
+
+struct imgmmu_map *imgmmu_cat_map_arr(struct imgmmu_cat *cat,
+					uint64_t *phys_page_list,
+					const struct imgmmu_halloc *virt_mem,
+					unsigned int map_flag,
+					void *priv,
+					int *res)
+{
+	uint16_t idx;
+	struct linear_phys_iter arg = { phys_page_list, 0 };
+	struct imgmmu_map *map = NULL;
+	struct imgmmu_dirmap *dir_map = NULL;
+	struct imgmmu_halloc virt_mem_range;
+
+	if (virt_mem->vaddr >> IMGMMU_VIRT_SIZE) {
+		mmu_log_err("Virtual address beyond %u bits!\n",
+				IMGMMU_VIRT_SIZE);
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	if (virt_mem->vaddr & (imgmmu_get_page_size()-1)) {
+		mmu_log_err("Virtual address not aligned to %zu!\n",
+				imgmmu_get_page_size());
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	map = kzalloc(sizeof(struct imgmmu_map), GFP_KERNEL);
+	if (map == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
+				sizeof(struct imgmmu_map));
+		*res = -ENOMEM;
+		return NULL;
+	}
+	INIT_LIST_HEAD(&map->dir_maps);
+	/* Store the whole virtual address space for this mapping */
+	map->virt_mem = *virt_mem;
+	/* Set starting address & total size */
+	virt_mem_range.vaddr = virt_mem->vaddr;
+	virt_mem_range.size = virt_mem->size;
+
+	do {
+		struct imgmmu_dir *dir;
+
+		/* Determine catalogue entry (PCE-> PD) */
+		idx = mmu_cat_entry(virt_mem_range.vaddr);
+		dir = cat->dir_map[idx];
+		if (dir == NULL) {
+			dir = mmu_dir_create(
+					&cat->config, res);
+			if (*res != 0)
+				goto error;
+
+			dir->page->virt_base = virt_mem_range.vaddr &
+						~(VIRT_DIR_IDX_MASK | VIRT_PAGE_TBL_MASK());
+
+			dir->cat = cat;
+			WARN_ON(cat->dir_map[idx] != NULL);
+			cat->dir_map[idx] = dir;
+			/* Mark PCE valid and store PD address */
+			cat->config.page_write(
+				cat->page,
+				idx, dir->page->phys_addr,
+				MMU_FLAG_VALID, NULL);
+			if (cat->config.page_update != NULL)
+				cat->config.page_update(cat->page);
+			cat->nmap++;
+		}
+
+		/* Need to handle buffer spanning across the GB boundaries */
+		if (((virt_mem_range.vaddr % (1ULL<<IMGMMU_CAT_SHIFT)) +
+		  virt_mem_range.size) >= (1ULL<<IMGMMU_CAT_SHIFT))
+			virt_mem_range.size = (1ULL<<IMGMMU_CAT_SHIFT) -
+				(virt_mem_range.vaddr % (1ULL<<IMGMMU_CAT_SHIFT));
+
+		dir_map = mmu_dir_map(dir, &virt_mem_range, map_flag,
+			   linear_phys_iter_next, &arg, priv, res);
+
+		if (dir_map) {
+			/* Update starting address */
+			virt_mem_range.vaddr += virt_mem_range.size;
+			/* and bytes left ... */
+			virt_mem_range.size =  (virt_mem->vaddr + virt_mem->size) -
+					virt_mem_range.vaddr;
+
+			list_add(&dir_map->entry, &map->dir_maps);
+		}
+
+	} while (dir_map && *res == 0 && virt_mem_range.size);
+
+	if (dir_map)
+		/* If last dir mapping succeeded,
+		 * return overlay container mapping structure */
+		return map;
+	else
+error:
+		imgmmu_cat_unmap(map);
+		return NULL;
+}
+
+/*
+ * with sg
+ */
+
+struct sg_phys_iter {
+	struct scatterlist *sgl;
+	unsigned int offset;
+	bool use_sg_dma;
+};
+
+static int sg_phys_iter_next(void *arg, uint64_t *next)
+{
+	struct sg_phys_iter *iter = arg;
+	phys_addr_t phys;
+	unsigned int len;
+
+	if (!iter->sgl)
+		return -EFAULT;
+
+	if (iter->use_sg_dma) {
+		if (sg_dma_address(iter->sgl) == ~(dma_addr_t)0 ||
+				!sg_dma_len(iter->sgl))
+			return -EFAULT;
+
+		phys = sg_dma_address(iter->sgl);
+		len = sg_dma_len(iter->sgl);
+	} else {
+		phys = sg_phys(iter->sgl);
+		len = iter->sgl->length;
+	}
+
+	*next = phys + iter->offset;
+	iter->offset += IMGMMU_GET_MAX_PAGE_SIZE();
+
+	if (iter->offset >= len) {
+		int advance = iter->offset/len;
+		while (iter->sgl) {
+			iter->sgl = sg_next(iter->sgl);
+			advance--;
+			if (!advance)
+				break;
+		}
+		iter->offset = 0;
+	}
+
+	return 0;
+}
+
+struct imgmmu_map *imgmmu_cat_map_sg(
+	struct imgmmu_cat *cat,
+	struct scatterlist *phys_page_sg,
+	bool use_sg_dma,
+	const struct imgmmu_halloc *virt_mem,
+	unsigned int map_flag,
+	void *priv,
+	int *res)
+{
+	uint16_t idx;
+	struct sg_phys_iter arg = { phys_page_sg, 0, use_sg_dma};
+	struct imgmmu_map *map = NULL;
+	struct imgmmu_dirmap *dir_map = NULL;
+	struct imgmmu_halloc virt_mem_range;
+
+	if (virt_mem->vaddr >> IMGMMU_VIRT_SIZE) {
+		mmu_log_err("Virtual address beyond %u bits!\n",
+				IMGMMU_VIRT_SIZE);
+		*res = -EFAULT;
+		return NULL;
+	}
+	if (virt_mem->vaddr & (imgmmu_get_page_size()-1)) {
+		mmu_log_err("Virtual address not aligned to %zu!\n",
+				imgmmu_get_page_size());
+		*res = -EFAULT;
+		return NULL;
+	}
+
+	map = kzalloc(sizeof(struct imgmmu_map), GFP_KERNEL);
+	if (map == NULL) {
+		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
+				sizeof(struct imgmmu_map));
+		*res = -ENOMEM;
+		return NULL;
+	}
+	INIT_LIST_HEAD(&map->dir_maps);
+	/* Store the whole virtual address space for this mapping */
+	map->virt_mem = *virt_mem;
+	/* Set starting address & total size */
+	virt_mem_range.vaddr = virt_mem->vaddr;
+	virt_mem_range.size = virt_mem->size;
+
+	do {
+		struct imgmmu_dir *dir;
+
+		/* Determine catalogue entry (PCE-> PD) */
+		idx = mmu_cat_entry(virt_mem_range.vaddr);
+		dir = cat->dir_map[idx];
+		if (dir == NULL) {
+			dir = mmu_dir_create(
+					&cat->config, res);
+			if (*res != 0)
+				goto error;
+
+			dir->page->virt_base = virt_mem_range.vaddr &
+						~(VIRT_DIR_IDX_MASK | VIRT_PAGE_TBL_MASK());
+
+			dir->cat = cat;
+			WARN_ON(cat->dir_map[idx] != NULL);
+			cat->dir_map[idx] = dir;
+			/* Mark PCE valid and store PD address */
+			cat->config.page_write(
+				cat->page,
+				idx, dir->page->phys_addr,
+				MMU_FLAG_VALID, NULL);
+			if (cat->config.page_update != NULL)
+				cat->config.page_update(cat->page);
+			cat->nmap++;
+		}
+
+		/* Need to handle buffer spanning across the GB boundaries */
+		if (((virt_mem_range.vaddr % (1ULL<<IMGMMU_CAT_SHIFT)) +
+		  virt_mem_range.size) >= (1ULL<<IMGMMU_CAT_SHIFT))
+			virt_mem_range.size = (1ULL<<IMGMMU_CAT_SHIFT) -
+				(virt_mem_range.vaddr % (1ULL<<IMGMMU_CAT_SHIFT));
+
+		dir_map = mmu_dir_map(dir, &virt_mem_range, map_flag,
+				   sg_phys_iter_next, &arg, priv, res);
+
+		if (dir_map) {
+			/* Update starting address */
+			virt_mem_range.vaddr += virt_mem_range.size;
+			/* and bytes left ... */
+			virt_mem_range.size = (virt_mem->vaddr + virt_mem->size) -
+				virt_mem_range.vaddr;
+
+			list_add(&dir_map->entry, &map->dir_maps);
+		}
+
+	} while (dir_map && *res == 0 && virt_mem_range.size);
+
+	if (dir_map)
+		/* If last dir mapping succeeded,
+		 * return overlay container mapping structure */
+		return map;
+	else
+error:
+		imgmmu_cat_unmap(map);
+		return NULL;
+}
+
+static int mmu_dir_unmap(struct imgmmu_dirmap *map)
+{
+	unsigned int first_dir = 0, first_page = 0;
+	unsigned int dir_offs = 0, page_offs = 0;
+	uint32_t i;
+	struct imgmmu_dir *dir = NULL;
+
+	/* in non-UMA, updates on pages need to be done;
+	 * store the index of directory entry pages to update */
+	uint32_t *pages_to_update;
+	uint32_t num_pages_to_update = 0;
+
+	WARN_ON(map == NULL);
+	WARN_ON(map->entries == 0);
+	WARN_ON(map->dir == NULL);
+
+	dir = map->dir;
+
+	/* has to be dynamically allocated because
+	 * it is bigger than 1k (max stack in the kernel) */
+	pages_to_update = kzalloc(IMGMMU_N_TABLE * sizeof(uint32_t), GFP_KERNEL);
+	if (pages_to_update == NULL) {
+		mmu_log_err("Failed to allocate the update index table (%zu Bytes)\n",
+			     IMGMMU_N_TABLE * sizeof(uint32_t));
+		kfree(map);
+		return -ENOMEM;
+	}
+
+	first_dir = mmu_dir_entry(map->virt_mem.vaddr);
+	first_page = mmu_page_entry(map->virt_mem.vaddr);
+
+	/* verify that the pages that should be used are available */
+	dir_offs = first_dir;
+	page_offs = first_page;
+
+	pages_to_update[num_pages_to_update] = first_dir;
+	num_pages_to_update++;
+
+	for (i = 0; i < map->entries; i++) {
+		if (page_offs >= IMGMMU_N_PAGE) {
+			dir_offs++;	/* move to next directory */
+			page_offs = 0;	/* using its first page */
+
+			pages_to_update[num_pages_to_update] = dir_offs;
+			num_pages_to_update++;
+		}
+
+		/* this page table object does not exist; something destroyed it
+		 * while the mapping was supposed to be using it */
+		WARN_ON(dir->page_map[dir_offs] == NULL);
+
+		dir->config.page_write(
+			dir->page_map[dir_offs]->page,
+			page_offs, 0,
+			MMU_FLAG_INVALID, NULL);
+		dir->page_map[dir_offs]->valid_entries--;
+
+		page_offs++;
+	}
+
+	dir->nmap--;
+
+	if (dir->config.page_update != NULL)
+		while (num_pages_to_update > 0) {
+			uint32_t idx = pages_to_update[num_pages_to_update - 1];
+			dir->config.page_update(
+				dir->page_map[idx]->page);
+			num_pages_to_update--;
+		}
+
+	/* the mapping does not own the given virtual address range,
+	 * so only the bookkeeping structures are freed */
+	kfree(map);
+	kfree(pages_to_update);
+	return 0;
+}
+
+int imgmmu_cat_unmap(struct imgmmu_map *map)
+{
+	WARN_ON(map == NULL);
+
+	while (!list_empty(&map->dir_maps)) {
+		struct imgmmu_dirmap *dir_map;
+		struct imgmmu_cat *cat;
+		struct imgmmu_dir *dir;
+		uint16_t idx;
+
+		dir_map = list_first_entry(&map->dir_maps,
+				       struct imgmmu_dirmap, entry);
+		list_del(&dir_map->entry);
+
+		idx = mmu_cat_entry(dir_map->virt_mem.vaddr);
+		dir = dir_map->dir;
+		cat = dir->cat;
+		WARN_ON(cat == NULL);
+		/* This destroys the mapping */
+		mmu_dir_unmap(dir_map);
+
+		/* Check integrity */
+		WARN_ON(dir != cat->dir_map[idx]);
+
+		if (!dir->nmap) {
+			mmu_dir_destroy(dir);
+			WARN_ON(cat->dir_map[idx] == NULL);
+			cat->dir_map[idx] = NULL;
+			/* Mark PCE invalid */
+			cat->config.page_write(
+				cat->page,
+				idx, 0,
+				MMU_FLAG_INVALID, NULL);
+			if (cat->config.page_update != NULL)
+				cat->config.page_update(cat->page);
+
+			cat->nmap--;
+		}
+	}
+
+	kfree(map);
+	return 0;
+}
+
+static uint32_t mmu_dir_clean(struct imgmmu_dir *dir)
+{
+	uint32_t i, removed = 0;
+
+	WARN_ON(dir == NULL);
+	WARN_ON(dir->config.page_write == NULL);
+
+	for (i = 0; i < IMGMMU_N_TABLE; i++) {
+		if (dir->page_map[i] != NULL &&
+		    dir->page_map[i]->valid_entries == 0) {
+			dir->config.page_write(
+				dir->page,
+				i, 0,
+				MMU_FLAG_INVALID, NULL);
+
+			mmu_pagetab_destroy(dir->page_map[i]);
+			dir->page_map[i] = NULL;
+			removed++;
+		}
+	}
+
+	if (dir->config.page_update != NULL)
+		dir->config.page_update(dir->page);
+
+	return removed;
+}
+
+/* Not used */
+uint32_t imgmmu_cat_clean(struct imgmmu_cat *cat)
+{
+	uint32_t i, removed = 0;
+
+	WARN_ON(cat == NULL);
+	WARN_ON(cat->config.page_write == NULL);
+
+	for (i = 0; i < IMGMMU_N_DIR; i++) {
+		if (cat->dir_map[i] != NULL) {
+			mmu_dir_clean(cat->dir_map[i]);
+			cat->dir_map[i] = NULL;
+			removed++;
+		}
+	}
+
+	if (cat->config.page_update != NULL)
+		cat->config.page_update(cat->page);
+
+	return removed;
+}
+
+uint64_t imgmmu_get_pte_cache_bits(uint64_t pte_entry)
+{
+	return pte_entry & MMU_PTE_AXCACHE_MASK;
+}
+
+u8 imgmmu_get_pte_parity_shift(void)
+{
+	return MMU_PTE_PARITY_SHIFT;
+}
+
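+/*
+ * Example (illustrative values): for a PTE of 0x0000000012345003,
+ * imgmmu_set_pte_parity() sets bit MMU_PTE_PARITY_SHIFT (62), yielding
+ * 0x4000000012345003; the clear-then-set sequence below keeps the helper
+ * correct should the parity value ever be computed rather than forced.
+ */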
+void imgmmu_set_pte_parity(uint64_t *pte_entry)
+{
+	*pte_entry &= ~(1ULL << imgmmu_get_pte_parity_shift());
+
+	*pte_entry |= (1ULL << imgmmu_get_pte_parity_shift());
+}

+ 307 - 0
driver/img_mem/imgmmu/kernel_heap.c

@@ -0,0 +1,307 @@
+/*!
+ *****************************************************************************
+ *
+ * @File         kernel_heap.c
+ * @Description  MMU Library: device virtual allocation (heap) implementation
+ *               using genalloc from the Linux kernel
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+
+#include "mmulib/heap.h"
+/* access to MMU info and error printing function */
+#include "mmu_defs.h"
+
+/*#define DEBUG_POOL 1 */
+
+/*
+ * Internal heap object using genalloc
+ */
+struct gen_heap {
+	struct gen_pool *pool;
+	size_t nalloc;
+	struct imgmmu_heap hinfo;
+};
+
+/*
+ * The Heap allocation - contains an imgmmu_halloc
+ * that is given to the caller
+ */
+struct gen_halloc {
+	/*
+	 * Associated heap
+	 */
+	struct gen_heap *heap;
+	/*
+	 * MMU lib allocation part
+	 */
+	struct imgmmu_halloc virt_mem;
+};
+
+/*
+ * pool chunk crawler callback that can be used for debugging
+ */
+static void pool_crawler(struct gen_pool *pool,
+		struct gen_pool_chunk *chunk, void *data) __maybe_unused;
+
+static void pool_crawler(struct gen_pool *pool,
+		struct gen_pool_chunk *chunk, void *data)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	unsigned long size = (chunk->end_addr - chunk->start_addr);
+#else
+	unsigned long size = (chunk->end_addr - chunk->start_addr + 1);
+#endif
+	pr_info("pool 0x%p has chunk 0x%lx to 0x%lx (size = %lu B)\n",
+			data, chunk->start_addr, chunk->end_addr, size);
+}
+
+struct imgmmu_heap *imgmmu_hcreate(uintptr_t vaddr_start,
+		size_t atom, size_t size, bool guard_band, int *res)
+{
+	struct gen_heap *iheap = NULL;
+	int min_order = 0; /* log2 of the alloc atom */
+	size_t isize = atom;
+	int ret;
+	uintptr_t start = vaddr_start;
+
+	WARN_ON(res == NULL);
+	WARN_ON(size == 0);
+
+	if (size%atom != 0 || (vaddr_start != 0 && vaddr_start%atom != 0)) {
+		mmu_log_err("Wrong input params: size=%zu atom=%zu (size%%atom=%zu), vaddr_start=%zu atom=%zu (vaddr_start%%atom=%zu)\n",
+			size, atom, size%atom,
+			vaddr_start, atom, vaddr_start%atom);
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	iheap = kzalloc(sizeof(struct gen_heap), GFP_KERNEL);
+	if (iheap == NULL) {
+		*res = -ENOMEM;
+		return NULL;
+	}
+
+	iheap->nalloc = 0;
+
+	/* compute log2 of the alloc atom */
+	while (isize >>= 1)
+		min_order++;
+
+	/* ugly fix for trouble using gen_pool_alloc() when allocating a block:
+	 * gen_pool_alloc() returns 0 on error although 0 can be a valid
+	 * first virtual address,
+	 * therefore all addresses are offset by the allocation atom
+	 * to ensure 0 really is an error code
+	 */
+	if (vaddr_start == 0)
+		start = vaddr_start+atom; /* otherwise it is vaddr_start */
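+	/*
+	 * Worked example (illustrative): with vaddr_start == 0 and
+	 * atom == 4096 the pool manages [4096, 4096 + size), so a return
+	 * value of 0 from gen_pool_alloc() can only mean failure;
+	 * imgmmu_hallocate() subtracts the atom again before handing the
+	 * address back to the caller.
+	 */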
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	isize = start + size;
+#else
+	isize = start + size - 1;
+#endif
+	WARN_ON(isize < start); /* too big! the addition overflowed */
+
+	mmu_log_dbg("create genalloc pool of order %u\n", min_order);
+	/* -1: no NUMA node restriction (NUMA_NO_NODE) */
+	iheap->pool = gen_pool_create(min_order, -1);
+
+	if (iheap->pool == NULL) {
+		*res = -ENOMEM;
+		mmu_log_err("Failure to create the genalloc pool\n");
+		kfree(iheap);
+		return NULL;
+	}
+
+	mmu_log_dbg("pool 0x%p order %u region from 0x%x for %zu bytes\n",
+			iheap->pool, min_order, start, size);
+
+	ret = gen_pool_add(iheap->pool, start, size, -1);
+	if (ret != 0) {
+		*res = -EFAULT;
+		mmu_log_err("Failure to configure the new pool: %d\n", ret);
+		gen_pool_destroy(iheap->pool);
+		kfree(iheap);
+		return NULL;
+	}
+
+#ifdef DEBUG_POOL
+	gen_pool_for_each_chunk(iheap->pool, &pool_crawler, iheap->pool);
+#endif
+
+	iheap->hinfo.vaddr_start = vaddr_start;
+	iheap->hinfo.atom = atom;
+	iheap->hinfo.size = size;
+	iheap->hinfo.guard_band = guard_band;
+
+	*res = 0;
+	return &(iheap->hinfo);
+}
+
+struct imgmmu_halloc *imgmmu_hallocate(struct imgmmu_heap *heap,
+		size_t size, int *res)
+{
+	struct gen_heap *iheap = NULL;
+	struct gen_halloc *ialloc = NULL;
+
+	WARN_ON(res == NULL);
+	WARN_ON(heap == NULL);
+	iheap = container_of(heap, struct gen_heap, hinfo);
+
+	if (size%heap->atom != 0 || size == 0) {
+		mmu_log_err("invalid alloc size (0x%zx) for atom:%zu\n",
+			size, heap->atom);
+		*res = -EINVAL;
+		return NULL;
+	}
+
+	ialloc = kzalloc(sizeof(struct gen_halloc), GFP_KERNEL);
+	if (ialloc == NULL) {
+		mmu_log_err("failed to allocate internal structure\n");
+		*res = -ENOMEM;
+		return NULL;
+	}
+	mmu_log_dbg("heap 0x%p alloc %u\n", iheap->pool, size);
+
+
+	/* gen_pool_alloc returns 0 on error
+	 * that is a problem when 1st valid address is 0
+	 * check imgmmu_hcreate for explanations
+	 */
+	ialloc->virt_mem.vaddr = gen_pool_alloc(iheap->pool,
+	/* Take one more atom to create a fake gap between
+	 * virtual addresses, when needed */
+				iheap->hinfo.guard_band ?
+					size + iheap->hinfo.atom :
+					size);
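+	/*
+	 * Illustrative example: with atom == 4096 and guard_band enabled, an
+	 * 8 KiB request consumes 12 KiB of the pool, so the next allocation
+	 * can never be virtually contiguous with this one.
+	 */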
+
+	if (ialloc->virt_mem.vaddr == 0) {
+		mmu_log_err("failed to allocate from gen_pool_alloc\n");
+		*res = -EFAULT;
+		kfree(ialloc);
+		return NULL;
+	}
+
+	mmu_log_dbg(KERN_INFO "heap 0x%p alloc 0x%p %u B atom %u B\n",
+		iheap->pool, ialloc->virt_mem.vaddr, size, iheap->hinfo.atom);
+
+	/* if base address is 0 we applied an offset */
+	if (iheap->hinfo.vaddr_start == 0)
+		ialloc->virt_mem.vaddr -= iheap->hinfo.atom;
+
+	ialloc->virt_mem.size = size;
+	ialloc->heap = iheap;
+
+	iheap->nalloc++;
+
+#ifdef DEBUG_POOL
+	gen_pool_for_each_chunk(iheap->pool, &pool_crawler, iheap->pool);
+#endif
+
+	*res = 0;
+	return &(ialloc->virt_mem);
+}
+
+int imgmmu_hfree(struct imgmmu_halloc *alloc)
+{
+	struct gen_halloc *ialloc = NULL;
+	uintptr_t addr = 0;
+	size_t size;
+
+	WARN_ON(alloc == NULL);
+	ialloc = container_of(alloc, struct gen_halloc, virt_mem);
+
+	WARN_ON(ialloc->heap == NULL);
+	WARN_ON(ialloc->heap->pool == NULL);
+	WARN_ON(ialloc->heap->nalloc == 0);
+
+	mmu_log_dbg("heap 0x%p free 0x%p %u B\n",
+			ialloc->heap->pool, alloc->vaddr, alloc->size);
+
+#ifdef DEBUG_POOL
+	gen_pool_for_each_chunk(ialloc->heap->pool,
+			&pool_crawler, ialloc->heap->pool);
+#endif
+
+	addr = alloc->vaddr;
+	/* Include a fake gap */
+	size = ialloc->heap->hinfo.guard_band ?
+				alloc->size + ialloc->heap->hinfo.atom :
+				alloc->size;
+	/* see the explanation in imgmmu_hcreate to know why + atom */
+	if (ialloc->heap->hinfo.vaddr_start == 0)
+		addr += ialloc->heap->hinfo.atom;
+
+	gen_pool_free(ialloc->heap->pool, addr, size);
+
+	ialloc->heap->nalloc--;
+
+	kfree(ialloc);
+	return 0;
+}
+
+int imgmmu_hdestroy(struct imgmmu_heap *heap)
+{
+	struct gen_heap *iheap = NULL;
+
+	WARN_ON(heap == NULL);
+	iheap = container_of(heap, struct gen_heap, hinfo);
+
+	if (iheap->nalloc > 0) {
+		mmu_log_err("destroying a heap with non-freed allocations\n");
+		return -EFAULT;
+	}
+
+	if (iheap->pool != NULL) {
+		mmu_log_dbg("destroying genalloc pool 0x%p\n", iheap->pool);
+		gen_pool_destroy(iheap->pool);
+	}
+
+	kfree(iheap);
+	return 0;
+}

+ 142 - 0
driver/img_mem/imgmmu/mmu_defs.h

@@ -0,0 +1,142 @@
+/*!
+ *****************************************************************************
+ *
+ * @File        mmu_defs.h
+ * @Description Internal MMU library header used to define MMU information at
+ *           compilation time and have access to the error printing functions
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef MMU_DEFS_H
+#define MMU_DEFS_H
+
+#include <stdarg.h>
+
+/**
+ * @addtogroup IMGMMU_lib
+ * @{
+ */
+/*-------------------------------------------------------------------------
+ * Following elements are in the IMGMMU_int documentation module
+ *------------------------------------------------------------------------*/
+
+#ifndef IMGMMU_PHYS_SIZE
+/** @brief MMU physical address size in bits */
+#define IMGMMU_PHYS_SIZE 40
+#endif
+
+#ifndef IMGMMU_VIRT_SIZE
+/** @brief MMU virtual address size in bits */
+#define IMGMMU_VIRT_SIZE 40
+#endif
+
+/** @brief Page size in bytes used at PC & PD (always 4k),
+ * PT may use variable page size */
+#define IMGMMU_PAGE_SIZE 4096u
+
+/** should be log2(IMGMMU_PAGE_SIZE)*3-6 */
+#define IMGMMU_CAT_SHIFT 30
+
+/** should be log2(IMGMMU_PAGE_SIZE)*2-3 */
+#define IMGMMU_DIR_SHIFT 21
+
+/** should be log2(IMGMMU_PAGE_SIZE) */
+#define IMGMMU_PAGE_SHIFT 12
+
+#if IMGMMU_VIRT_SIZE == 40
+/**
+ * @brief maximum number of directories that
+ * can be stored in the catalogue
+ */
+#define IMGMMU_N_DIR (IMGMMU_PAGE_SIZE/4u)
+/**
+ * @brief maximum number of page tables that
+ * can be stored in a directory
+ */
+#define IMGMMU_N_TABLE (IMGMMU_PAGE_SIZE/8u)
+/**
+ * @brief maximum number of page mappings in the pagetable
+ * for variable page size
+ */
+#define IMGMMU_N_PAGE (IMGMMU_PAGE_SIZE/ \
+		((imgmmu_get_page_size()/IMGMMU_PAGE_SIZE)*8u))
+#else
+/* it is unlikely to change anyway */
+#error "need an update for the new virtual address size"
+#endif
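+
+/*
+ * Worked example (4 KiB MMU page size): IMGMMU_N_DIR = 4096/4 = 1024,
+ * IMGMMU_N_TABLE = 4096/8 = 512 and IMGMMU_N_PAGE = 4096/8 = 512, so a
+ * single catalogue covers 1024 * 512 * 512 * 4 KiB = 2^40 bytes of virtual
+ * space, matching IMGMMU_VIRT_SIZE.
+ */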
+
+/** @brief Memory flags used to mark a page mapping as valid or invalid */
+#define MMU_FLAG_VALID 0x1
+#define MMU_FLAG_INVALID 0x0
+
+/** @brief Other memory flags */
+#define MMU_FLAG_READ_ONLY 0x2
+
+/** @brief PTE entry cache bits mask */
+#define MMU_PTE_AXCACHE_MASK 0x3C00000000000000UL
+
+/** @brief PTE entry cache bits shift */
+#define MMU_PTE_AXCACHE_SHIFT 58
+
+/** @brief PTE entry parity bit shift */
+#define MMU_PTE_PARITY_SHIFT 62
+
+/*
+ * internal printing functions
+ */
+__printf(4, 5)
+void _mmu_log(int err, const char *function, uint32_t line, const char *format, ...);
+
+#define mmu_log_err(...) _mmu_log(1, __func__, __LINE__, __VA_ARGS__)
+
+#define mmu_log_dbg(...)
+/*
+ * #define mmu_log_dbg(...) _mmu_log(0, __func__, __LINE__, __VA_ARGS__)
+ */
+
+/**
+ * @}
+ */
+/*-------------------------------------------------------------------------
+ * End of the IMGMMU_int documentation module
+ *------------------------------------------------------------------------*/
+
+#endif /* MMU_DEFS_H */

+ 159 - 0
driver/img_mem/imgmmu/mmulib/heap.h

@@ -0,0 +1,159 @@
+/*!
+ *****************************************************************************
+ *
+ * @File           heap.h
+ * @Description    MMU Library: device virtual allocation (heap)
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef IMGMMU_HEAP_H
+#define IMGMMU_HEAP_H
+
+/**
+ * @defgroup IMGMMU_heap MMU Heap Interface
+ * @brief The API for the device virtual address Heap - must be implemented
+ * (see tal_heap.c for an example implementation)
+ * @ingroup IMGMMU_lib
+ * @{
+ */
+/*-----------------------------------------------------------------------------
+ * Following elements are in the IMGMMU_heap documentation module
+ *---------------------------------------------------------------------------*/
+
+/** @brief An allocation on a heap. */
+struct imgmmu_halloc {
+    /** @brief Start of the allocation */
+	uint64_t vaddr;
+    /** @brief Size in bytes */
+	size_t size;
+};
+
+/**
+ * @brief A virtual address heap - not directly related to HW MMU directory
+ * entry
+ */
+struct imgmmu_heap {
+    /** @brief Start of device virtual address */
+	uint64_t vaddr_start;
+    /** @brief Allocation atom in bytes */
+	size_t atom;
+    /** @brief Total size of the heap in bytes */
+	size_t size;
+    /** Guard band indicator. */
+	bool guard_band;
+};
+
+/**
+ * @name Device virtual address allocation (heap management)
+ * @{
+ */
+
+/**
+ * @brief Create a Heap
+ *
+ * @param vaddr_start start of the heap - must be a multiple of atom
+ * @param atom the minimum possible allocation on the heap in bytes
+ * - usually related to the system page size
+ * @param size total size of the heap in bytes
+ * @param guard_band enables/disables creation of a gap
+ *                   between virtual addresses.
+ *                   NOTE: The gap has the size of one atom.
+ * @param res must be non-NULL - used to give detail about error
+ *
+ * @return pointer to the new Heap object and res is 0
+ * @return NULL and the value of res can be:
+ * @li -ENOMEM if internal allocation failed
+ */
+struct imgmmu_heap *imgmmu_hcreate(uintptr_t vaddr_start,
+		size_t atom, size_t size, bool guard_band, int *res);
+
+/**
+ * @brief Allocate from a heap
+ *
+ * @warning Heaps do not relate to each other, therefore the caller must
+ * ensure that heaps which are not supposed to overlap do not overlap.
+ *
+ * @param heap must not be NULL
+ * @param size allocation size in bytes
+ * @param res must be non-NULL - used to give details about error
+ *
+ * @return pointer to the new halloc object and res is 0
+ * @return NULL and the value of res can be:
+ * @li -EINVAL if the given size is not a multiple of
+ * heap->atom
+ * @li -ENOMEM if the internal structure allocation failed
+ * @li -EFAULT if the internal device memory allocator did not
+ * find a suitable virtual address
+ */
+struct imgmmu_halloc *imgmmu_hallocate(struct imgmmu_heap *heap, size_t size,
+		int *res);
+
+/**
+ * @brief Liberate an allocation
+ *
+ * @return 0
+ */
+int imgmmu_hfree(struct imgmmu_halloc *alloc);
+
+/**
+ * @brief Destroy a heap object
+ *
+ * @return 0
+ * @return -EFAULT if the given heap still has attached
+ * allocation
+ */
+int imgmmu_hdestroy(struct imgmmu_heap *heap);
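+
+/*
+ * Typical lifecycle (illustrative sketch: the base address and sizes are
+ * hypothetical and error handling is elided):
+ *
+ *	int res;
+ *	struct imgmmu_heap *heap =
+ *		imgmmu_hcreate(0x40000000, 4096, 256 * 4096, false, &res);
+ *	struct imgmmu_halloc *buf = imgmmu_hallocate(heap, 16 * 4096, &res);
+ *	...use buf->vaddr as the device virtual address of a mapping...
+ *	imgmmu_hfree(buf);
+ *	imgmmu_hdestroy(heap);
+ */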
+
+/**
+ * @}
+ */
+/*-----------------------------------------------------------------------------
+ * End of the public functions
+ *---------------------------------------------------------------------------*/
+
+/**
+ * @}
+ */
+/*-----------------------------------------------------------------------------
+ * End of the IMGMMU_heap documentation module
+ *---------------------------------------------------------------------------*/
+
+#endif /* IMGMMU_HEAP_H */

+ 449 - 0
driver/img_mem/imgmmu/mmulib/mmu.h

@@ -0,0 +1,449 @@
+/*!
+ *****************************************************************************
+ *
+ * @File           mmu.h
+ * @Description    MMU Library
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef IMGMMU_MMU_H
+#define IMGMMU_MMU_H
+
+#include <linux/scatterlist.h>
+
+/**
+ * @defgroup IMGMMU_lib The MMU page table manager
+ * @brief The Memory Mapping Unit page table manager library handles the memory
+ * hierarchy for a multi-directory MMU.
+ *
+ * The library is composed of several elements:
+ * @li the Page Table management, responsible for managing the device memory
+ * used for the mapping. This requires some functions from the Allocator to
+ * access the memory and a virtual address from a Heap.
+ * @li the Heap interface, which is the SW heap API one can re-implement (or
+ * use the provided TAL one). This is responsible for choosing a virtual
+ * address for an allocation.
+ * @li the Allocator, which is not implemented in this library and is
+ * responsible for providing the list of physical pages (when mapping) and a
+ * few memory operation
+ * functions (imgmmu_info).
+ * An example TAL allocator is provided in this code and can be used when
+ * running in full user-space with pdumps.
+ *
+ * Some pre-processor values can be defined to customise the MMU:
+ * @li IMGMMU_PHYS_SIZE physical address size of the MMU in bits (default: 40)
+ * - only used for the default memory write function
+ * @li IMGMMU_PAGE_SIZE page size in bytes (default: 4096) - not used directly
+ * in the MMU code, but the allocator should take it into account
+ * If IMGMMU_PAGE_SIZE is defined the following HAVE TO be defined as well:
+ * @li IMGMMU_PAGE_SHIFT as log2(IMGMMU_PAGE_SIZE) (default: 12) - used in
+ * virtual address to determine the position of the page offset
+ * @li IMGMMU_DIR_SHIFT as log2(IMGMMU_PAGE_SIZE)*2-3 (default: 21) - used in
+ * virtual address to determine the position of the directory offset
+ * @li IMGMMU_CAT_SHIFT as log2(IMGMMU_PAGE_SIZE)*3-6 (default: 30) - used in
+ * virtual address to determine the position of the catalogue offset
+ *
+ * @{
+ */
+/*-----------------------------------------------------------------------------
+ * Following elements are in the IMGMMU_lib documentation module
+ *---------------------------------------------------------------------------*/
+
+/**
+ * @name MMU page table management
+ * @brief The public functions to use to control the MMU.
+ * @image html MMU_class.png "MMU structure organisation"
+ * @{
+ */
+/*-----------------------------------------------------------------------------
+ * Following elements are in the public functions
+ *---------------------------------------------------------------------------*/
+
+/** @brief Opaque type representing an MMU Catalogue page */
+struct imgmmu_cat;
+/** @brief Opaque type representing an MMU Mapping */
+struct imgmmu_map;
+
+struct imgmmu_page;
+struct imgmmu_halloc;
+
+/** @brief Define indicating the mmu page type */
+#define IMGMMU_PTYPE_PC 0x1
+#define IMGMMU_PTYPE_PD 0x2
+#define IMGMMU_PTYPE_PT 0x3
+
+/** @brief Bypass phys address translation when using page_write */
+#define IMGMMU_BYPASS_ADDR_TRANS 0x80000000
+
+/**
+ * @brief Pointer to a function implemented by the used allocator to create 1
+ * page table (used for the MMU mapping - catalogue page and mapping page)
+ *
+ * @param type of the mmu page (PC, PD, PT)
+ *
+ * This is not done internally to allow the usage of different allocators
+ *
+ * @return A populated imgmmu_page structure with the result of the page allocation
+ * @return NULL if the allocation failed
+ *
+ * @see imgmmu_page_free to liberate the page
+ */
+typedef struct imgmmu_page *(*imgmmu_page_alloc) (void *, unsigned char type);
+/**
+ * @brief Pointer to a function to free the allocated page table used for MMU
+ * mapping
+ *
+ * This is not done internally to allow the usage of different allocators
+ *
+ * @see imgmmu_page_alloc to allocate the page
+ */
+typedef void (*imgmmu_page_free) (struct imgmmu_page *);
+
+/**
+ * @brief Pointer to a function to update Device memory on non Unified Memory
+ */
+typedef void (*imgmmu_page_update) (struct imgmmu_page *);
+
+/**
+ * @brief Write to a device address.
+ *
+ * This is not done internally to allow debug operations such as pdumping to
+ * occur
+ *
+ * This function should do all the shifting and masking needed for the used MMU
+ *
+ * @param page page to update - asserts it is not NULL
+ * @param offset in entries (32b word)
+ * @param write physical address to write
+ * @param flags bottom part of the entry used as flags for the MMU (including
+ * valid flag) or IMGMMU_BYPASS_ADDR_TRANS
+ * @param priv private data passed with map call
+ */
+typedef void (*imgmmu_page_write) (struct imgmmu_page *page,
+				     unsigned int offset, uint64_t write,
+				     unsigned int flags, void *priv);
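+
+/*
+ * Minimal sketch of a page_write implementation (illustrative only: the
+ * exact masking depends on the target MMU and my_page_write is a
+ * hypothetical name):
+ *
+ *	static void my_page_write(struct imgmmu_page *page,
+ *			unsigned int offset, uint64_t paddr,
+ *			unsigned int flags, void *priv)
+ *	{
+ *		uint64_t *entries = (uint64_t *)page->cpu_addr;
+ *
+ *		entries[offset] = (paddr & ~(IMGMMU_PAGE_SIZE - 1ULL)) | flags;
+ *	}
+ */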
+
+/**
+ * @brief Reads a 32b word from a physical page
+ *
+ * This is used only when debug operations occur (e.g. accessing the page
+ * table entry that caused a page fault)
+ *
+ * @param page physical page - asserts it is not NULL
+ * @param offset in entries (32b word)
+ * @param priv private data passed with map call
+ *
+ * @return physical address at given offset and flags
+ */
+typedef uint64_t(*imgmmu_page_read) (struct imgmmu_page *page,
+					 unsigned int offset, void *priv,
+					 unsigned int *flags);
+
+/**
+ * @brief Callbacks definition structure
+ */
+struct imgmmu_info {
+	void *ctx;
+    /** @brief allocate a physical page used in MMU mapping */
+	imgmmu_page_alloc page_alloc;
+    /** @brief liberate a physical page used in MMU mapping */
+	imgmmu_page_free page_free;
+    /**
+     * @brief write a physical address onto a page - optional, if NULL internal
+     * function is used
+     *
+     * The internal function assumes that IMGMMU_PHYS_SIZE is the MMU size.
+     *
+     * @note if NULL, page_read is also set
+     *
+     * @warning The function assumes that the physical page memory is
+     * accessible
+     */
+	imgmmu_page_write page_write;
+    /**
+     * @brief read a physical page offset - optional, if NULL access to page
+     * table and catalogue entries is not supported
+     *
+     * @note If page_write and page_read are NULL then the internal
+     * function is used.
+     *
+     * @warning The function assumes that the physical page memory is
+     * accessible
+     */
+	imgmmu_page_read page_read;
+    /**
+     * @brief update a physical page on device if non UMA - optional, can be
+     * NULL if update are not needed
+     * NULL if updates are not needed
+	imgmmu_page_update page_update;
+
+};
+
+/** @brief Page table entry - used when allocating the MMU pages */
+struct imgmmu_page {
+    /**
+     * @note Use ui64 instead of uintptr_t to support extended physical address
+     * on 32b OS
+     */
+	uint64_t phys_addr;
+	uint64_t virt_base;
+	uintptr_t cpu_addr;
+};
+
+/**
+ * @brief Access the default specified page size of the MMU (in Bytes)
+ */
+size_t imgmmu_get_page_size(void);
+
+/**
+ * @brief Returns entry shift for given type
+ */
+size_t imgmmu_get_entry_shift(unsigned char type);
+
+/**
+ * @brief Change the MMU page size in runtime.
+ */
+int imgmmu_set_page_size(size_t pagesize);
+
+/**
+ * @brief Access the compilation specified physical size of the MMU (in bits)
+ */
+size_t imgmmu_get_phys_size(void);
+
+/**
+ * @brief Access the compilation specified virtual address size of the MMU
+ * (in bits)
+ */
+size_t imgmmu_get_virt_size(void);
+
+/**
+ * @brief Access the CPU page size - similar to PAGE_SIZE macro in Linux
+ *
+ * Not directly using PAGE_SIZE because we need a run-time configuration of the
+ * PAGE_SIZE when running against simulators and different projects define
+ * PAGE_SIZE in different ways...
+ *
+ * The default size is using the PAGE_SIZE macro if defined (or 4kB if not
+ * defined when running against simulators)
+ */
+size_t imgmmu_get_cpu_page_size(void);
+
+/**
+ * @brief Change run-time CPU page size
+ *
+ * @warning to use against simulators only! default of imgmmu_get_cpu_page_size()
+ * is PAGE_SIZE otherwise!
+ */
+int imgmmu_set_cpu_page_size(size_t pagesize);
+
+/**
+ * @brief Create a catalogue entry based on a given catalogue configuration
+ *
+ * @warning Obviously creation of the catalogue allocates memory - do not call
+ * while interrupts are disabled
+ *
+ * @param info is copied and not modified - contains the functions to
+ * use to manage page table memory
+ * @param res where to store the error code, should not be NULL (trapped by
+ * assert)
+ *
+ * @return The opaque handle to the imgmmu_cat object and sets res to 0
+ * @return NULL in case of an error and res has the value:
+ * @li -EINVAL if info is NULL or does not
+ * contain the required function pointers
+ * @li -ENOMEM if an internal allocation failed
+ * @li -EFAULT if the given imgmmu_page_alloc returned NULL
+ */
+struct imgmmu_cat *imgmmu_cat_create(
+	const struct imgmmu_info *info,
+	int *res);
+
+/**
+ * @brief Destroy the imgmmu_cat - assumes that the HW is not going to access
+ * the memory any more
+ *
+ * Does not invalidate any memory because it assumes that everything is no
+ * longer used
+ */
+int imgmmu_cat_destroy(struct imgmmu_cat *cat);
+
+/**
+ * @brief Get access to the page table structure used in the catalogue (to be
+ * able to write it to registers)
+ *
+ * @param cat asserts if cat is NULL
+ *
+ * @return the page table structure used
+ */
+struct imgmmu_page *imgmmu_cat_get_page(struct imgmmu_cat *cat);
+
+/**
+ * @brief Returns the page table entry value associated with the virtual
+ * address
+ *
+ * @return -1 if the Catalogue's page_read is NULL or if the associated page
+ * table is invalid in the catalogue map
+ *
+ */
+uint64_t imgmmu_cat_get_pte(
+	struct imgmmu_cat *cat,
+	uint64_t vaddr);
+
+/**
+ * @brief Overrides the physical address associated with the virtual address
+ *
+ * @return -1 if the Catalogue's page_read is NULL or if the associated page
+ * table is invalid in the catalogue map
+ *
+ */
+uint64_t imgmmu_cat_override_phys_addr(struct imgmmu_cat *cat,
+                 uint64_t vaddr, uint64_t new_phys_addr);
+/**
+ * @brief Create a PageTable mapping for a list of physical pages and device
+ * virtual address
+ *
+ * @warning Mapping can cause memory allocation (missing pages) - do not call
+ * while interrupts are disabled
+ *
+ * @param cat catalogue to use for the mapping
+ * @param phys_page_list sorted array of physical addresses (ascending order).
+ * The number of elements is virt_mem->size/MMU_PAGE_SIZE
+ * @note This array can potentially be big, the caller may need to use vmalloc
+ * if running the linux kernel (e.g. mapping a 1080p NV12 is 760 entries, 6080
+ * Bytes - 2 CPU pages needed, fine with kmalloc; 4k NV12 is 3038 entries,
+ * 24304 Bytes - 6 CPU pages needed, kmalloc would try to find 8 contiguous
+ * pages which may be problematic if memory is fragmented)
+ * @param virt_mem associated device virtual address. Given structure is
+ * copied
+ * @param map_flags flags to apply on the page (typically 0x2 for Write Only,
+ * 0x4 for Read Only) - the flags should not set bit 0 as 0x1 is the valid flag.
+ * @param priv private data to be passed in callback interface
+ * @param res where to store the error code, should not be NULL
+ *
+ * @return The opaque handle to the imgmmu_map object and res to
+ * 0
+ * @return NULL in case of an error and res has the value:
+ * @li -EINVAL if the allocation size is not a multiple of
+ * IMGMMU_PAGE_SIZE,
+ *     if the given list of physical pages is too long or not long enough for
+ * the mapping or
+ *     if the given flags set the valid flag bit
+ * @li -EBUSY if the virtual memory is already mapped
+ * @li -ENOMEM if an internal allocation failed
+ * @li -EFAULT if a page creation failed
+ */
+struct imgmmu_map *imgmmu_cat_map_arr(
+	struct imgmmu_cat *cat,
+	uint64_t  *phys_page_list,
+	const struct imgmmu_halloc *virt_mem,
+	unsigned int map_flags,
+	void *priv,
+	int  *res);
+
+struct imgmmu_map *imgmmu_cat_map_sg(
+	struct imgmmu_cat *cat,
+	struct scatterlist *phys_page_sg,
+	bool use_sg_dma,
+	const struct imgmmu_halloc *virt_mem,
+	unsigned int map_flags,
+	void *priv,
+	int *res);
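+
+/*
+ * Typical mapping flow (illustrative sketch: my_page_alloc/my_page_free are
+ * hypothetical allocator callbacks, halloc comes from the heap API in
+ * mmulib/heap.h and error handling is elided):
+ *
+ *	struct imgmmu_info info = {
+ *		.page_alloc = my_page_alloc,
+ *		.page_free = my_page_free,
+ *	};
+ *	int res;
+ *	struct imgmmu_cat *cat = imgmmu_cat_create(&info, &res);
+ *	struct imgmmu_map *map = imgmmu_cat_map_arr(cat, phys_page_list,
+ *			&halloc, 0, NULL, &res);
+ *	...program imgmmu_cat_get_page(cat)->phys_addr into the MMU base
+ *	   register; later:
+ *	imgmmu_cat_unmap(map);
+ *	imgmmu_cat_destroy(cat);
+ */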
+
+/**
+ * @brief Un-map the mapped pages (invalidate their entries) and destroy the
+ * mapping object
+ *
+ * This does not destroy the created Page Tables (even if they become
+ * unused) and does not change the Catalogue valid bits.
+ *
+ * @return 0
+ */
+int imgmmu_cat_unmap(struct imgmmu_map *map);
+
+/**
+ * @brief Remove the internal Page Table structures and physical pages that
+ * have no mapping attached to them
+ *
+ * @note This function does not have to be used, but can be used to clean some
+ * unused memory out and clear the Catalogue valid bits.
+ *
+ * @return The number of cleaned catalogue entries
+ */
+uint32_t imgmmu_cat_clean(struct imgmmu_cat *cat);
+
+/**
+ * @brief Get cache bits for PTE entry
+ *
+ * @return Cache bits
+ */
+uint64_t imgmmu_get_pte_cache_bits(uint64_t pte_entry);
+
+/**
+ * @brief Get parity bit shift of PTE entry
+ *
+ * @return Parity bit shift
+ */
+u8 imgmmu_get_pte_parity_shift(void);
+
+/**
+ * @brief Set parity for PTE entry
+ *
+ * @return Entry with applied parity
+ */
+void imgmmu_set_pte_parity(uint64_t *pte_entry);
+
+#define IMGMMU_GET_MAX_PAGE_SIZE() (max(imgmmu_get_page_size(), imgmmu_get_cpu_page_size()))
+
+/**
+ * @}
+ */
+/*-----------------------------------------------------------------------------
+ * End of the public functions
+ *---------------------------------------------------------------------------*/
+
+/**
+ * @}
+ */
+/*-----------------------------------------------------------------------------
+ * End of the IMGMMU_lib documentation module
+ *---------------------------------------------------------------------------*/
+
+#endif /* IMGMMU_MMU_H */

+ 4 - 0
driver/include/hwdefs/aura_system.h

@@ -0,0 +1,4 @@
+#define _REG_START 0x00000000
+#define _VHA_TB_START 0x080000
+#define _REG_SIZE 0x00200000
+#define _VHA_TB_SIZE 0x00100000

+ 24 - 0
driver/include/hwdefs/gyrus_system.h

@@ -0,0 +1,24 @@
+#define _REG_NNPU_START 0x00000000
+#define _REG_START 0x00080000
+#define _REG_NNA_START 0x00080000
+#define _REG_NNSYS_START 0x00100000
+#define _VHA_TB_START 0x000c0000
+#define _RGXREG_START 0x00000000
+#define _EMUREG_START 0x0000400000
+#define _REG_JX_START 0x00000000
+#define _REG_JD_START 0x00000000
+#define _REG_JTAG_START 0x00000000
+#define _REG_META_SD_START 0x00000000
+#define _REG_META_TB_START 0x00000000
+#define _REG_NNPU_SIZE 0x00080000
+#define _REG_SIZE 0x00100000
+#define _REG_NNA_SIZE 0x00080000
+#define _REG_NNSYS_SIZE 0x00080000
+#define _VHA_TB_SIZE 0x00080000
+#define _RGXREG_SIZE 0x00080000
+#define _EMUREG_SIZE 0x0000104000
+#define _REG_JX_SIZE 0xFFFFFFFF
+#define _REG_JD_SIZE 0x00000040
+#define _REG_JTAG_SIZE 0x00000080
+#define _REG_META_SD_SIZE 0x00002800
+#define _REG_META_TB_SIZE 0x00010000

+ 4 - 0
driver/include/hwdefs/magna_system.h

@@ -0,0 +1,4 @@
+#define _REG_START 0x00000000
+#define _REG_NNA_START 0x00000000
+#define _REG_SIZE 0x00080000
+#define _REG_NNA_SIZE 0x00080000

+ 4 - 0
driver/include/hwdefs/mirage_system.h

@@ -0,0 +1,4 @@
+#define _REG_START 0x00000000
+#define _VHA_TB_START 0x040000
+#define _REG_SIZE 0x00100000
+#define _VHA_TB_SIZE 0x00100000

+ 355 - 0
driver/include/hwdefs/nn_sys_cr_gyrus.h

@@ -0,0 +1,355 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file nn_sys_cr_gyrus.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _NN_SYS_CR_GYRUS_H_
+#define _NN_SYS_CR_GYRUS_H_
+
+#define NN_SYS_CR_GYRUS_REVISION 1
+
+/*
+    Register NN_SYS_CR_PRODUCT_ID
+*/
+#define NN_SYS_CR_PRODUCT_ID                              (0x0018U)
+#define NN_SYS_CR_PRODUCT_ID_MASKFULL                     (IMG_UINT64_C(0x00000000FFFF0000))
+#define NN_SYS_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT         (16U)
+#define NN_SYS_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register NN_SYS_CR_CORE_ID
+*/
+#define NN_SYS_CR_CORE_ID                                 (0x0020U)
+#define NN_SYS_CR_CORE_ID_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define NN_SYS_CR_CORE_ID_BRANCH_ID_SHIFT                 (48U)
+#define NN_SYS_CR_CORE_ID_BRANCH_ID_CLRMSK                (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define NN_SYS_CR_CORE_ID_VERSION_ID_SHIFT                (32U)
+#define NN_SYS_CR_CORE_ID_VERSION_ID_CLRMSK               (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define NN_SYS_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT  (16U)
+#define NN_SYS_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define NN_SYS_CR_CORE_ID_CONFIG_ID_SHIFT                 (0U)
+#define NN_SYS_CR_CORE_ID_CONFIG_ID_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register NN_SYS_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID                   (0x0028U)
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT       (0U)
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register NN_SYS_CR_CORE_IP_CHANGELIST
+*/
+#define NN_SYS_CR_CORE_IP_CHANGELIST                      (0x0030U)
+#define NN_SYS_CR_CORE_IP_CHANGELIST_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_CORE_IP_CHANGELIST_VALUE_SHIFT          (0U)
+#define NN_SYS_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+#define NN_SYS_CR_CLK_CTRL_MODE_MASK                      (0x00000003U)
+/*
+The domain clock is forced off */
+#define NN_SYS_CR_CLK_CTRL_MODE_OFF                       (0x00000000U)
+/*
+The domain clock is forced on */
+#define NN_SYS_CR_CLK_CTRL_MODE_ON                        (0x00000001U)
+/*
+Automatic clock gating is active, the domain clock is only on whilst data is being processed */
+#define NN_SYS_CR_CLK_CTRL_MODE_AUTO                      (0x00000002U)
+
+
+/*
+    Register NN_SYS_CR_CLK_CTRL
+*/
+#define NN_SYS_CR_CLK_CTRL                                (0x0200U)
+#define NN_SYS_CR_CLK_CTRL_MASKFULL                       (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_SHIFT                   (0U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_CLRMSK                  (0XFFFFFFFCU)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_OFF                     (00000000U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_ON                      (0X00000001U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_AUTO                    (0X00000002U)
+
+
+/*
+Clock is gated and the module is inactive */
+#define NN_SYS_CR_CLK_STATUS_MODE_GATED                   (0x00000000U)
+/*
+Clock is running */
+#define NN_SYS_CR_CLK_STATUS_MODE_RUNNING                 (0x00000001U)
+
+
+/*
+    Register NN_SYS_CR_CLK_STATUS
+*/
+#define NN_SYS_CR_CLK_STATUS                              (0x0208U)
+#define NN_SYS_CR_CLK_STATUS_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_SHIFT                 (0U)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_CLRMSK                (0XFFFFFFFEU)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_GATED                 (00000000U)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_RUNNING               (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_EVENT_CLEAR
+*/
+#define NN_SYS_CR_EVENT_CLEAR                             (0x0210U)
+#define NN_SYS_CR_EVENT_CLEAR_MASKFULL                    (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_SHIFT      (1U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_CLRMSK     (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_EN         (0X00000002U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_SHIFT   (0U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_CLRMSK  (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_EN      (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_EVENT_ENABLE
+*/
+#define NN_SYS_CR_EVENT_ENABLE                            (0x0218U)
+#define NN_SYS_CR_EVENT_ENABLE_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_SHIFT     (1U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_CLRMSK    (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_EN        (0X00000002U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_SHIFT  (0U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_EVENT_STATUS
+*/
+#define NN_SYS_CR_EVENT_STATUS                            (0x0220U)
+#define NN_SYS_CR_EVENT_STATUS_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_SHIFT     (1U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_CLRMSK    (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_EN        (0X00000002U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_SHIFT  (0U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT                   (0x0228U)
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_MASKFULL          (IMG_UINT64_C(0x000000000000001F))
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_NN_SYS_SHIFT      (0U)
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_NN_SYS_CLRMSK     (0XFFFFFFE0U)
+
+
+/*
+Power event type is power down */
+#define NN_SYS_CR_POWER_EVENT_MODE_POWER_DOWN             (0x00000000U)
+/*
+Power event type is power up  */
+#define NN_SYS_CR_POWER_EVENT_MODE_POWER_UP               (0x00000001U)
+
+
+/*
+    Register NN_SYS_CR_POWER_EVENT
+*/
+#define NN_SYS_CR_POWER_EVENT                             (0x0230U)
+#define NN_SYS_CR_POWER_EVENT_MASKFULL                    (IMG_UINT64_C(0x0000000000000033))
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_SHIFT            (5U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_CLRMSK           (0XFFFFFFDFU)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_EN               (0X00000020U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_SHIFT          (4U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_CLRMSK         (0XFFFFFFEFU)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_EN             (0X00000010U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_SHIFT               (1U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_CLRMSK              (0XFFFFFFFDU)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_POWER_DOWN          (00000000U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_POWER_UP            (0X00000002U)
+#define NN_SYS_CR_POWER_EVENT_TYPE_SHIFT                  (0U)
+#define NN_SYS_CR_POWER_EVENT_TYPE_CLRMSK                 (0XFFFFFFFEU)
+#define NN_SYS_CR_POWER_EVENT_TYPE_EN                     (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_RESET_CLK_CTRL
+*/
+#define NN_SYS_CR_RESET_CLK_CTRL                          (0x0238U)
+#define NN_SYS_CR_RESET_CLK_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_RESET_CLK_CTRL_NN_SYS_SHIFT             (0U)
+#define NN_SYS_CR_RESET_CLK_CTRL_NN_SYS_CLRMSK            (0XFFFFFFFCU)
+
+
+/*
+    Register NN_SYS_CR_RESET_CTRL
+*/
+#define NN_SYS_CR_RESET_CTRL                              (0x0240U)
+#define NN_SYS_CR_RESET_CTRL_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_SHIFT                 (0U)
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_CLRMSK                (0XFFFFFFFEU)
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_EN                    (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE                     (0x0248U)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT        (0U)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK       (0XFFFFFFFEU)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN           (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_AXI_EXACCESS
+*/
+#define NN_SYS_CR_AXI_EXACCESS                            (0x0250U)
+#define NN_SYS_CR_AXI_EXACCESS_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT         (0U)
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK        (0XFFFFFFFEU)
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_EN            (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_REGBANK_REQUEST_INVALID
+*/
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID                 (0x0258U)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_MASKFULL        (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT      (0U)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK     (0XFFFFFFFEU)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_EN         (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_NOC_LOWER_ADDR1
+*/
+#define NN_SYS_CR_NOC_LOWER_ADDR1                         (0x0268U)
+#define NN_SYS_CR_NOC_LOWER_ADDR1_MASKFULL                (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define NN_SYS_CR_NOC_LOWER_ADDR1_LOWER_ADDR_SHIFT        (0U)
+#define NN_SYS_CR_NOC_LOWER_ADDR1_LOWER_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFFF000000000))
+
+
+/*
+    Register NN_SYS_CR_NOC_UPPER_ADDR1
+*/
+#define NN_SYS_CR_NOC_UPPER_ADDR1                         (0x0278U)
+#define NN_SYS_CR_NOC_UPPER_ADDR1_MASKFULL                (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define NN_SYS_CR_NOC_UPPER_ADDR1_UPPER_ADDR_SHIFT        (0U)
+#define NN_SYS_CR_NOC_UPPER_ADDR1_UPPER_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFFF000000000))
+
+
+/*
+    Register NN_SYS_CR_SYS_BUS_DIRECT_ACCESS
+*/
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS                   (0x0280U)
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_SHIFT (0U)
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register NN_SYS_CR_NNPU_ACE_QOS_CTRL
+*/
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL                       (0x02A0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_CRITICAL_SHIFT        (12U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_CRITICAL_CLRMSK       (0XFFFF0FFFU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_HIGH_SHIFT            (8U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_HIGH_CLRMSK           (0XFFFFF0FFU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MEDIUM_SHIFT          (4U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MEDIUM_CLRMSK         (0XFFFFFF0FU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_LOW_SHIFT             (0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_LOW_CLRMSK            (0XFFFFFFF0U)
+
+
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_MASK   (0x00000003U)
+/*
+Low */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_LOW    (0x00000000U)
+/*
+Medium */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_HIGH   (0x00000002U)
+/*
+Critical */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
+/*
+    Register NN_SYS_CR_NNPU_ACE_QOS_SEL
+*/
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL                        (0x02A8U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MASKFULL               (IMG_UINT64_C(0x00000000000000F1))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_SHIFT              (6U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_LOW                (IMG_UINT64_C(0000000000000000))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_MEDIUM             (IMG_UINT64_C(0x0000000000000040))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_HIGH               (IMG_UINT64_C(0x0000000000000080))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_CRITICAL           (IMG_UINT64_C(0x00000000000000c0))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_SHIFT          (4U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_LOW            (IMG_UINT64_C(0000000000000000))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_MEDIUM         (IMG_UINT64_C(0x0000000000000010))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_HIGH           (IMG_UINT64_C(0x0000000000000020))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_CRITICAL       (IMG_UINT64_C(0x0000000000000030))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_SHIFT  (0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_EN     (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register NN_SYS_CR_RTM_CTRL
+*/
+#define NN_SYS_CR_RTM_CTRL                                (0x1000U)
+#define NN_SYS_CR_RTM_CTRL_MASKFULL                       (IMG_UINT64_C(0x00000000C0000FF8))
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_SHIFT               (31U)
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_CLRMSK              (0X7FFFFFFFU)
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_EN                  (0X80000000U)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_SHIFT                (30U)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_CLRMSK               (0XBFFFFFFFU)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_EN                   (0X40000000U)
+#define NN_SYS_CR_RTM_CTRL_RTM_SELECTOR_SHIFT             (3U)
+#define NN_SYS_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK            (0XFFFFF007U)
+
+
+/*
+    Register NN_SYS_CR_RTM_DATA
+*/
+#define NN_SYS_CR_RTM_DATA                                (0x1008U)
+#define NN_SYS_CR_RTM_DATA_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_RTM_DATA_RTM_DATA_SHIFT                 (0U)
+#define NN_SYS_CR_RTM_DATA_RTM_DATA_CLRMSK                (00000000U)
+
+
+/*
+    Register NN_SYS_CR_SOCIF_BUS_UNTRUSTED
+*/
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED                     (0x1A000U)
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_MASKFULL            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_VALUE_SHIFT         (0U)
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_VALUE_CLRMSK        (IMG_UINT64_C(0000000000000000))
+
+
+/*
+    Register NN_SYS_CR_SOCIF_BUS_SECURE
+*/
+#define NN_SYS_CR_SOCIF_BUS_SECURE                        (0x1A100U)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_SHIFT           (0U)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_CLRMSK          (0XFFFFFFFEU)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_EN              (0X00000001U)
+
+
+#endif /* _NN_SYS_CR_GYRUS_H_ */
+
+/*****************************************************************************
+ End of file (nn_sys_cr_gyrus.h)
+*****************************************************************************/
+

+ 364 - 0
driver/include/hwdefs/nn_sys_cr_vagus.h

@@ -0,0 +1,364 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file nn_sys_cr_vagus.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _NN_SYS_CR_VAGUS_H_
+#define _NN_SYS_CR_VAGUS_H_
+
+#define NN_SYS_CR_VAGUS_REVISION 1
+
+/*
+    Register NN_SYS_CR_PRODUCT_ID
+*/
+#define NN_SYS_CR_PRODUCT_ID                              (0x0018U)
+#define NN_SYS_CR_PRODUCT_ID_MASKFULL                     (IMG_UINT64_C(0x00000000FFFF0000))
+#define NN_SYS_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT         (16U)
+#define NN_SYS_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register NN_SYS_CR_CORE_ID
+*/
+#define NN_SYS_CR_CORE_ID                                 (0x0020U)
+#define NN_SYS_CR_CORE_ID_MASKFULL                        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define NN_SYS_CR_CORE_ID_BRANCH_ID_SHIFT                 (48U)
+#define NN_SYS_CR_CORE_ID_BRANCH_ID_CLRMSK                (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define NN_SYS_CR_CORE_ID_VERSION_ID_SHIFT                (32U)
+#define NN_SYS_CR_CORE_ID_VERSION_ID_CLRMSK               (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define NN_SYS_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT  (16U)
+#define NN_SYS_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define NN_SYS_CR_CORE_ID_CONFIG_ID_SHIFT                 (0U)
+#define NN_SYS_CR_CORE_ID_CONFIG_ID_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register NN_SYS_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID                   (0x0028U)
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT       (0U)
+#define NN_SYS_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register NN_SYS_CR_CORE_IP_CHANGELIST
+*/
+#define NN_SYS_CR_CORE_IP_CHANGELIST                      (0x0030U)
+#define NN_SYS_CR_CORE_IP_CHANGELIST_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_CORE_IP_CHANGELIST_VALUE_SHIFT          (0U)
+#define NN_SYS_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register NN_SYS_CR_CORE_IP_CONFIG
+*/
+#define NN_SYS_CR_CORE_IP_CONFIG                          (0x0038U)
+#define NN_SYS_CR_CORE_IP_CONFIG_MASKFULL                 (IMG_UINT64_C(0x0000000000000FFF))
+#define NN_SYS_CR_CORE_IP_CONFIG_NN_SYS_OCM_RAM_SIZE_4KB_SHIFT (0U)
+#define NN_SYS_CR_CORE_IP_CONFIG_NN_SYS_OCM_RAM_SIZE_4KB_CLRMSK (0XFFFFF000U)
+
+
+#define NN_SYS_CR_CLK_CTRL_MODE_MASK                      (0x00000003U)
+/*
+The domain clock is forced off */
+#define NN_SYS_CR_CLK_CTRL_MODE_OFF                       (0x00000000U)
+/*
+The domain clock is forced on */
+#define NN_SYS_CR_CLK_CTRL_MODE_ON                        (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define NN_SYS_CR_CLK_CTRL_MODE_AUTO                      (0x00000002U)
+
+
+/*
+    Register NN_SYS_CR_CLK_CTRL
+*/
+#define NN_SYS_CR_CLK_CTRL                                (0x0200U)
+#define NN_SYS_CR_CLK_CTRL_MASKFULL                       (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_SHIFT                   (0U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_CLRMSK                  (0XFFFFFFFCU)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_OFF                     (0X00000000U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_ON                      (0X00000001U)
+#define NN_SYS_CR_CLK_CTRL_NN_SYS_AUTO                    (0X00000002U)
+
+
+/*
+Clock is gated and the module is inactive */
+#define NN_SYS_CR_CLK_STATUS_MODE_GATED                   (0x00000000U)
+/*
+Clock is running */
+#define NN_SYS_CR_CLK_STATUS_MODE_RUNNING                 (0x00000001U)
+
+
+/*
+    Register NN_SYS_CR_CLK_STATUS
+*/
+#define NN_SYS_CR_CLK_STATUS                              (0x0208U)
+#define NN_SYS_CR_CLK_STATUS_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_SHIFT                 (0U)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_CLRMSK                (0XFFFFFFFEU)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_GATED                 (0X00000000U)
+#define NN_SYS_CR_CLK_STATUS_NN_SYS_RUNNING               (0X00000001U)
+
+
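Editor's note: a sketch of driving the clock controls above -- request automatic gating, then check the status register to see whether the domain clock is currently running. The read_reg64/write_reg64 accessors are the same hypothetical stand-ins as in the earlier sketch.

    #include <stdint.h>
    #include "nn_sys_cr_vagus.h"

    extern uint64_t read_reg64(uint32_t offset);              /* hypothetical */
    extern void write_reg64(uint32_t offset, uint64_t value); /* hypothetical */

    static int nn_sys_clock_auto_and_running(void)
    {
        /* The mode field is this register's only field (MASKFULL is 0x3),
         * so a plain write is safe here. */
        write_reg64(NN_SYS_CR_CLK_CTRL, NN_SYS_CR_CLK_CTRL_NN_SYS_AUTO);

        /* In AUTO mode the clock only runs while data is being processed. */
        return (read_reg64(NN_SYS_CR_CLK_STATUS) &
                NN_SYS_CR_CLK_STATUS_NN_SYS_RUNNING) != 0;
    }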
+/*
+    Register NN_SYS_CR_EVENT_CLEAR
+*/
+#define NN_SYS_CR_EVENT_CLEAR                             (0x0210U)
+#define NN_SYS_CR_EVENT_CLEAR_MASKFULL                    (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_SHIFT      (1U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_CLRMSK     (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_ABORT_EN         (0X00000002U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_SHIFT   (0U)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_CLRMSK  (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_CLEAR_NN_SYS_PWR_COMPLETE_EN      (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_EVENT_ENABLE
+*/
+#define NN_SYS_CR_EVENT_ENABLE                            (0x0218U)
+#define NN_SYS_CR_EVENT_ENABLE_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_SHIFT     (1U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_CLRMSK    (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_EN        (0X00000002U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_SHIFT  (0U)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_EVENT_STATUS
+*/
+#define NN_SYS_CR_EVENT_STATUS                            (0x0220U)
+#define NN_SYS_CR_EVENT_STATUS_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_SHIFT     (1U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_CLRMSK    (0XFFFFFFFDU)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_ABORT_EN        (0X00000002U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_SHIFT  (0U)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define NN_SYS_CR_EVENT_STATUS_NN_SYS_PWR_COMPLETE_EN     (0X00000001U)
+
+
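Editor's note: the three registers above form the usual enable/status/clear triplet, which suggests the handler pattern sketched below -- unmask the events of interest, and on each event read the status and write the same bits back to the clear register. Whether this block actually drives an interrupt line is a hardware detail the header alone does not state; accessors are hypothetical as before.

    #include <stdint.h>
    #include "nn_sys_cr_vagus.h"

    extern uint64_t read_reg64(uint32_t offset);              /* hypothetical */
    extern void write_reg64(uint32_t offset, uint64_t value); /* hypothetical */

    #define NN_SYS_PWR_EVENTS (NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_COMPLETE_EN | \
                               NN_SYS_CR_EVENT_ENABLE_NN_SYS_PWR_ABORT_EN)

    static void nn_sys_events_unmask(void)
    {
        write_reg64(NN_SYS_CR_EVENT_ENABLE, NN_SYS_PWR_EVENTS);
    }

    /* Typical handler body: snapshot what fired, then acknowledge exactly that. */
    static uint64_t nn_sys_events_ack(void)
    {
        uint64_t status = read_reg64(NN_SYS_CR_EVENT_STATUS);
        write_reg64(NN_SYS_CR_EVENT_CLEAR, status);
        return status;
    }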
+/*
+    Register NN_SYS_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT                   (0x0228U)
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_MASKFULL          (IMG_UINT64_C(0x000000000000001F))
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_NN_SYS_SHIFT      (0U)
+#define NN_SYS_CR_IDLE_HYSTERESIS_COUNT_NN_SYS_CLRMSK     (0XFFFFFFE0U)
+
+
+/*
+Power event type is power down */
+#define NN_SYS_CR_POWER_EVENT_MODE_POWER_DOWN             (0x00000000U)
+/*
+Power event type is power up  */
+#define NN_SYS_CR_POWER_EVENT_MODE_POWER_UP               (0x00000001U)
+
+
+/*
+    Register NN_SYS_CR_POWER_EVENT
+*/
+#define NN_SYS_CR_POWER_EVENT                             (0x0230U)
+#define NN_SYS_CR_POWER_EVENT_MASKFULL                    (IMG_UINT64_C(0x0000000000000033))
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_SHIFT            (5U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_CLRMSK           (0XFFFFFFDFU)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_EN               (0X00000020U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_SHIFT          (4U)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_CLRMSK         (0XFFFFFFEFU)
+#define NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_EN             (0X00000010U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_SHIFT               (1U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_CLRMSK              (0XFFFFFFFDU)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_POWER_DOWN          (0X00000000U)
+#define NN_SYS_CR_POWER_EVENT_REQUEST_POWER_UP            (0X00000002U)
+#define NN_SYS_CR_POWER_EVENT_TYPE_SHIFT                  (0U)
+#define NN_SYS_CR_POWER_EVENT_TYPE_CLRMSK                 (0XFFFFFFFEU)
+#define NN_SYS_CR_POWER_EVENT_TYPE_EN                     (0X00000001U)
+
+
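Editor's note: how the POWER_EVENT fields compose, as a value-construction sketch only. The exact handshake -- and whether TYPE must be raised together with the domain and direction bits -- is hardware sequencing the header does not specify.

    #include "nn_sys_cr_vagus.h"

    /* Power-up request for both domains -- illustrative bit composition only. */
    static const unsigned nn_sys_power_up_request =
        NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_EN    |  /* bit 5: NNA domain            */
        NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_EN  |  /* bit 4: NNSYS domain          */
        NN_SYS_CR_POWER_EVENT_REQUEST_POWER_UP |  /* bit 1: direction = up        */
        NN_SYS_CR_POWER_EVENT_TYPE_EN;            /* bit 0: type flag (see TRM)   */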
+/*
+    Register NN_SYS_CR_RESET_CLK_CTRL
+*/
+#define NN_SYS_CR_RESET_CLK_CTRL                          (0x0238U)
+#define NN_SYS_CR_RESET_CLK_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000003))
+#define NN_SYS_CR_RESET_CLK_CTRL_NN_SYS_SHIFT             (0U)
+#define NN_SYS_CR_RESET_CLK_CTRL_NN_SYS_CLRMSK            (0XFFFFFFFCU)
+
+
+/*
+    Register NN_SYS_CR_RESET_CTRL
+*/
+#define NN_SYS_CR_RESET_CTRL                              (0x0240U)
+#define NN_SYS_CR_RESET_CTRL_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_SHIFT                 (0U)
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_CLRMSK                (0XFFFFFFFEU)
+#define NN_SYS_CR_RESET_CTRL_NN_SYS_EN                    (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE                     (0x0248U)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_MASKFULL            (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT        (0U)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK       (0XFFFFFFFEU)
+#define NN_SYS_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN           (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_AXI_EXACCESS
+*/
+#define NN_SYS_CR_AXI_EXACCESS                            (0x0250U)
+#define NN_SYS_CR_AXI_EXACCESS_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT         (0U)
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK        (0XFFFFFFFEU)
+#define NN_SYS_CR_AXI_EXACCESS_SOCIF_ENABLE_EN            (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_REGBANK_REQUEST_INVALID
+*/
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID                 (0x0258U)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_MASKFULL        (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT      (0U)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK     (0XFFFFFFFEU)
+#define NN_SYS_CR_REGBANK_REQUEST_INVALID_FLAG_EN         (0X00000001U)
+
+
+/*
+    Register NN_SYS_CR_NOC_LOWER_ADDR1
+*/
+#define NN_SYS_CR_NOC_LOWER_ADDR1                         (0x0268U)
+#define NN_SYS_CR_NOC_LOWER_ADDR1_MASKFULL                (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define NN_SYS_CR_NOC_LOWER_ADDR1_LOWER_ADDR_SHIFT        (0U)
+#define NN_SYS_CR_NOC_LOWER_ADDR1_LOWER_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFFF000000000))
+
+
+/*
+    Register NN_SYS_CR_NOC_UPPER_ADDR1
+*/
+#define NN_SYS_CR_NOC_UPPER_ADDR1                         (0x0278U)
+#define NN_SYS_CR_NOC_UPPER_ADDR1_MASKFULL                (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define NN_SYS_CR_NOC_UPPER_ADDR1_UPPER_ADDR_SHIFT        (0U)
+#define NN_SYS_CR_NOC_UPPER_ADDR1_UPPER_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFFF000000000))
+
+
+/*
+    Register NN_SYS_CR_SYS_BUS_DIRECT_ACCESS
+*/
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS                   (0x0280U)
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_SHIFT (0U)
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define NN_SYS_CR_SYS_BUS_DIRECT_ACCESS_SYS_BUS_DIRECT_ACCESS_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register NN_SYS_CR_NNPU_ACE_QOS_CTRL
+*/
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL                       (0x02A0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_CRITICAL_SHIFT        (12U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_CRITICAL_CLRMSK       (0XFFFF0FFFU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_HIGH_SHIFT            (8U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_HIGH_CLRMSK           (0XFFFFF0FFU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MEDIUM_SHIFT          (4U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_MEDIUM_CLRMSK         (0XFFFFFF0FU)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_LOW_SHIFT             (0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_CTRL_LOW_CLRMSK            (0XFFFFFFF0U)
+
+
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_MASK   (0x00000003U)
+/*
+Low */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_LOW    (0x00000000U)
+/*
+Medium */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_HIGH   (0x00000002U)
+/*
+Critical */
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
+/*
+    Register NN_SYS_CR_NNPU_ACE_QOS_SEL
+*/
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL                        (0x02A8U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MASKFULL               (IMG_UINT64_C(0x00000000000000F1))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_SHIFT              (6U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_LOW                (IMG_UINT64_C(0x0000000000000000))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_MEDIUM             (IMG_UINT64_C(0x0000000000000040))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_HIGH               (IMG_UINT64_C(0x0000000000000080))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_MMU_CRITICAL           (IMG_UINT64_C(0x00000000000000c0))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_SHIFT          (4U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_LOW            (IMG_UINT64_C(0x0000000000000000))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_MEDIUM         (IMG_UINT64_C(0x0000000000000010))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_HIGH           (IMG_UINT64_C(0x0000000000000020))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NON_MMU_CRITICAL       (IMG_UINT64_C(0x0000000000000030))  
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_SHIFT  (0U)
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define NN_SYS_CR_NNPU_ACE_QOS_SEL_NNPU_QOS_ENABLE_EN     (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register NN_SYS_CR_RTM_CTRL
+*/
+#define NN_SYS_CR_RTM_CTRL                                (0x1000U)
+#define NN_SYS_CR_RTM_CTRL_MASKFULL                       (IMG_UINT64_C(0x00000000C0000FF8))
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_SHIFT               (31U)
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_CLRMSK              (0X7FFFFFFFU)
+#define NN_SYS_CR_RTM_CTRL_RTM_ENABLE_EN                  (0X80000000U)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_SHIFT                (30U)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_CLRMSK               (0XBFFFFFFFU)
+#define NN_SYS_CR_RTM_CTRL_RTM_CHECK_EN                   (0X40000000U)
+#define NN_SYS_CR_RTM_CTRL_RTM_SELECTOR_SHIFT             (3U)
+#define NN_SYS_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK            (0XFFFFF007U)
+
+
+/*
+    Register NN_SYS_CR_RTM_DATA
+*/
+#define NN_SYS_CR_RTM_DATA                                (0x1008U)
+#define NN_SYS_CR_RTM_DATA_MASKFULL                       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define NN_SYS_CR_RTM_DATA_RTM_DATA_SHIFT                 (0U)
+#define NN_SYS_CR_RTM_DATA_RTM_DATA_CLRMSK                (0X00000000U)
+
+
+/*
+    Register NN_SYS_CR_SOCIF_BUS_UNTRUSTED
+*/
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED                     (0x1A000U)
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_MASKFULL            (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_VALUE_SHIFT         (0U)
+#define NN_SYS_CR_SOCIF_BUS_UNTRUSTED_VALUE_CLRMSK        (IMG_UINT64_C(0X0000000000000000))
+
+
+/*
+    Register NN_SYS_CR_SOCIF_BUS_SECURE
+*/
+#define NN_SYS_CR_SOCIF_BUS_SECURE                        (0x1A100U)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_SHIFT           (0U)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_CLRMSK          (0XFFFFFFFEU)
+#define NN_SYS_CR_SOCIF_BUS_SECURE_ENABLE_EN              (0X00000001U)
+
+
+#endif /* _NN_SYS_CR_VAGUS_H_ */
+
+/*****************************************************************************
+ End of file (nn_sys_cr_vagus.h)
+*****************************************************************************/
+

+ 8 - 0
driver/include/hwdefs/vagus_system.h

@@ -0,0 +1,8 @@
+#define _REG_START 0x00000000
+#define _REG_NNA_START 0x00000000
+#define _REG_NNSYS_START 0x00080000
+#define _EMUREG_START 0x0000400000
+#define _REG_SIZE 0x00080000
+#define _REG_NNA_SIZE 0x00080000
+#define _REG_NNSYS_SIZE 0x00080000
+#define _EMUREG_SIZE 0x0000104000
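Editor's note: these base/size constants place the NNA bank at the start of the register window and the NN_SYS bank 512 KB (0x80000) above it, so an NN_SYS register offset from the headers above becomes an absolute device offset by adding _REG_NNSYS_START. A worked line, restating the two constants so the fragment stands alone (a real build would include both headers instead):

    #include <stdint.h>

    /* From vagus_system.h and nn_sys_cr_vagus.h respectively: */
    #define _REG_NNSYS_START   0x00080000
    #define NN_SYS_CR_CLK_CTRL (0x0200U)

    /* Absolute device offset of the NN_SYS clock control register. */
    static const uint32_t clk_ctrl_abs =
        _REG_NNSYS_START + NN_SYS_CR_CLK_CTRL;   /* = 0x00080200 */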

+ 5471 - 0
driver/include/hwdefs/vha_cr_aura.h

@@ -0,0 +1,5471 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file vha_cr_aura.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _VHA_CR_AURA_H_
+#define _VHA_CR_AURA_H_
+
+#define VHA_CR_AURA_REVISION 1
+
+
+
+
+
+
+
+#define VHA_CR_MH_CONTROL_MAX_BURST_LENGTH_MASK           (0x00000003U)
+
+
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_CLK_STATUS0_MODE_GATED                     (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_CLK_STATUS0_MODE_RUNNING                   (0x00000001U)
+
+
+/*
+    Register VHA_CR_CLK_STATUS0
+*/
+#define VHA_CR_CLK_STATUS0                                (0x0008U)
+#define VHA_CR_CLK_STATUS0_MASKFULL                       (IMG_UINT64_C(0x00000037FFDC0104))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_SHIFT            (37U)
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_CLRMSK           (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_GATED            (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_RUNNING          (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_MMM_SHIFT                  (36U)
+#define VHA_CR_CLK_STATUS0_CNN_MMM_CLRMSK                 (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_RUNNING                (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_EWO_SHIFT                  (34U)
+#define VHA_CR_CLK_STATUS0_CNN_EWO_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_RUNNING                (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_SHIFT                 (33U)
+#define VHA_CR_CLK_STATUS0_CNN_PACK_CLRMSK                (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_RUNNING               (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_SHIFT                  (32U)
+#define VHA_CR_CLK_STATUS0_CNN_OIN_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_RUNNING                (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_SHIFT                 (31U)
+#define VHA_CR_CLK_STATUS0_CNN_POOL_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_RUNNING               (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_SHIFT                   (30U)
+#define VHA_CR_CLK_STATUS0_CNN_SB_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_SB_GATED                   (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_SB_RUNNING                 (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_SHIFT                 (29U)
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_RUNNING               (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_STATUS0_CNN_NORM_SHIFT                 (28U)
+#define VHA_CR_CLK_STATUS0_CNN_NORM_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_RUNNING               (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACT_SHIFT                  (27U)
+#define VHA_CR_CLK_STATUS0_CNN_ACT_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_RUNNING                (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_SHIFT                (26U)
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_RUNNING              (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CNV_SHIFT                  (25U)
+#define VHA_CR_CLK_STATUS0_CNN_CNV_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_RUNNING                (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_SHIFT                 (24U)
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_RUNNING               (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_SHIFT                 (23U)
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_RUNNING               (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_CLK_STATUS0_CNN_CMD_SHIFT                  (22U)
+#define VHA_CR_CLK_STATUS0_CNN_CMD_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_RUNNING                (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_CLK_STATUS0_CNN_SHIFT                      (20U)
+#define VHA_CR_CLK_STATUS0_CNN_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_RUNNING                    (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_SHIFT                (19U)
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_RUNNING              (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_SHIFT                (18U)
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_RUNNING              (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_CLK_STATUS0_SLC_SHIFT                      (8U)
+#define VHA_CR_CLK_STATUS0_SLC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CLK_STATUS0_SLC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_SLC_RUNNING                    (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_STATUS0_BIF_SHIFT                      (2U)
+#define VHA_CR_CLK_STATUS0_BIF_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CLK_STATUS0_BIF_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_BIF_RUNNING                    (IMG_UINT64_C(0x0000000000000004))  
+
+
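Editor's note: each status lane above is a single pre-shifted bit, so testing the _RUNNING constant directly is enough. A decode sketch with the same hypothetical accessor as before:

    #include <stdint.h>

    #ifndef IMG_UINT64_C
    #define IMG_UINT64_C(c) c##ULL  /* fallback; the SDK normally provides this */
    #endif
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);  /* hypothetical */

    static int vha_cnn_clock_running(void)
    {
        return (read_reg64(VHA_CR_CLK_STATUS0) &
                VHA_CR_CLK_STATUS0_CNN_RUNNING) != 0;
    }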
+/*
+    Register VHA_CR_PRODUCT_ID
+*/
+#define VHA_CR_PRODUCT_ID                                 (0x0018U)
+#define VHA_CR_PRODUCT_ID_MASKFULL                        (IMG_UINT64_C(0x00000000FFFF0000))
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT            (16U)
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register VHA_CR_CORE_ID
+*/
+#define VHA_CR_CORE_ID                                    (0x0020U)
+#define VHA_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_BRANCH_ID_SHIFT                    (48U)
+#define VHA_CR_CORE_ID_BRANCH_ID_CLRMSK                   (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_VERSION_ID_SHIFT                   (32U)
+#define VHA_CR_CORE_ID_VERSION_ID_CLRMSK                  (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT     (16U)
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK    (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_CORE_ID_CONFIG_ID_SHIFT                    (0U)
+#define VHA_CR_CORE_ID_CONFIG_ID_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
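Editor's note: the canonical decode for multi-bit fields is "mask with the inverted CLRMSK, then shift down". Sketched below for the CORE_ID fields; the accessor remains a hypothetical stand-in.

    #include <stdint.h>

    #ifndef IMG_UINT64_C
    #define IMG_UINT64_C(c) c##ULL  /* fallback; the SDK normally provides this */
    #endif
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);  /* hypothetical */

    static void vha_decode_core_id(unsigned *branch, unsigned *version,
                                   unsigned *units)
    {
        uint64_t v = read_reg64(VHA_CR_CORE_ID);

        *branch  = (v & ~VHA_CR_CORE_ID_BRANCH_ID_CLRMSK)
                       >> VHA_CR_CORE_ID_BRANCH_ID_SHIFT;
        *version = (v & ~VHA_CR_CORE_ID_VERSION_ID_CLRMSK)
                       >> VHA_CR_CORE_ID_VERSION_ID_SHIFT;
        *units   = (v & ~VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK)
                       >> VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT;
    }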
+/*
+    Register VHA_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define VHA_CR_CORE_IP_INTEGRATOR_ID                      (0x0028U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT          (0U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CHANGELIST
+*/
+#define VHA_CR_CORE_IP_CHANGELIST                         (0x0030U)
+#define VHA_CR_CORE_IP_CHANGELIST_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_SHIFT             (0U)
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CONFIG
+*/
+#define VHA_CR_CORE_IP_CONFIG                             (0x0038U)
+#define VHA_CR_CORE_IP_CONFIG_MASKFULL                    (IMG_UINT64_C(0x00000000000FFF03))
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_SHIFT (19U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_CLRMSK (0XFFF7FFFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_EN (0X00080000U)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_SHIFT          (18U)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_CLRMSK         (0XFFFBFFFFU)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_EN             (0X00040000U)
+#define VHA_CR_CORE_IP_CONFIG_ECC_RAMS_SHIFT              (16U)
+#define VHA_CR_CORE_IP_CONFIG_ECC_RAMS_CLRMSK             (0XFFFCFFFFU)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_SHIFT      (15U)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_CLRMSK     (0XFFFF7FFFU)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_EN         (0X00008000U)
+#define VHA_CR_CORE_IP_CONFIG_MMU_VERSION_SHIFT           (12U)
+#define VHA_CR_CORE_IP_CONFIG_MMU_VERSION_CLRMSK          (0XFFFF8FFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_SHIFT (11U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_EN (0X00000800U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_SHIFT (10U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_EN (0X00000400U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_EN (0X00000200U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_SHIFT         (8U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_CLRMSK        (0XFFFFFEFFU)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_EN            (0X00000100U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_SHIFT         (1U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_EN            (0X00000002U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_SHIFT         (0U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_EN            (0X00000001U)
+
+
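Editor's note: CORE_IP_CONFIG mixes single-bit capability flags (_EN constants) with small multi-bit fields such as MMU_VERSION; a probing sketch, assuming the same hypothetical accessor.

    #include <stdint.h>
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);  /* hypothetical */

    static int vha_cnn_supported(unsigned *mmu_version)
    {
        /* The fields used here are all in the low 32 bits. */
        uint32_t cfg = (uint32_t)read_reg64(VHA_CR_CORE_IP_CONFIG);

        /* Multi-bit field: mask with the inverted CLRMSK, then shift down. */
        *mmu_version = (cfg & ~VHA_CR_CORE_IP_CONFIG_MMU_VERSION_CLRMSK)
                           >> VHA_CR_CORE_IP_CONFIG_MMU_VERSION_SHIFT;

        /* Single-bit capabilities are pre-shifted _EN constants. */
        return (cfg & VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_EN) != 0;
    }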
+/*
+    Register VHA_CR_CNN_MEM_WDT_TIMER
+*/
+#define VHA_CR_CNN_MEM_WDT_TIMER                          (0x0048U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_SHIFT              (0U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_CLRMSK             (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_TIMER
+*/
+#define VHA_CR_CNN_HL_WDT_TIMER                           (0x0050U)
+#define VHA_CR_CNN_HL_WDT_TIMER_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_HL_WDT_TIMER_VALUE_SHIFT               (0U)
+#define VHA_CR_CNN_HL_WDT_TIMER_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+    Register VHA_CR_RTM_CTRL
+*/
+#define VHA_CR_RTM_CTRL                                   (0x0058U)
+#define VHA_CR_RTM_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000CFFFFFF8))
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_SHIFT                  (31U)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_CLRMSK                 (0X7FFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_EN                     (0X80000000U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_SHIFT                   (30U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_CLRMSK                  (0XBFFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_EN                      (0X40000000U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT                (3U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK               (0XF0000007U)
+
+
+/*
+    Register VHA_CR_RTM_DATA
+*/
+#define VHA_CR_RTM_DATA                                   (0x0060U)
+#define VHA_CR_RTM_DATA_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_RTM_DATA_RTM_DATA_SHIFT                    (0U)
+#define VHA_CR_RTM_DATA_RTM_DATA_CLRMSK                   (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG0
+*/
+#define VHA_CR_CNN_IP_CONFIG0                             (0x0068U)
+#define VHA_CR_CNN_IP_CONFIG0_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_EN   (IMG_UINT64_C(0X0000000010000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_SHIFT (27U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_EN (IMG_UINT64_C(0X0000000008000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_SHIFT (26U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_EN (IMG_UINT64_C(0X0000000004000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_SHIFT (25U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_EN (IMG_UINT64_C(0X0000000002000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_EN (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_SHIFT  (23U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_EN     (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_SHIFT (22U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_EN    (IMG_UINT64_C(0X0000000000400000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_SHIFT (21U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000200000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_SHIFT      (20U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_EN         (IMG_UINT64_C(0X0000000000100000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_SHIFT (19U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000080000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_SHIFT (18U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000040000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_SHIFT (17U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_EN (IMG_UINT64_C(0X0000000000020000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000010000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_SHIFT (15U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_SHIFT (14U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_SHIFT (13U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_SHIFT (11U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_SHIFT (10U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_SHIFT (7U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_SHIFT (6U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_SHIFT (5U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_SHIFT (3U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_SHIFT (2U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_SHIFT (1U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_EN   (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG1
+*/
+#define VHA_CR_CNN_IP_CONFIG1                             (0x0070U)
+#define VHA_CR_CNN_IP_CONFIG1_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFF3F0FFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ADDRESS_ALIGNMENT_BYTES_LOG2_SHIFT (60U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ADDRESS_ALIGNMENT_BYTES_LOG2_CLRMSK (IMG_UINT64_C(0X0FFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_SHIFT (52U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_CLRMSK (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_SHIFT (39U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFF0007FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_SHIFT (37U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFF9FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_SHIFT (32U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF00F))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
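Editor's note: the _MIN1 suffix on these fields reads as a value-minus-one encoding (an n-bit field can then express 1..2^n rather than 0..2^n-1), so decoding adds one back. That reading is inferred from the naming convention, not stated in the header; the accessor is hypothetical as before.

    #include <stdint.h>

    #ifndef IMG_UINT64_C
    #define IMG_UINT64_C(c) c##ULL  /* fallback; the SDK normally provides this */
    #endif
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);  /* hypothetical */

    static unsigned vha_conv_engine_num_coeffs(void)
    {
        uint64_t v = read_reg64(VHA_CR_CNN_IP_CONFIG1);
        unsigned field = (unsigned)
            ((v & ~VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK)
                 >> VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT);
        return field + 1;  /* _MIN1: the hardware stores the count minus one */
    }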
+/*
+    Register VHA_CR_CNN_IP_CONFIG2
+*/
+#define VHA_CR_CNN_IP_CONFIG2                             (0x0078U)
+#define VHA_CR_CNN_IP_CONFIG2_MASKFULL                    (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_CONCAT_MIN1_SHIFT (51U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_CONCAT_MIN1_CLRMSK (IMG_UINT64_C(0XFF07FFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_SPLIT_MIN1_SHIFT (48U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_SPLIT_MIN1_CLRMSK (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_BANKS_MIN1_SHIFT (44U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFF003FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC0FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_SHIFT (20U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0000F))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG3
+*/
+#define VHA_CR_CNN_IP_CONFIG3                             (0x0080U)
+#define VHA_CR_CNN_IP_CONFIG3_MASKFULL                    (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_SHIFT (38U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFC3FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_SHIFT (29U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC1FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFE0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_GATED                 (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_RUNNING               (0x00000001U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_STATUS0
+*/
+#define VHA_CR_SYS_CLK_STATUS0                            (0x0088U)
+#define VHA_CR_SYS_CLK_STATUS0_MASKFULL                   (IMG_UINT64_C(0x0000000000000004))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_SHIFT                  (2U)
+#define VHA_CR_SYS_CLK_STATUS0_SLC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_RUNNING                (IMG_UINT64_C(0x0000000000000004))  
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ
+*/
+#define VHA_CR_PERF_SLC0_READ                             (0x0200U)
+#define VHA_CR_PERF_SLC0_READ_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_COUNT_SHIFT                 (0U)
+#define VHA_CR_PERF_SLC0_READ_COUNT_CLRMSK                (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE
+*/
+#define VHA_CR_PERF_SLC0_WRITE                            (0x0208U)
+#define VHA_CR_PERF_SLC0_WRITE_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_SHIFT                (0U)
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_CLRMSK               (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_DATA_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL                 (0x0210U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_SHIFT     (0U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_STALL                       (0x0218U)
+#define VHA_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_STALL                      (0x0220U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_ID_STALL                    (0x0228U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_SHIFT        (0U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_CLRMSK       (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL                   (0x0230U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1                   (0x0238U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1                   (0x0240U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2                   (0x0248U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2                   (0x0250U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3                   (0x0258U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3                   (0x0260U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4                   (0x0268U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4                   (0x0270U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_RESET_FULL
+*/
+#define VHA_CR_PERF_RESET_FULL                            (0x0278U)
+#define VHA_CR_PERF_RESET_FULL_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_RESET_FULL_RANGE_SHIFT                (0U)
+#define VHA_CR_PERF_RESET_FULL_RANGE_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_PERF_RESET_FULL_RANGE_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_ENABLE_FULL
+*/
+#define VHA_CR_PERF_ENABLE_FULL                           (0x0280U)
+#define VHA_CR_PERF_ENABLE_FULL_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_SHIFT               (0U)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_EN                  (0X00000001U)
+
+
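Editor's note: the RESET/ENABLE pair above gates the whole SLC0 counter bank, which suggests the usual reset-enable-run-read flow sketched below. Accessors are the same hypothetical stand-ins; whether the counters saturate or wrap is not stated in the header.

    #include <stdint.h>
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);              /* hypothetical */
    extern void write_reg64(uint32_t offset, uint64_t value); /* hypothetical */

    static void vha_perf_start(void)
    {
        write_reg64(VHA_CR_PERF_RESET_FULL,  VHA_CR_PERF_RESET_FULL_RANGE_EN);
        write_reg64(VHA_CR_PERF_ENABLE_FULL, VHA_CR_PERF_ENABLE_FULL_RANGE_EN);
    }

    static uint32_t vha_perf_slc0_reads(void)
    {
        /* 32-bit count in the low word (MASKFULL is 0xFFFFFFFF). */
        return (uint32_t)read_reg64(VHA_CR_PERF_SLC0_READ);
    }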
+/*
+    Register VHA_CR_MMU_STATUS
+*/
+#define VHA_CR_MMU_STATUS                                 (0x0288U)
+#define VHA_CR_MMU_STATUS_MASKFULL                        (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_SHIFT               (40U)
+#define VHA_CR_MMU_STATUS_MMU_STALLED_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define VHA_CR_MMU_STATUS_PM_WRITES_SHIFT                 (38U)
+#define VHA_CR_MMU_STATUS_PM_WRITES_CLRMSK                (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PM_READS_SHIFT                  (36U)
+#define VHA_CR_MMU_STATUS_PM_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PC_READS_SHIFT                  (24U)
+#define VHA_CR_MMU_STATUS_PC_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_MMU_STATUS_PD_READS_SHIFT                  (12U)
+#define VHA_CR_MMU_STATUS_PD_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_MMU_STATUS_PT_READS_SHIFT                  (0U)
+#define VHA_CR_MMU_STATUS_PT_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
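Editor's note: MMU_STATUS packs three 12-bit per-level read counts plus flag bits, so the decode is three mask-and-shift operations and one bit test. A sketch, assuming the same hypothetical accessor; the precise meaning of the PC/PD/PT counts (outstanding vs. cumulative) is a TRM detail not captured in the header.

    #include <stdint.h>

    #ifndef IMG_UINT64_C
    #define IMG_UINT64_C(c) c##ULL  /* fallback; the SDK normally provides this */
    #endif
    #include "vha_cr_aura.h"

    extern uint64_t read_reg64(uint32_t offset);  /* hypothetical */

    static void vha_mmu_snapshot(unsigned *pc, unsigned *pd, unsigned *pt,
                                 int *stalled)
    {
        uint64_t v = read_reg64(VHA_CR_MMU_STATUS);

        /* Per-level page-table read activity. */
        *pc = (v & ~VHA_CR_MMU_STATUS_PC_READS_CLRMSK) >> VHA_CR_MMU_STATUS_PC_READS_SHIFT;
        *pd = (v & ~VHA_CR_MMU_STATUS_PD_READS_CLRMSK) >> VHA_CR_MMU_STATUS_PD_READS_SHIFT;
        *pt = (v & ~VHA_CR_MMU_STATUS_PT_READS_CLRMSK) >> VHA_CR_MMU_STATUS_PT_READS_SHIFT;
        *stalled = (v & VHA_CR_MMU_STATUS_MMU_STALLED_EN) != 0;
    }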
+/*
+    Register VHA_CR_SLC_STATUS1
+*/
+#define VHA_CR_SLC_STATUS1                                (0x0290U)
+#define VHA_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT        (48U)
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK       (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS2
+*/
+#define VHA_CR_SLC_STATUS2                                (0x0298U)
+#define VHA_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_IDLE
+*/
+#define VHA_CR_SLC_IDLE                                   (0x02A0U)
+#define VHA_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT              (12U)
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK             (0XFFFF0FFFU)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (4U)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFF00FU)
+#define VHA_CR_SLC_IDLE_MMU_SHIFT                         (3U)
+#define VHA_CR_SLC_IDLE_MMU_CLRMSK                        (0XFFFFFFF7U)
+#define VHA_CR_SLC_IDLE_MMU_EN                            (0X00000008U)
+#define VHA_CR_SLC_IDLE_CCM_SHIFT                         (2U)
+#define VHA_CR_SLC_IDLE_CCM_CLRMSK                        (0XFFFFFFFBU)
+#define VHA_CR_SLC_IDLE_CCM_EN                            (0X00000004U)
+#define VHA_CR_SLC_IDLE_RDI_SHIFT                         (1U)
+#define VHA_CR_SLC_IDLE_RDI_CLRMSK                        (0XFFFFFFFDU)
+#define VHA_CR_SLC_IDLE_RDI_EN                            (0X00000002U)
+#define VHA_CR_SLC_IDLE_XBAR_SHIFT                        (0U)
+#define VHA_CR_SLC_IDLE_XBAR_CLRMSK                       (0XFFFFFFFEU)
+#define VHA_CR_SLC_IDLE_XBAR_EN                           (0X00000001U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS3
+*/
+#define VHA_CR_SLC_STATUS3                                (0x02A8U)
+#define VHA_CR_SLC_STATUS3_MASKFULL                       (IMG_UINT64_C(0x0FFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_SHIFT (50U)
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XF003FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_SHIFT (40U)
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFC00FFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_SHIFT (30U)
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_SHIFT (20U)
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC00FFFFF))
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_SHIFT (10U)
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF003FF))
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_SHIFT (0U)
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFC00))
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_STATUS
+*/
+#define VHA_CR_SLC_FAULT_STOP_STATUS                      (0x02B0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT            (0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK           (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS_DEBUG
+*/
+#define VHA_CR_SLC_STATUS_DEBUG                           (0x02B8U)
+#define VHA_CR_SLC_STATUS_DEBUG_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT         (16U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK        (0X0000FFFFU)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT      (0U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_OUTSTANDING_READ
+*/
+#define VHA_CR_BIF_OUTSTANDING_READ                       (0x02C0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT         (0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_PAGE_FAULT_STALL
+*/
+#define VHA_CR_BIF_PAGE_FAULT_STALL                       (0x02C8U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_SHIFT          (0U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_CLRMSK         (0XFFFFFFFEU)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_EN             (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC
+*/
+#define VHA_CR_PERF_SLC                                   (0x02D0U)
+#define VHA_CR_PERF_SLC_MASKFULL                          (IMG_UINT64_C(0x000000000FEFFEFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_SHIFT (27U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XF7FFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_EN  (0X08000000U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_SHIFT    (26U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_CLRMSK   (0XFBFFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_EN       (0X04000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_SHIFT (25U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_EN  (0X02000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_SHIFT    (24U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_CLRMSK   (0XFEFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_EN       (0X01000000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_SHIFT (23U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_EN  (0X00800000U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (22U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00400000U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (21U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFDFFFFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00200000U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_SHIFT (19U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFF7FFFFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_EN  (0X00080000U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_SHIFT (18U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFBFFFFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_EN  (0X00040000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_SHIFT (17U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFDFFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_EN (0X00020000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_SHIFT (16U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_EN (0X00010000U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_SHIFT (15U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFF7FFFU)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_EN (0X00008000U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_SHIFT (14U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_EN (0X00004000U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_SHIFT (13U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFDFFFU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_EN (0X00002000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_SHIFT (12U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFFEFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_EN  (0X00001000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_SHIFT    (11U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_CLRMSK   (0XFFFFF7FFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_EN       (0X00000800U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_SHIFT   (10U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFBFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_EN      (0X00000400U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_SHIFT   (9U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFDFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_EN      (0X00000200U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_SHIFT    (7U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_CLRMSK   (0XFFFFFF7FU)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_EN       (0X00000080U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_SHIFT    (6U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_CLRMSK   (0XFFFFFFBFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_EN       (0X00000040U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_SHIFT  (5U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_CLRMSK (0XFFFFFFDFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_EN     (0X00000020U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_SHIFT (4U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_EN    (0X00000010U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_SHIFT (3U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_EN   (0X00000008U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_SHIFT (2U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_EN   (0X00000004U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_SHIFT (1U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_EN   (0X00000002U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_REQ_COUNT
+*/
+#define VHA_CR_PERF_SLC_REQ_COUNT                         (0x02D8U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_MASKFULL                (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD                        (0x02E0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_CLRMSK         (0X00000000U)
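+
+/*
+    Usage sketch (illustrative): the SLC performance counters appear to
+    be gated globally by VHA_CR_PERF_SLC_REQ_COUNT and zeroed per counter
+    through the *_RESET_EN bits of VHA_CR_PERF_SLC, with each count read
+    back from its own 32-bit register. The pulse-style reset semantics
+    and the readq()/writeq() accessors are assumptions.
+*/
+static inline u32 vha_example_count_cmd_reads(void __iomem *regs)
+{
+	/* Zero the CMD read counter, then enable request counting. */
+	writeq(VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN,
+	       regs + VHA_CR_PERF_SLC);
+	writeq(VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN,
+	       regs + VHA_CR_PERF_SLC_REQ_COUNT);
+	/* ... run a workload, then read the accumulated count ... */
+	return (u32)readq(regs + VHA_CR_PERF_SLC_CMD_REQ_RD);
+}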
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR                    (0x02E8U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_CLRMSK     (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR                    (0x02F0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_CLRMSK     (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR                    (0x02F8U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_CLRMSK     (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE                     (0x0300U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_SHIFT       (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD                      (0x0308U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_CLRMSK       (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD                        (0x0310U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR                        (0x0318U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD                       (0x0328U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_CLRMSK        (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD                       (0x0330U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_CLRMSK        (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR                        (0x0338U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD                   (0x0340U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD               (0x0348U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD               (0x0350U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD               (0x0358U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD                (0x0360U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_SHIFT  (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD                 (0x0368U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_CLRMSK  (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD                   (0x0370U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD                   (0x0378U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD                  (0x0388U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD                  (0x0390U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD                   (0x0398U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD                        (0x03A0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD                   (0x03A8U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD                        (0x03B0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD                   (0x03B8U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_BIF_RTN_FIFO_WORD_COUNT
+*/
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT                    (0x03C0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_MASKFULL           (IMG_UINT64_C(0x00000000000001FF))
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_SHIFT      (0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_CLRMSK     (0XFFFFFE00U)
+
+
+#define VHA_CR_CLK_CTRL0_MODE_MASK                        (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_CLK_CTRL0_MODE_OFF                         (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_CLK_CTRL0_MODE_ON                          (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_CLK_CTRL0_MODE_AUTO                        (0x00000002U)
+
+
+/*
+    Register VHA_CR_CLK_CTRL0
+*/
+#define VHA_CR_CLK_CTRL0                                  (0x2000U)
+#define VHA_CR_CLK_CTRL0_MASKFULL                         (IMG_UINT64_C(0xF3FFFFFF3F000330))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_SHIFT              (62U)
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_CLRMSK             (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_OFF                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_ON                 (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_AUTO               (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_MMM_SHIFT                    (60U)
+#define VHA_CR_CLK_CTRL0_CNN_MMM_CLRMSK                   (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_ON                       (IMG_UINT64_C(0x1000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_MMM_AUTO                     (IMG_UINT64_C(0x2000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_SHIFT                    (56U)
+#define VHA_CR_CLK_CTRL0_CNN_EWO_CLRMSK                   (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_ON                       (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_AUTO                     (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_SHIFT                   (54U)
+#define VHA_CR_CLK_CTRL0_CNN_PACK_CLRMSK                  (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_ON                      (IMG_UINT64_C(0x0040000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_AUTO                    (IMG_UINT64_C(0x0080000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_SHIFT                    (52U)
+#define VHA_CR_CLK_CTRL0_CNN_OIN_CLRMSK                   (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_ON                       (IMG_UINT64_C(0x0010000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_AUTO                     (IMG_UINT64_C(0x0020000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_SHIFT                   (50U)
+#define VHA_CR_CLK_CTRL0_CNN_POOL_CLRMSK                  (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_ON                      (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_AUTO                    (IMG_UINT64_C(0x0008000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_SHIFT                     (48U)
+#define VHA_CR_CLK_CTRL0_CNN_SB_CLRMSK                    (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_SB_OFF                       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_SB_ON                        (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_AUTO                      (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_SHIFT                   (46U)
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_CLRMSK                  (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_ON                      (IMG_UINT64_C(0x0000400000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_AUTO                    (IMG_UINT64_C(0x0000800000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_SHIFT                   (44U)
+#define VHA_CR_CLK_CTRL0_CNN_NORM_CLRMSK                  (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_ON                      (IMG_UINT64_C(0x0000100000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_AUTO                    (IMG_UINT64_C(0x0000200000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_SHIFT                    (42U)
+#define VHA_CR_CLK_CTRL0_CNN_ACT_CLRMSK                   (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_ON                       (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_AUTO                     (IMG_UINT64_C(0x0000080000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_SHIFT                  (40U)
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_CLRMSK                 (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_ON                     (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_AUTO                   (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_SHIFT                    (38U)
+#define VHA_CR_CLK_CTRL0_CNN_CNV_CLRMSK                   (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_ON                       (IMG_UINT64_C(0x0000004000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_AUTO                     (IMG_UINT64_C(0x0000008000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_SHIFT                   (36U)
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_ON                      (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_AUTO                    (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_SHIFT                   (34U)
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_ON                      (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_AUTO                    (IMG_UINT64_C(0x0000000800000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_SHIFT                    (32U)
+#define VHA_CR_CLK_CTRL0_CNN_CMD_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_ON                       (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_AUTO                     (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SHIFT                        (28U)
+#define VHA_CR_CLK_CTRL0_CNN_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ON                           (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_CTRL0_CNN_AUTO                         (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_SHIFT                  (26U)
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_ON                     (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_AUTO                   (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_SHIFT                  (24U)
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_ON                     (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_AUTO                   (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_CLK_CTRL0_SLC_SHIFT                        (8U)
+#define VHA_CR_CLK_CTRL0_SLC_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_CLK_CTRL0_SLC_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_SLC_ON                           (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_CTRL0_SLC_AUTO                         (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_CLK_CTRL0_BIF_SHIFT                        (4U)
+#define VHA_CR_CLK_CTRL0_BIF_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_CLK_CTRL0_BIF_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_BIF_ON                           (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_CLK_CTRL0_BIF_AUTO                         (IMG_UINT64_C(0x0000000000000020))  
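+
+/*
+    Usage sketch (illustrative): each domain in VHA_CR_CLK_CTRL0 is a
+    two-bit OFF/ON/AUTO field, so changing a mode is a read-modify-write
+    that clears the field with its CLRMSK and ORs in the new mode value.
+    The register accessors are the same assumptions as in the earlier
+    examples.
+*/
+static inline void vha_example_cnn_clk_auto(void __iomem *regs)
+{
+	u64 v = readq(regs + VHA_CR_CLK_CTRL0);
+
+	v &= VHA_CR_CLK_CTRL0_CNN_CLRMSK;	/* clear the CNN mode bits */
+	v |= VHA_CR_CLK_CTRL0_CNN_AUTO;		/* gate the clock automatically */
+	writeq(v, regs + VHA_CR_CLK_CTRL0);
+}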
+
+
+/*
+    Register VHA_CR_VHA_AXI_RESET_CTRL
+*/
+#define VHA_CR_VHA_AXI_RESET_CTRL                         (0x2008U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_SHIFT (0U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_RESET_CTRL
+*/
+#define VHA_CR_RESET_CTRL                                 (0x2010U)
+#define VHA_CR_RESET_CTRL_MASKFULL                        (IMG_UINT64_C(0x00000000C0000107))
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_SHIFT        (31U)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_CLRMSK       (0X7FFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_EN           (0X80000000U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_SHIFT        (30U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_CLRMSK       (0XBFFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_EN           (0X40000000U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_SHIFT       (8U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_CLRMSK      (0XFFFFFEFFU)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_EN          (0X00000100U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_SHIFT        (2U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_CLRMSK       (0XFFFFFFFBU)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_EN           (0X00000004U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_SHIFT        (1U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_EN           (0X00000002U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_SHIFT            (0U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_EN               (0X00000001U)
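+
+/*
+    Usage sketch (illustrative): the RESET_CTRL bits read as active-high
+    soft resets, so a unit would be reset by setting its *_EN bit and
+    then clearing it again. Whether a settle delay is needed between the
+    two writes is hardware specific and assumed here.
+*/
+static inline void vha_example_soft_reset_cnn0(void __iomem *regs)
+{
+	writeq(VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_EN,
+	       regs + VHA_CR_RESET_CTRL);	/* assert CNN0 soft reset */
+	writeq(0, regs + VHA_CR_RESET_CTRL);	/* release it again */
+}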
+
+
+/*
+    Register VHA_CR_CNN_CMD_MH_CONTROL
+*/
+#define VHA_CR_CNN_CMD_MH_CONTROL                         (0x2018U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x0000000000000034))
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (4U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFCFU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_SHIFT  (2U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_EN     (0X00000004U)
+
+
+/*
+    Register VHA_CR_CNN_IBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_IBUF_MH_CONTROL                        (0x2020U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_CBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_CBUF_MH_CONTROL                        (0x2028U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_ABUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_ABUF_MH_CONTROL                        (0x2030U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_OUTPACK_MH_CONTROL
+*/
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL                     (0x2038U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MASKFULL            (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_ELEMENTOPS_MH_CONTROL
+*/
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL                  (0x2040U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MASKFULL         (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_MMM_MH_CONTROL
+*/
+#define VHA_CR_CNN_MMM_MH_CONTROL                         (0x2048U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (3U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_TRS_A_MH_CONTROL
+*/
+#define VHA_CR_CNN_TRS_A_MH_CONTROL                       (0x2050U)
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MASKFULL              (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_TRS_B_MH_CONTROL
+*/
+#define VHA_CR_CNN_TRS_B_MH_CONTROL                       (0x2058U)
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MASKFULL              (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_DWPE_MH_CONTROL
+*/
+#define VHA_CR_CNN_DWPE_MH_CONTROL                        (0x2060U)
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_FUSA_CONTROL
+*/
+#define VHA_CR_FUSA_CONTROL                               (0x2090U)
+#define VHA_CR_FUSA_CONTROL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_SHIFT           (0U)
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_PM_VFP_TRAN_EN
+*/
+#define VHA_CR_PM_VFP_TRAN_EN                             (0x2100U)
+#define VHA_CR_PM_VFP_TRAN_EN_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PM_VFP_TRAN_EN_OP_SHIFT                    (0U)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_CLRMSK                   (0XFFFFFFFEU)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_EN                       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH                   (0x2118U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_SHIFT         (0U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_CLRMSK        (0X00000000U)
+
+
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_MASK     (0x00000003U)
+/*
+The WDT is disabled */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_NONE     (0x00000000U)
+/*
+The WDT is cleared when the CMD parser starts a pass or is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS (0x00000001U)
+/*
+The WDT is cleared when the CMD parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK     (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_CTRL
+*/
+#define VHA_CR_CNN_MEM_WDT_CTRL                           (0x2120U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MASKFULL                  (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_SHIFT                (0U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_NONE                 (0X00000000U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK_PASS            (0X00000001U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK                 (0X00000002U)
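+
+/*
+    Usage sketch (illustrative): the memory watchdog is armed by loading
+    a 32-bit compare value into VHA_CR_CNN_MEM_WDT_COMPAREMATCH and then
+    selecting when the counter is cleared through the MODE field. The
+    compare value below is a placeholder; the units it is counted in are
+    not stated here.
+*/
+static inline void vha_example_mem_wdt_setup(void __iomem *regs)
+{
+	writeq(0x00100000, regs + VHA_CR_CNN_MEM_WDT_COMPAREMATCH);
+	/* Clear the counter whenever the CMD parser is kicked. */
+	writeq(VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK,
+	       regs + VHA_CR_CNN_MEM_WDT_CTRL);
+}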
+
+
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_MASK       (0x00000003U)
+/*
+The WDT is cleared when the CMD parser starts a pass or is kicked */
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_KICK_PASS  (0x00000001U)
+/*
+The WDT is cleared when the CMD parser starts a layer group or is kicked */
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_KICK_LAYER (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_CTRL
+*/
+#define VHA_CR_CNN_HL_WDT_CTRL                            (0x2128U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_SHIFT                 (0U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_CLRMSK                (0XFFFFFFFCU)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_KICK_PASS             (0X00000001U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_KICK_LAYER            (0X00000002U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH                    (0x2130U)
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_REG_SHIFT          (0U)
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_REG_CLRMSK         (0X00000000U)
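+
+/*
+    Usage sketch (illustrative): the high-level watchdog is programmed
+    the same way as the memory watchdog above; its KICK_LAYER mode
+    clears the counter when a layer group starts rather than when a
+    pass starts. The compare value is again a placeholder.
+*/
+static inline void vha_example_hl_wdt_setup(void __iomem *regs)
+{
+	writeq(0x01000000, regs + VHA_CR_CNN_HL_WDT_COMPAREMATCH);
+	writeq(VHA_CR_CNN_HL_WDT_CTRL_MODE_KICK_LAYER,
+	       regs + VHA_CR_CNN_HL_WDT_CTRL);
+}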
+
+
+/*
+    Register VHA_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define VHA_CR_IDLE_HYSTERESIS_COUNT                      (0x2140U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_MASKFULL             (IMG_UINT64_C(0x0000001F00001F1F))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_SHIFT        (32U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_CLRMSK       (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_SHIFT        (8U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFE0FF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_SHIFT       (0U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register VHA_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define VHA_CR_SOCIF_WAKEUP_ENABLE                        (0x2148U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT           (0U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_RESET_CLK_CTRL
+*/
+#define VHA_CR_RESET_CLK_CTRL                             (0x2150U)
+#define VHA_CR_RESET_CLK_CTRL_MASKFULL                    (IMG_UINT64_C(0x00000000000003FF))
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_SHIFT               (8U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_CLRMSK              (0XFFFFFCFFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_SHIFT                (6U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_CLRMSK               (0XFFFFFF3FU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_SHIFT                (4U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_CLRMSK               (0XFFFFFFCFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_SHIFT               (2U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_CLRMSK              (0XFFFFFFF3U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_SHIFT              (0U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_CLRMSK             (0XFFFFFFFCU)
+
+
+#define VHA_CR_SYS_CLK_CTRL0_MODE_MASK                    (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_OFF                     (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_ON                      (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_AUTO                    (0x00000002U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_CTRL0
+*/
+#define VHA_CR_SYS_CLK_CTRL0                              (0x2158U)
+#define VHA_CR_SYS_CLK_CTRL0_MASKFULL                     (IMG_UINT64_C(0x0000000000000030))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_SHIFT                    (4U)
+#define VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_ON                       (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_AUTO                     (IMG_UINT64_C(0x0000000000000020))  
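+
+/*
+    Usage sketch (illustrative): VHA_CR_SYS_CLK_CTRL0 uses the same
+    two-bit OFF/ON/AUTO encoding as VHA_CR_CLK_CTRL0, so the same
+    read-modify-write pattern applies to its single SLC field.
+*/
+static inline void vha_example_sys_slc_clk_on(void __iomem *regs)
+{
+	u64 v = readq(regs + VHA_CR_SYS_CLK_CTRL0);
+
+	v &= VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK;	/* clear the SLC mode bits */
+	v |= VHA_CR_SYS_CLK_CTRL0_SLC_ON;	/* force the SLC clock on */
+	writeq(v, regs + VHA_CR_SYS_CLK_CTRL0);
+}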
+
+
+/*
+    Register VHA_CR_AXI_EXACCESS
+*/
+#define VHA_CR_AXI_EXACCESS                               (0x2168U)
+#define VHA_CR_AXI_EXACCESS_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT            (0U)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_REGBANK_REQUEST_INVALID
+*/
+#define VHA_CR_REGBANK_REQUEST_INVALID                    (0x2170U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT         (0U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY
+*/
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY             (0x2180U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_MASKFULL    (IMG_UINT64_C(0x000000000000019B))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS2_LIMIT_SHIFT (7U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS2_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE7F))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS1_LIMIT_SHIFT (3U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS1_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS0_LIMIT_SHIFT (0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS0_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL
+*/
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL               (0x2188U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_MASKFULL      (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_SHIFT (2U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_SHIFT (1U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_SHIFT (0U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_ARB_STALL_RATIO
+*/
+#define VHA_CR_CNN_ARB_STALL_RATIO                        (0x2200U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_MASKFULL               (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_SHIFT           (32U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_CLRMSK          (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_SHIFT      (28U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_SHIFT      (24U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_SHIFT      (20U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_SHIFT      (16U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_SHIFT      (12U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_SHIFT      (8U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_SHIFT      (4U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_SHIFT      (0U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_0
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0                 (0x2208U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_ACT_SHIFT (60U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_ACT_CLRMSK (IMG_UINT64_C(0X0FFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ACT_CORE_XBAR_SHIFT (56U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ACT_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XF0FFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_EWO_CORE_XBAR_SHIFT (52U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_EWO_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFF0FFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_TENSORB_CORE_XBAR_SHIFT (48U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_TENSORB_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFF0FFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_EWO_SHIFT (44U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_EWO_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_TENSORB_SHIFT (40U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_TENSORB_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_NORM_SHIFT (36U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_NORM_CLRMSK (IMG_UINT64_C(0XFFFFFF0FFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_NORM_POOL_SHIFT (32U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_NORM_POOL_CLRMSK (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CNV_ABUF_SHIFT  (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CNV_ABUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CNV_SHIFT  (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CNV_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_CORE_XBAR_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_IBUF_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_IBUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CORE_XBAR_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_CNV_SHIFT  (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_CNV_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_OUTPACK_SHIFT (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_OUTPACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_ABUF_SHIFT (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_ABUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_1
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1                 (0x2210U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_MASKFULL        (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_NORM_POOL_BYPASS_SHIFT (36U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_NORM_POOL_BYPASS_CLRMSK (IMG_UINT64_C(0XFFFFFF0FFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_B_XBAR_SHIFT (32U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_B_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_B_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_B_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_A_XBAR_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_A_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_A_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_A_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_OPK_SHIFT   (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_OPK_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_CORE_XBAR_OIN_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_CORE_XBAR_OIN_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_CORE_XBAR_SHIFT (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_SB_SHIFT   (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_SB_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_SB_SHIFT    (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_SB_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_ARB_CTRL
+*/
+#define VHA_CR_CNN_ARB_CTRL                               (0x2218U)
+#define VHA_CR_CNN_ARB_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000307))
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_SHIFT    (9U)
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_CLRMSK   (0XFFFFFDFFU)
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_EN       (0X00000200U)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_SHIFT        (8U)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_CLRMSK       (0XFFFFFEFFU)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_EN           (0X00000100U)
+#define VHA_CR_CNN_ARB_CTRL_MAX_PAGE_COUNT_MIN1_SHIFT     (0U)
+#define VHA_CR_CNN_ARB_CTRL_MAX_PAGE_COUNT_MIN1_CLRMSK    (0XFFFFFFF8U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0                 (0xE008U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
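+
+/*
+    Usage sketch (illustrative): a PM catalogue base is programmed by
+    packing a 4KB-aligned physical address into ADDR (the field occupies
+    the same bit positions as address bits 39:12), an initial page index
+    into INIT_PAGE, and setting VALID. The caller-supplied address and
+    page index, plus the writeq() accessor, are assumptions for the
+    example.
+*/
+static inline void vha_example_pm_cat_base(void __iomem *regs,
+					   u64 phys_addr, u64 init_page)
+{
+	u64 v = phys_addr & ~VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_CLRMSK;
+
+	v |= (init_page << VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_SHIFT) &
+	     ~VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_CLRMSK;
+	v |= VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_EN;
+	writeq(v, regs + VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0);
+}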
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1                 (0xE010U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2                 (0xE018U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3                 (0xE020U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0                  (0xE028U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1                  (0xE030U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2                  (0xE038U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3                  (0xE040U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0                     (0xE048U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0                 (0xE050U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1                 (0xE058U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2                 (0xE060U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3                 (0xE068U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0                  (0xE070U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1                  (0xE078U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2                  (0xE080U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3                  (0xE088U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1                     (0xE090U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
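+
+/* Usage sketch (illustrative only, not part of the generated register map).
+   The PM catalogue base registers above share the _SHIFT/_CLRMSK/_EN
+   encoding: a field value v is placed with ((v << _SHIFT) & ~_CLRMSK).
+   `pm_alist_base` (assumed 4KB-aligned) and `init_page` are hypothetical
+   inputs, and the ADDR field is assumed to carry the address at its
+   natural bit position:
+
+       uint64_t v = 0;
+       v |= pm_alist_base & ~VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_CLRMSK;
+       v |= ((uint64_t)init_page << VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT)
+            & ~VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK;
+       v |= VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_EN;
+*/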
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST            (0xE098U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST            (0xE0A0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST            (0xE0A8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST            (0xE0B0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST             (0xE0B8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST             (0xE0C0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST             (0xE0C8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST             (0xE0D0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST                (0xE0D8U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST            (0xE0E0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST            (0xE0E8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST            (0xE0F0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST            (0xE0F8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST             (0xE100U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST             (0xE108U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST             (0xE110U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST             (0xE118U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST                (0xE120U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSIZE (16384U)
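+
+/* Usage sketch (illustrative). The *_LAST registers above expose PAGE
+   fields with _ALIGNSHIFT/_ALIGNSIZE pairs: the field value is the byte
+   address right-shifted by _ALIGNSHIFT, so the address must be a multiple
+   of _ALIGNSIZE (16384 here). `last_addr` is a hypothetical input:
+
+       uint64_t page = last_addr >> VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSHIFT;
+       uint64_t v = (page << VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_SHIFT)
+                    & ~VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_CLRMSK;
+*/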
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS_META                      (0xE160U)
+#define VHA_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (62U)
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (56U)
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (48U)
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (4U)
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
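+
+/* Decode sketch (illustrative). Fields are extracted from a read value
+   with ((reg & ~_CLRMSK) >> _SHIFT); single-bit fields can be tested
+   against _EN. `read64` is a hypothetical register-read primitive:
+
+       uint64_t st   = read64(VHA_CR_MMU_FAULT_STATUS_META);
+       unsigned lvl  = (st & ~VHA_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK)
+                       >> VHA_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT;
+       uint64_t addr = (st & ~VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK)
+                       >> VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT;
+       int is_read   = (st & VHA_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0;
+       int faulted   = (st & VHA_CR_MMU_FAULT_STATUS_META_FAULT_EN) != 0;
+*/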
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS2_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS2_META                     (0xE198U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_MASKFULL            (IMG_UINT64_C(0x0000000000003FFF))
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT     (13U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK    (0XFFFFDFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN        (0X00002000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT   (12U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK  (0XFFFFEFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN      (0X00001000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT          (8U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT     (0U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_PM
+*/
+#define VHA_CR_MMU_FAULT_STATUS_PM                        (0xE130U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_MASKFULL               (IMG_UINT64_C(0x0000000007FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_SHIFT               (24U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT              (23U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_EN                 (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT          (3U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFF800007))
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT            (1U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT            (0U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_CTRL
+*/
+#define VHA_CR_MMU_ABORT_PM_CTRL                          (0xE188U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_SHIFT             (0U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_CLRMSK            (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_EN                (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_STATUS
+*/
+#define VHA_CR_MMU_ABORT_PM_STATUS                        (0xE190U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_SHIFT            (0U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_HOST_IRQ_ENABLE
+*/
+#define VHA_CR_MMU_HOST_IRQ_ENABLE                        (0xE1A0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_SHIFT         (0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_ONE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE                    (0xE350U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_TWO
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO                    (0xE358U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_THREE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE                  (0xE360U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL         (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT  (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT   (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT  (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR                   (0xE368U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL          (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT   (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK  (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT    (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT   (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U)
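+
+/* Usage sketch (illustrative). The four PAGE_SIZE_RANGE registers share
+   one layout: BASE_ADDR/END_ADDR take a byte address shifted down by the
+   21-bit _ALIGNSHIFT (2MB granularity). The PAGE_SIZE encoding is not
+   defined in this excerpt, so `page_size_code`, `base` and `end` are
+   hypothetical:
+
+       uint64_t v = 0;
+       v |= ((base >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT)
+             << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT)
+            & ~VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK;
+       v |= ((end >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)
+             << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT)
+            & ~VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK;
+       v |= ((uint64_t)page_size_code << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT)
+            & ~VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK;
+*/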
+
+
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_MASK               (0x00000003U)
+/*
+Reserved value */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_RESERVED           (0x00000000U)
+/*
+Addresses interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING     (0x00000001U)
+/*
+Addresses are interleaved between Cache Banks on a Cacheline boundary */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_LINEAR             (0x00000002U)
+/*
+Addresses interleaved between Cache Banks using an XOR hash of the address bits below the 4KB page granularity */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_IN_PAGE_HASH       (0x00000003U)
+
+
+/*
+    Register VHA_CR_SLC_CTRL
+*/
+#define VHA_CR_SLC_CTRL                                   (0xE200U)
+#define VHA_CR_SLC_CTRL_MASKFULL                          (IMG_UINT64_C(0x000000000001FFF3))
+#define VHA_CR_SLC_CTRL_ISCHED_CREDIT_THRESHOLD_SHIFT     (13U)
+#define VHA_CR_SLC_CTRL_ISCHED_CREDIT_THRESHOLD_CLRMSK    (0XFFFE1FFFU)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_SHIFT          (12U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_CLRMSK         (0XFFFFEFFFU)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_EN             (0X00001000U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_SHIFT          (8U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_SHIFT    (7U)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_CLRMSK   (0XFFFFFF7FU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_EN       (0X00000080U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_SHIFT                  (4U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_CLRMSK                 (0XFFFFFF8FU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_SHIFT                   (0U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK                  (0XFFFFFFFCU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_RESERVED                (0x00000000U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_PVR_V3_HASHING          (0X00000001U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_LINEAR                  (0X00000002U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_IN_PAGE_HASH            (0X00000003U)
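+
+/* Read-modify-write sketch (illustrative). _CLRMSK clears a field in
+   place, and the ENUM_* values above, shifted by _SHIFT, match the
+   register-local value macros (e.g. ENUM_HASH_MODE_LINEAR <<
+   HASH_MODE_SHIFT == VHA_CR_SLC_CTRL_HASH_MODE_LINEAR).
+   `read32`/`write32` are hypothetical register-access primitives:
+
+       uint32_t v = read32(VHA_CR_SLC_CTRL);
+       v = (v & VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK)
+           | (VHA_CR_SLC_CTRL_ENUM_HASH_MODE_LINEAR
+              << VHA_CR_SLC_CTRL_HASH_MODE_SHIFT);
+       write32(VHA_CR_SLC_CTRL, v);
+*/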
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_CTRL
+*/
+#define VHA_CR_SLC_FAULT_STOP_CTRL                        (0xE248U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_MASKFULL               (IMG_UINT64_C(0x000000000003FFFF))
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_SHIFT              (17U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_CLRMSK             (0XFFFDFFFFU)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_EN                 (0X00020000U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_SHIFT           (0U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING0
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0                     (0xE280U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_SHIFT        (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_CLRMSK       (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_SHIFT        (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_CLRMSK       (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_SHIFT        (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_SHIFT        (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_SHIFT        (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_SHIFT        (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_SHIFT        (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_SHIFT        (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_SHIFT        (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_SHIFT        (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING1
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1                     (0xE288U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_SHIFT       (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_CLRMSK      (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_SHIFT       (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_CLRMSK      (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_SHIFT       (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_SHIFT       (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_SHIFT       (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_SHIFT       (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_SHIFT       (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_SHIFT       (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_SHIFT       (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_SHIFT       (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
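+
+/* Usage sketch (illustrative). Each OSID occupies a 4-bit slot with 3
+   writable bits (per the 0x7777... MASKFULL); MAPPING0 holds OSIDs 0..15
+   at shift 4*n, MAPPING1 holds OSIDs 16..31 at shift 4*(n-16). Setting
+   one slot, with hypothetical `n` (0..15), `ctx` (0..7) and
+   `read64`/`write64`:
+
+       unsigned shift = 4u * n;
+       uint64_t v = read64(VHA_CR_MMU_OSID_CTXT_MAPPING0);
+       v = (v & ~((uint64_t)0x7 << shift)) | ((uint64_t)(ctx & 0x7) << shift);
+       write64(VHA_CR_MMU_OSID_CTXT_MAPPING0, v);
+*/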
+
+
+/*
+    Register VHA_CR_ACE_QOS_CTRL
+*/
+#define VHA_CR_ACE_QOS_CTRL                               (0xE310U)
+#define VHA_CR_ACE_QOS_CTRL_MASKFULL                      (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_SHIFT                (12U)
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_CLRMSK               (0XFFFF0FFFU)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_SHIFT                    (8U)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_CLRMSK                   (0XFFFFF0FFU)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_SHIFT                  (4U)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_CLRMSK                 (0XFFFFFF0FU)
+#define VHA_CR_ACE_QOS_CTRL_LOW_SHIFT                     (0U)
+#define VHA_CR_ACE_QOS_CTRL_LOW_CLRMSK                    (0XFFFFFFF0U)
+
+
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MASK (0x00000003U)
+/*
+Low */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_LOW (0x00000000U)
+/*
+Medium */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_HIGH (0x00000002U)
+/*
+Critical */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
+/*
+    Register VHA_CR_ACE_PRIORITY_MAPPING_CTRL
+*/
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL                  (0xE318U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MASKFULL         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_SHIFT        (62U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_MEDIUM       (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH         (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CRITICAL     (IMG_UINT64_C(0xc000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_SHIFT   (32U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_CLRMSK  (IMG_UINT64_C(0XC0000000FFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_SHIFT      (30U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_MEDIUM     (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_HIGH       (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_CRITICAL   (IMG_UINT64_C(0x00000000c0000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_SHIFT      (28U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_MEDIUM     (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_HIGH       (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_CRITICAL   (IMG_UINT64_C(0x0000000030000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_SHIFT       (26U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_LOW         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_MEDIUM      (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_HIGH        (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_CRITICAL    (IMG_UINT64_C(0x000000000c000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_SHIFT     (24U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_MEDIUM    (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_HIGH      (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CRITICAL  (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_SHIFT     (22U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_MEDIUM    (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_HIGH      (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CRITICAL  (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_SHIFT    (20U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_MEDIUM   (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_HIGH     (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CRITICAL (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_SHIFT        (18U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_MEDIUM       (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_HIGH         (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CRITICAL     (IMG_UINT64_C(0x00000000000c0000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_SHIFT    (16U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_MEDIUM   (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_HIGH     (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CRITICAL (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_SHIFT       (14U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_LOW         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_MEDIUM      (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_HIGH        (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CRITICAL    (IMG_UINT64_C(0x000000000000c000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_SHIFT       (12U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_LOW         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_MEDIUM      (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_HIGH        (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CRITICAL    (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_SHIFT     (10U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_MEDIUM    (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_HIGH      (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CRITICAL  (IMG_UINT64_C(0x0000000000000c00))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_SHIFT      (8U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_MEDIUM     (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_HIGH       (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CRITICAL   (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_SHIFT      (6U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_MEDIUM     (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_HIGH       (IMG_UINT64_C(0x0000000000000080))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CRITICAL   (IMG_UINT64_C(0x00000000000000c0))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_SHIFT      (4U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_MEDIUM     (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_HIGH       (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CRITICAL   (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_SHIFT      (2U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_MEDIUM     (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_HIGH       (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CRITICAL   (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_SHIFT      (0U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_MEDIUM     (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_HIGH       (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CRITICAL   (IMG_UINT64_C(0x0000000000000003))  
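+
+/* Usage sketch (illustrative). Unlike the _SHIFT/_CLRMSK pairs, the
+   per-requestor _LOW/_MEDIUM/_HIGH/_CRITICAL macros above are already
+   pre-shifted, so a full register value can be composed by OR-ing one
+   value per field (_LOW is 0, so unlisted fields default to low):
+
+       uint64_t v = VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH
+                  | VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_MEDIUM
+                  | VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CRITICAL;
+*/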
+
+
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x00000006U)
+/*
+Write-through Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_WRITE_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x00000007U)
+/*
+Write-back Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_WRITE_ALLOCATE (0x0000000fU)
+
+
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x0000000aU)
+/*
+Write-through Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_READ_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x0000000bU)
+/*
+Write-back Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_READ_ALLOCATE (0x0000000fU)
+
+
+/*
+Non-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_NON_SHAREABLE    (0x00000000U)
+/*
+System */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_SYSTEM           (0x00000001U)
+
+
+/*
+Inner-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_INNER_SHAREABLE   (0x00000000U)
+/*
+Outer-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_OUTER_SHAREABLE   (0x00000001U)
+
+
+/*
+    Register VHA_CR_ACE_CTRL
+*/
+#define VHA_CR_ACE_CTRL                                   (0xE320U)
+#define VHA_CR_ACE_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000007FCFFF))
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_SHIFT                   (19U)
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_CLRMSK                  (0XFF87FFFFU)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT              (15U)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK             (0XFFF87FFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN    (0X00004000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_SHIFT                 (8U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK                (0XFFFFF0FFU)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE     (0X00000100U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000200U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE  (0X00000300U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0X00000600U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0X00000E00U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0X00000700U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0X00000F00U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_SHIFT                 (4U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK                (0XFFFFFF0FU)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE     (0X00000010U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000020U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE  (0X00000030U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0X000000A0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0X000000E0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0X000000B0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0X000000F0U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_SHIFT                  (2U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK                 (0XFFFFFFF3U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_SHIFT                  (1U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_CLRMSK                 (0XFFFFFFFDU)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE        (0x00000000U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE        (0X00000002U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT              (0U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE      (0x00000000U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM             (0X00000001U)
+
+
+/*
+    Register VHA_CR_ACE_STATUS
+*/
+#define VHA_CR_ACE_STATUS                                 (0xE330U)
+#define VHA_CR_ACE_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_SHIFT             (28U)
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_CLRMSK            (0X0FFFFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_SHIFT             (24U)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_CLRMSK            (0XF0FFFFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_SHIFT             (20U)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_CLRMSK            (0XFF0FFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_SHIFT             (16U)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_CLRMSK            (0XFFF0FFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_SHIFT             (12U)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_CLRMSK            (0XFFFF0FFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_SHIFT             (8U)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_CLRMSK            (0XFFFFF0FFU)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_SHIFT             (4U)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_CLRMSK            (0XFFFFFF0FU)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_SHIFT             (0U)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_CLRMSK            (0XFFFFFFF0U)
+
+
+#define VHA_CR_SOC_AXI_ENUM_COH_MASK                      (0x00000003U)
+/*
+The SoC does not support any form of Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_NO_COHERENCY              (0x00000000U)
+/*
+The SoC supports ACE-Lite or I/O Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_ACE_LITE_COHERENCY        (0x00000001U)
+/*
+The SoC supports full ACE or 2-Way Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_FULL_ACE_COHERENCY        (0x00000002U)
+
+
+/*
+    Register VHA_CR_SOC_AXI
+*/
+#define VHA_CR_SOC_AXI                                    (0xE338U)
+#define VHA_CR_SOC_AXI_MASKFULL                           (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000008U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000004U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT            (0U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK           (0XFFFFFFFCU)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY     (0x00000000U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0X00000001U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0X00000002U)
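+
+/* Decode sketch (illustrative). COHERENCY_SUPPORT is a 2-bit field whose
+   values mirror the ENUM_COH_* constants above; assuming it reports an
+   SoC capability, it can be matched after masking (`read32` is a
+   hypothetical register-read primitive):
+
+       switch (read32(VHA_CR_SOC_AXI) & ~VHA_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK) {
+       case VHA_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY:       /* ... */ break;
+       case VHA_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY: /* ... */ break;
+       case VHA_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY: /* ... */ break;
+       }
+*/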
+
+
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_MASK            (0x00000003U)
+/*
+Addresses interleaved between Cache Banks using a weaved XOR hash of address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_WEAVED_HASH     (0x00000000U)
+/*
+Addresses interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING  (0x00000001U)
+
+
+/*
+    Register VHA_CR_L1_GLB_CTRL
+*/
+#define VHA_CR_L1_GLB_CTRL                                (0xE400U)
+#define VHA_CR_L1_GLB_CTRL_MASKFULL                       (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_SHIFT                (0U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_WEAVED_HASH          (0x00000000U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_PVR_V3_HASHING       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING2
+*/
+#define VHA_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define VHA_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING3
+*/
+#define VHA_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define VHA_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_FIX
+*/
+#define VHA_CR_SLC_FIX                                    (0xF0D8U)
+#define VHA_CR_SLC_FIX_MASKFULL                           (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_FIX_DISABLE_SHIFT                      (0U)
+#define VHA_CR_SLC_FIX_DISABLE_CLRMSK                     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_PWR_MAN_HYSTERESIS
+*/
+#define VHA_CR_PWR_MAN_HYSTERESIS                         (0xF100U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_SHIFT             (0U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_CLRMSK            (0XFFFFFFE0U)
+
+
+#define VHA_CR_CNN_MASK_CTRL_MASK                         (0x00000003U)
+/*
+No Masks Applied */
+#define VHA_CR_CNN_MASK_CTRL_NO_MASK                      (0x00000000U)
+/*
+Mask port with mask_level < 1 */
+#define VHA_CR_CNN_MASK_CTRL_MASK_L1                      (0x00000001U)
+/*
+Mask port with mask_level < 2 */
+#define VHA_CR_CNN_MASK_CTRL_MASK_L2                      (0x00000002U)
+/*
+Mask port with mask_level < 3 */
+#define VHA_CR_CNN_MASK_CTRL_MASK_L3                      (0x00000003U)
+
+
+#define VHA_CR_CNN_DEBUG_CTRL_MASK                        (0x00000003U)
+/*
+Debug is switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_DISABLE                     (0x00000000U)
+/*
+Debug is output at the end of each stream */
+#define VHA_CR_CNN_DEBUG_CTRL_STREAM                      (0x00000001U)
+/*
+Debug is output at the end of each layer */
+#define VHA_CR_CNN_DEBUG_CTRL_LAYER                       (0x00000002U)
+/*
+Debug is output at the end of each pass */
+#define VHA_CR_CNN_DEBUG_CTRL_PASS                        (0x00000003U)
+
+
+#define VHA_CR_CNN_PRELOAD_CTRL_MASK                      (0x00000007U)
+/*
+Preloads are switched off */
+#define VHA_CR_CNN_PRELOAD_CTRL_DISABLE                   (0x00000000U)
+/*
+Preloads are triggered 64 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_64                      (0x00000001U)
+/*
+Preloads are triggered 128 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_128                     (0x00000002U)
+/*
+Preloads are triggered 192 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_192                     (0x00000003U)
+/*
+Preloads are triggered 256 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_256                     (0x00000004U)
+/*
+Preloads are triggered 320 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_320                     (0x00000005U)
+/*
+Preloads are triggered 384 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_384                     (0x00000006U)
+/*
+Preloads are triggered 448 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_448                     (0x00000007U)
+
+
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PARITY_ERROR_SHIFT  (29U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PARITY_ERROR_EN     (0X20000000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_PARITY_ERROR_SHIFT      (28U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_PARITY_ERROR_CLRMSK     (0XEFFFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_PARITY_ERROR_EN         (0X10000000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_INIT_DONE_SHIFT     (25U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_INIT_DONE_CLRMSK    (0XFDFFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_INIT_DONE_EN        (0X02000000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_DETECTION_SHIFT     (24U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_DETECTION_CLRMSK    (0XFEFFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_DETECTION_EN        (0X01000000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_CORRECTION_SHIFT    (23U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_CORRECTION_CLRMSK   (0XFF7FFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ECC_CORRECTION_EN       (0X00800000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_LOCKSTEP_ERROR_SHIFT    (22U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_LOCKSTEP_ERROR_CLRMSK   (0XFFBFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_LOCKSTEP_ERROR_EN       (0X00400000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_SHIFT             (21U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_CLRMSK            (0XFFDFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_EN                (0X00200000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_SHIFT             (20U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_CLRMSK            (0XFFEFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_EN                (0X00100000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_SHIFT            (19U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_CLRMSK           (0XFFF7FFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_EN               (0X00080000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_SHIFT         (18U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_CLRMSK        (0XFFFBFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_EN            (0X00040000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_SHIFT    (16U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_CLRMSK   (0XFFFEFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_EN       (0X00010000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_SHIFT      (3U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_CLRMSK     (0XFFFFFFF7U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_EN         (0X00000008U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_SHIFT        (1U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_EN           (0X00000002U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_SHIFT     (0U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_CLRMSK    (0XFFFFFFFEU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_EN        (0X00000001U)
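+
+/*
+   Usage sketch (illustrative): the fields above follow the generated
+   _SHIFT/_CLRMSK pattern; single-bit fields also get an _EN constant equal
+   to the bit itself, with _CLRMSK as its bitwise inverse. Testing and
+   locally clearing one event from a previously read register value:
+
+     uint32_t evt = ...;   // raw event-type register read
+     if (evt & VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_EN) {
+         // CNN0 finished; drop the bit from the local copy
+         evt &= VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_CLRMSK;
+     }
+*/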
+
+
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_PARITY_SHIFT         (31U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_PARITY_CLRMSK        (0X7FFFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_PARITY_EN            (0X80000000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_PARITY_ERROR_EN  (0X10000000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_INIT_DONE_EN (0X02000000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_DETECTION_EN (0X01000000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_READY_SHIFT      (21U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_READY_CLRMSK     (0XFFDFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_READY_EN         (0X00200000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ERROR_SHIFT      (20U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ERROR_CLRMSK     (0XFFEFFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_ERROR_EN         (0X00100000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_HL_WDT_SHIFT     (19U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_HL_WDT_CLRMSK    (0XFFF7FFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_HL_WDT_EN        (0X00080000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_AXI_ERROR_SHIFT  (18U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_AXI_ERROR_CLRMSK (0XFFFBFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_AXI_ERROR_EN     (0X00040000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_MEM_WDT_EN  (0X00000008U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_ERROR_SHIFT (1U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_ERROR_EN    (0X00000002U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_COMPLETE_EN (0X00000001U)
+
+
+/*
+Memory buffer will be used for MODEL only (CBUF, CMD, DEBUG, PERF) */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_MODEL_ONLY               (0x00000000U)
+/*
+Memory buffer will be used for IO only (OUTPACK, IBUF, ABUF, EWO) or for both MODEL and IO */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_IO_OR_SHARED             (0x00000001U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CONTROL                            (0x10000U)
+#define VHA_CR_OS0_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS0_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS0_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_CNN_CONTROL_START_EN                   (0X00000001U)
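+
+/*
+   Usage sketch (illustrative; vha_write32() is a hypothetical MMIO helper,
+   and, going by its name, CMD_SIZE_MIN1 holds the command stream size
+   minus one). Building a control word and kicking off processing:
+
+     uint32_t ctrl = 0;   // priority, cmd_size: caller-supplied
+     ctrl |= (priority & 0x3U) << VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT;
+     ctrl |= ((cmd_size - 1) & 0x3FU)
+                 << VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT;
+     ctrl |= VHA_CR_OS0_CNN_CONTROL_START_EN;
+     vha_write32(VHA_CR_OS0_CNN_CONTROL, ctrl);
+*/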
+
+
+#define VHA_CR_OS0_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed */
+#define VHA_CR_OS0_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher-priority request from another host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not been started */
+#define VHA_CR_OS0_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_STATUS
+*/
+#define VHA_CR_OS0_CNN_STATUS                             (0x10008U)
+#define VHA_CR_OS0_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C1FFFFFF))
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_IDLE          (0X00000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_SHIFT                (24U)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_CLRMSK               (0XFEFFFFFFU)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_EN                   (0X01000000U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFF0000FFU)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
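+
+/*
+   Usage sketch (illustrative): fields are extracted by masking with the
+   inverse of _CLRMSK and shifting down by _SHIFT:
+
+     uint32_t status  = ...;   // raw VHA_CR_OS0_CNN_STATUS read
+     uint32_t layers  = (status & ~VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK)
+                            >> VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT;
+     uint32_t streams = (status & ~VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_CLRMSK)
+                            >> VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_SHIFT;
+     int running = (status & ~VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK)
+                       == VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_RUN;
+*/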
+
+
+/*
+    Register VHA_CR_OS0_CNN_STATUS2
+*/
+#define VHA_CR_OS0_CNN_STATUS2                            (0x10010U)
+#define VHA_CR_OS0_CNN_STATUS2_MASKFULL                   (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_OS0_CNN_STATUS2_PASS_COUNT_SHIFT           (0U)
+#define VHA_CR_OS0_CNN_STATUS2_PASS_COUNT_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS                   (0x10020U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (7U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000007F))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED                   (0x10038U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
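+
+/*
+   Usage sketch (illustrative): bits 15:0 carry the per-slot _USED flags and
+   bits 31:16 the matching _BUF_TYPE flags. Declaring alternative address 0
+   in use as an IO (or shared) buffer:
+
+     uint64_t used = 0;
+     used |= VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN;
+     used |= VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED;
+*/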
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0                       (0x10040U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1                       (0x10048U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2                       (0x10050U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3                       (0x10058U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4                       (0x10060U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5                       (0x10068U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6                       (0x10070U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7                       (0x10078U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL                  (0x10080U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS0_VHA_EVENT_ENABLE                       (0x10088U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS0_VHA_EVENT_STATUS                       (0x10090U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000B3FD000B))
+#define VHA_CR_OS0_VHA_EVENT_STATUS_PARITY_SHIFT          (31U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_PARITY_CLRMSK         (0X7FFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_PARITY_EN             (0X80000000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS0_VHA_EVENT_CLEAR                        (0x10098U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_EN    (0X10000000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_EN   (0X02000000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_EN   (0X01000000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_EN  (0X00800000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_EN  (0X00400000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
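+
+/*
+   Usage sketch (illustrative; vha_read32()/vha_write32() are hypothetical
+   MMIO helpers, not part of this header): ENABLE, STATUS and CLEAR share
+   one bit layout, so an interrupt handler can read the status, service
+   each set bit, and write the value back to the CLEAR register. Masking
+   with CLEAR's MASKFULL also drops the PARITY bit that exists only in
+   STATUS:
+
+     uint32_t events = vha_read32(VHA_CR_OS0_VHA_EVENT_STATUS);
+     if (events & VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN) {
+         // ... service the completed CNN0 workload ...
+     }
+     vha_write32(VHA_CR_OS0_VHA_EVENT_CLEAR,
+                 events & (uint32_t)VHA_CR_OS0_VHA_EVENT_CLEAR_MASKFULL);
+*/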
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CRC_CONTROL                        (0x10100U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CRC_ADDRESS                        (0x10108U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (7U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (128U)
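+
+/*
+   Usage sketch (illustrative): _ALIGNSHIFT/_ALIGNSIZE document the field's
+   implied alignment; the CRC buffer address must be 128-byte aligned, and
+   bits 39:7 of it land in the register unshifted (SHIFT == ALIGNSHIFT):
+
+     uint64_t reg = crc_dev_addr          // assumed 128-byte aligned
+                    & ~VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK;
+*/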
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS                      (0x10110U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (7U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (128U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS0_CNN_DEBUG_SIZE                         (0x10118U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL                      (0x10120U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (0X00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (0X00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_STATUS                       (0x10128U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL                    (0x10130U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
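+
+/*
+   Usage sketch (illustrative): one 3-bit N_REQS field per requestor, all
+   packed into a single control word. Enabling 128-request preloads for
+   IBUF and CBUF while leaving every other requestor disabled (the DISABLE
+   encodings are zero):
+
+     uint64_t preload = 0;
+     preload |= VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128;
+     preload |= VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128;
+*/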
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8                       (0x10140U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9                       (0x10148U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10                      (0x10150U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11                      (0x10158U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12                      (0x10160U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13                      (0x10168U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14                      (0x10170U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15                      (0x10178U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS0_CNN_PERFORMANCE                        (0x101A0U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_CLRMSK           (0X00000000U)
+
+
+/*
+    Register VHA_CR_OS0_OCM_BASE_ADDR
+*/
+#define VHA_CR_OS0_OCM_BASE_ADDR                          (0x10200U)
+#define VHA_CR_OS0_OCM_BASE_ADDR_MASKFULL                 (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_OCM_BASE_ADDR_BASE_ADDR_SHIFT          (0U)
+#define VHA_CR_OS0_OCM_BASE_ADDR_BASE_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR
+*/
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR          (0x10208U)
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_SHIFT (0U)
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_SAVE_RESTORE_CTRL
+*/
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL                      (0x10210U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_SHIFT         (1U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_DISABLE       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_64          (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_128         (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_192         (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_256         (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_320         (IMG_UINT64_C(0x000000000000000a))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_384         (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_448         (IMG_UINT64_C(0x000000000000000e))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_SHIFT (0U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_MASK_CTRL
+*/
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL                      (0x10300U)
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_MASKFULL             (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_NO_MASK        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_MASK_L1        (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_MASK_L2        (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_MASK_L3        (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL                         (0x1E000U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS                  (0x1E038U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000080000001))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_SHIFT     (31U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_CLRMSK    (0X7FFFFFFFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_EN        (0X80000000U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
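+
+/*
+   Usage sketch (illustrative; vha_read32()/vha_write32() are hypothetical
+   MMIO helpers): request an invalidation of all contexts at every page
+   structure level, then poll until PENDING drops:
+
+     vha_write32(VHA_CR_OS0_MMU_CTRL_INVAL,
+                 VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN);
+     while (vha_read32(VHA_CR_OS0_MMU_CTRL_INVAL_STATUS) &
+            VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_EN)
+         ;   // a real driver would bound this loop with a timeout
+*/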
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT              (0x1E008U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING                      (0x1E010U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
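+
+/*
+   Usage sketch (illustrative; vha_write32() is a hypothetical MMIO helper,
+   and "page catalogue" is inferred from the PC/PD/PT naming above): the
+   BASE_ADDR field stores a 4k-aligned address right-shifted by ALIGNSHIFT.
+   Selecting a context and programming its catalogue base:
+
+     vha_write32(VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT, ctx_id);
+     vha_write32(VHA_CR_OS0_MMU_CBASE_MAPPING,
+                 (uint32_t)(pc_base_addr >>
+                     VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
+*/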
+
+
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS1                      (0x1E018U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
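+
+/*
+   Usage sketch (illustrative): decoding a fault record; the ADDRESS field
+   sits at bits 47:4, so the faulting address is reported at 16-byte
+   granularity and needs no down-shift:
+
+     uint64_t f    = ...;   // raw VHA_CR_OS0_MMU_FAULT_STATUS1 read
+     uint64_t addr = f & ~VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK;
+     unsigned ctx  = (unsigned)((f & ~VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK)
+                         >> VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_SHIFT);
+     int is_read   = !!(f & VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN);  // RNW presumably read-not-write
+*/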
+
+
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS2                      (0x1E020U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS0_MMU_CTRL_LEGACY                        (0x1E040U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_CONTROL
+*/
+#define VHA_CR_OS1_CNN_CONTROL                            (0x20000U)
+#define VHA_CR_OS1_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS1_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS1_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS1_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS1_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS1_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS1_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS1_CNN_CONTROL_START_EN                   (0X00000001U)
+
+
+#define VHA_CR_OS1_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host */
+#define VHA_CR_OS1_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed */
+#define VHA_CR_OS1_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher-priority request from another host */
+#define VHA_CR_OS1_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not been started */
+#define VHA_CR_OS1_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_STATUS
+*/
+#define VHA_CR_OS1_CNN_STATUS                             (0x20008U)
+#define VHA_CR_OS1_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C1FFFFFF))
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_IDLE          (0X00000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS1_CNN_STATUS_PARITY_SHIFT                (24U)
+#define VHA_CR_OS1_CNN_STATUS_PARITY_CLRMSK               (0XFEFFFFFFU)
+#define VHA_CR_OS1_CNN_STATUS_PARITY_EN                   (0X01000000U)
+#define VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFF0000FFU)
+#define VHA_CR_OS1_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS1_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_STATUS2
+*/
+#define VHA_CR_OS1_CNN_STATUS2                            (0x20010U)
+#define VHA_CR_OS1_CNN_STATUS2_MASKFULL                   (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_OS1_CNN_STATUS2_PASS_COUNT_SHIFT           (0U)
+#define VHA_CR_OS1_CNN_STATUS2_PASS_COUNT_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS                   (0x20020U)
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (7U)
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000007F))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED                   (0x20038U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
+
+
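+/*
+    Usage sketch (illustrative only, not generator output): the ALT_ADDRn_USED
+    flags occupy bits n (n = 0..15) and the matching BUF_TYPE flags bits
+    16 + n, so a helper can walk them by shift instead of naming all 32
+    macros. vha_read64() is a hypothetical MMIO accessor.
+*/
+extern uint64_t vha_read64(uint32_t offset);
+
+static inline int os1_alt_addr_is_io(unsigned int n)   /* n = 0..15 */
+{
+    uint64_t v = vha_read64(VHA_CR_OS1_CNN_ALT_ADDRESS_USED);
+    if (!(v & (IMG_UINT64_C(1) << (VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT + n))))
+        return -1;                                     /* address unused */
+    /* BUF_TYPE: 0 = model-only, 1 = IO or shared */
+    return !!(v & (IMG_UINT64_C(1) << (VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT + n)));
+}
+
+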
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0                       (0x20040U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1                       (0x20048U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2                       (0x20050U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3                       (0x20058U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4                       (0x20060U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5                       (0x20068U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6                       (0x20070U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7                       (0x20078U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL                  (0x20080U)
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS1_VHA_EVENT_ENABLE                       (0x20088U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS1_VHA_EVENT_STATUS                       (0x20090U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000B3FD000B))
+#define VHA_CR_OS1_VHA_EVENT_STATUS_PARITY_SHIFT          (31U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_PARITY_CLRMSK         (0X7FFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_PARITY_EN             (0X80000000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS1_VHA_EVENT_CLEAR                        (0x20098U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_EN    (0X10000000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_EN   (0X02000000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_EN   (0X01000000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_EN  (0X00800000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_EN  (0X00400000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
+
+
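+/*
+    Usage sketch (illustrative only, not generator output): the usual
+    ENABLE/STATUS/CLEAR triple. An interrupt handler reads EVENT_STATUS,
+    masks it against the enabled set, and acknowledges the handled bits
+    through EVENT_CLEAR. vha_read64()/vha_write64() are hypothetical
+    MMIO accessors.
+*/
+extern uint64_t vha_read64(uint32_t offset);
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline uint64_t os1_irq_ack(void)
+{
+    uint64_t events = vha_read64(VHA_CR_OS1_VHA_EVENT_STATUS) &
+                      vha_read64(VHA_CR_OS1_VHA_EVENT_ENABLE);
+    vha_write64(VHA_CR_OS1_VHA_EVENT_CLEAR, events);   /* ack what we saw */
+    return events;
+}
+
+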
+/*
+    Register VHA_CR_OS1_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS1_CNN_CRC_CONTROL                        (0x20100U)
+#define VHA_CR_OS1_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS1_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_CRC_ADDRESS                        (0x20108U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (7U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (128U)
+
+
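+/*
+    Usage sketch (illustrative only, not generator output): enabling
+    per-layer CRC capture. CNN_CRC_ADDR carries bits 39:7 of the buffer
+    address, so the buffer must be ALIGNSIZE (128-byte) aligned; that the
+    byte address is written in place, as the matching SHIFT/ALIGNSHIFT
+    suggest, is an assumption. vha_write64() is a hypothetical accessor.
+*/
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline int os1_crc_setup(uint64_t dev_addr)
+{
+    if (dev_addr & (VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE - 1))
+        return -1;                            /* not 128-byte aligned */
+    vha_write64(VHA_CR_OS1_CNN_CRC_ADDRESS,
+                dev_addr & ~VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK);
+    vha_write64(VHA_CR_OS1_CNN_CRC_CONTROL,
+                VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER);
+    return 0;
+}
+
+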
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS                      (0x20110U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (7U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (128U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS1_CNN_DEBUG_SIZE                         (0x20118U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL                      (0x20120U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
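+/*
+    Usage sketch (illustrative only, not generator output): turning on both
+    performance and bandwidth capture at layer granularity. The two fields
+    share the register, so the enumerated values simply OR together.
+    vha_write64() is a hypothetical MMIO accessor.
+*/
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline void os1_debug_enable_per_layer(void)
+{
+    vha_write64(VHA_CR_OS1_CNN_DEBUG_CONTROL,
+                VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER |
+                VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER);
+}
+
+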
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS1_CNN_DEBUG_STATUS                       (0x20128U)
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL                    (0x20130U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
+
+
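+/*
+    Usage sketch (illustrative only, not generator output): each requestor
+    in CNN_PRELOAD_CONTROL takes one enumerated N_REQS value, so a full
+    configuration is a single OR of per-field constants. The particular
+    values below are arbitrary illustration, not recommended settings.
+    vha_write64() is a hypothetical MMIO accessor.
+*/
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline void os1_preload_example(void)
+{
+    vha_write64(VHA_CR_OS1_CNN_PRELOAD_CONTROL,
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256 |
+                VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256);
+}
+
+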
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8                       (0x20140U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9                       (0x20148U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10                      (0x20150U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11                      (0x20158U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12                      (0x20160U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13                      (0x20168U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14                      (0x20170U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15                      (0x20178U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
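+/*
+    Usage sketch (illustrative only, not generator output): ALT_ADDRESS0..7
+    sit at consecutive 8-byte offsets from 0x20040, while ALT_ADDRESS8..15
+    continue in a second bank at 0x20140, so an index helper must handle
+    the split. <stdint.h> types are assumed.
+*/
+static inline uint32_t os1_alt_address_reg(unsigned int n)   /* n = 0..15 */
+{
+    return (n < 8) ? (uint32_t)(VHA_CR_OS1_CNN_ALT_ADDRESS0 + 8u * n)
+                   : (uint32_t)(VHA_CR_OS1_CNN_ALT_ADDRESS8 + 8u * (n - 8));
+}
+
+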
+/*
+    Register VHA_CR_OS1_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS1_CNN_PERFORMANCE                        (0x201A0U)
+#define VHA_CR_OS1_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS1_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS1_OCM_BASE_ADDR
+*/
+#define VHA_CR_OS1_OCM_BASE_ADDR                          (0x20200U)
+#define VHA_CR_OS1_OCM_BASE_ADDR_MASKFULL                 (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_OCM_BASE_ADDR_BASE_ADDR_SHIFT          (0U)
+#define VHA_CR_OS1_OCM_BASE_ADDR_BASE_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_SAVE_RESTORE_BUFFER_BASE_ADDR
+*/
+#define VHA_CR_OS1_SAVE_RESTORE_BUFFER_BASE_ADDR          (0x20208U)
+#define VHA_CR_OS1_SAVE_RESTORE_BUFFER_BASE_ADDR_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_SHIFT (0U)
+#define VHA_CR_OS1_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_SAVE_RESTORE_CTRL
+*/
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL                      (0x20210U)
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_SHIFT         (1U)
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_DISABLE       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_64          (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_128         (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_192         (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_256         (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_320         (IMG_UINT64_C(0x000000000000000a))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_384         (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_N_REQS_N_448         (IMG_UINT64_C(0x000000000000000e))  
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_SHIFT (0U)
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS1_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS1_CNN_CRC_MASK_CTRL
+*/
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL                      (0x20300U)
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_MASKFULL             (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_SHIFT          (0U)
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_NO_MASK        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_MASK_L1        (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_MASK_L2        (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_CNN_CRC_MASK_CTRL_LEVEL_MASK_L3        (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS1_MMU_CTRL_INVAL                         (0x2E000U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS                  (0x2E038U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000080000001))
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PARITY_SHIFT     (31U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PARITY_CLRMSK    (0X7FFFFFFFU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PARITY_EN        (0X80000000U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
+
+
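+/*
+    Usage sketch (illustrative only, not generator output): requesting a
+    full MMU invalidation and polling INVAL_STATUS until PENDING clears.
+    Whether a full flush needs PC, PD and PT set together is an
+    assumption. vha_read64()/vha_write64() are hypothetical accessors.
+*/
+extern uint64_t vha_read64(uint32_t offset);
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline void os1_mmu_invalidate_all(void)
+{
+    vha_write64(VHA_CR_OS1_MMU_CTRL_INVAL,
+                VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+                VHA_CR_OS1_MMU_CTRL_INVAL_PC_EN |
+                VHA_CR_OS1_MMU_CTRL_INVAL_PD_EN |
+                VHA_CR_OS1_MMU_CTRL_INVAL_PT_EN);
+    while (vha_read64(VHA_CR_OS1_MMU_CTRL_INVAL_STATUS) &
+           VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_EN)
+        ;                           /* spin until the walk completes */
+}
+
+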
+/*
+    Register VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT              (0x2E008U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS1_MMU_CBASE_MAPPING                      (0x2E010U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
+
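+/*
+    Usage sketch (illustrative only, not generator output): selecting an
+    MMU context and programming its page-catalogue base. BASE_ADDR is held
+    in ALIGNSIZE (4 KiB) units, hence the ALIGNSHIFT right-shift.
+    vha_write64() is a hypothetical MMIO accessor.
+*/
+extern void vha_write64(uint32_t offset, uint64_t value);
+
+static inline void os1_mmu_set_catalogue(uint32_t ctx_id, uint64_t pc_phys)
+{
+    vha_write64(VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT, ctx_id);
+    vha_write64(VHA_CR_OS1_MMU_CBASE_MAPPING,
+                (pc_phys >> VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) &
+                ~VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+}
+
+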
+/*
+    Register VHA_CR_OS1_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS1_MMU_FAULT_STATUS1                      (0x2E018U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
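+/*
+    Usage sketch (illustrative only, not generator output): extracting the
+    faulting address from MMU_FAULT_STATUS1. ADDRESS occupies bits 47:4,
+    so the recovered address is 16-byte granular; treating the stored
+    value as address >> 4 is an assumption. vha_read64() is a
+    hypothetical MMIO accessor.
+*/
+extern uint64_t vha_read64(uint32_t offset);
+
+static inline uint64_t os1_mmu_fault_addr(void)
+{
+    uint64_t v = vha_read64(VHA_CR_OS1_MMU_FAULT_STATUS1);
+    if (!(v & VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_EN))
+        return 0;                              /* no fault latched */
+    return ((v & ~VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_CLRMSK)
+            >> VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_SHIFT) << 4;
+}
+
+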
+/*
+    Register VHA_CR_OS1_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS1_MMU_FAULT_STATUS2                      (0x2E020U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS1_MMU_CTRL_LEGACY                        (0x2E040U)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_CONTROL
+*/
+#define VHA_CR_OS2_CNN_CONTROL                            (0x30000U)
+#define VHA_CR_OS2_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS2_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS2_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS2_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS2_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS2_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS2_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS2_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS2_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS2_CNN_CONTROL_START_EN                   (0X00000001U)
+
+
+#define VHA_CR_OS2_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher-priority request from another host*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not yet been started */
+#define VHA_CR_OS2_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_STATUS
+*/
+#define VHA_CR_OS2_CNN_STATUS                             (0x30008U)
+#define VHA_CR_OS2_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C1FFFFFF))
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_IDLE          (00000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS2_CNN_STATUS_PARITY_SHIFT                (24U)
+#define VHA_CR_OS2_CNN_STATUS_PARITY_CLRMSK               (0XFEFFFFFFU)
+#define VHA_CR_OS2_CNN_STATUS_PARITY_EN                   (0X01000000U)
+#define VHA_CR_OS2_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS2_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFF0000FFU)
+#define VHA_CR_OS2_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS2_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_STATUS2
+*/
+#define VHA_CR_OS2_CNN_STATUS2                            (0x30010U)
+#define VHA_CR_OS2_CNN_STATUS2_MASKFULL                   (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_OS2_CNN_STATUS2_PASS_COUNT_SHIFT           (0U)
+#define VHA_CR_OS2_CNN_STATUS2_PASS_COUNT_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS                   (0x30020U)
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (7U)
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000007F))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED                   (0x30038U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
+
+
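+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   The _CLRMSK macros above are inverted field masks, so a field is updated
+   with the usual read-modify-write: AND with CLRMSK to clear, then OR in the
+   desired value or _EN bit. reg_read64()/reg_write64(), the base pointer,
+   and the register-offset macro are assumed helpers; <stdint.h> is assumed.
+*/
+#if 0 /* illustration only */
+static void example_use_alt_addr0(volatile void *base)
+{
+	uint64_t v = reg_read64(base, VHA_CR_OS2_CNN_ALT_ADDRESS_USED);
+	v &= VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK;     /* clear bit 0 */
+	v |= VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN;         /* mark slot 0 as used */
+	v &= VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK; /* clear bit 16 */
+	v |= VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED;
+	reg_write64(base, VHA_CR_OS2_CNN_ALT_ADDRESS_USED, v);
+}
+#endif
+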
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0                       (0x30040U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1                       (0x30048U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2                       (0x30050U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3                       (0x30058U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4                       (0x30060U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5                       (0x30068U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6                       (0x30070U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7                       (0x30078U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL                  (0x30080U)
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS2_VHA_EVENT_ENABLE                       (0x30088U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS2_VHA_EVENT_STATUS                       (0x30090U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000B3FD000B))
+#define VHA_CR_OS2_VHA_EVENT_STATUS_PARITY_SHIFT          (31U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_PARITY_CLRMSK         (0X7FFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_PARITY_EN             (0X80000000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN   (0X10000000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_INIT_DONE_EN  (0X02000000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_DETECTION_EN  (0X01000000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ECC_CORRECTION_EN (0X00800000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_LOCKSTEP_ERROR_EN (0X00400000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS2_VHA_EVENT_CLEAR                        (0x30098U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000033FD000B))
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_SHIFT (29U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PARITY_ERROR_EN (0X20000000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_SHIFT (28U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_PARITY_ERROR_EN    (0X10000000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_SHIFT (25U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_INIT_DONE_EN   (0X02000000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_SHIFT (24U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_CLRMSK (0XFEFFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_DETECTION_EN   (0X01000000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_SHIFT (23U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ECC_CORRECTION_EN  (0X00800000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_SHIFT (22U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_LOCKSTEP_ERROR_EN  (0X00400000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
+
+
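+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   ENABLE/STATUS/CLEAR form the usual event-register triplet: unmask events
+   in ENABLE, read STATUS in the interrupt handler, and write the handled
+   bits back to CLEAR (write-one-to-clear is assumed here, as are the
+   reg_read64()/reg_write64() MMIO helpers).
+*/
+#if 0 /* illustration only */
+static void example_os2_event_handling(volatile void *base)
+{
+	/* Unmask "command complete" and "error" events. */
+	reg_write64(base, VHA_CR_OS2_VHA_EVENT_ENABLE,
+		    VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN |
+		    VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_EN);
+
+	/* In the ISR: latch the pending events, then acknowledge them. */
+	uint64_t status = reg_read64(base, VHA_CR_OS2_VHA_EVENT_STATUS);
+	reg_write64(base, VHA_CR_OS2_VHA_EVENT_CLEAR,
+		    status & VHA_CR_OS2_VHA_EVENT_CLEAR_MASKFULL);
+}
+#endif
+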
+/*
+    Register VHA_CR_OS2_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS2_CNN_CRC_CONTROL                        (0x30100U)
+#define VHA_CR_OS2_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS2_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_CRC_ADDRESS                        (0x30108U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (7U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (128U)
+
+
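+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   SHIFT and ALIGNSHIFT are both 7 here, which suggests the 128-byte-aligned
+   device address is written into the register as-is (its low 7 bits are zero
+   by construction). That reading is an assumption, as is reg_write64().
+*/
+#if 0 /* illustration only */
+static void example_set_crc_address(volatile void *base, uint64_t dev_addr)
+{
+	/* Force alignment down to CNN_CRC_ADDR_ALIGNSIZE (128 bytes). */
+	dev_addr &= ~(uint64_t)(VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE - 1);
+	reg_write64(base, VHA_CR_OS2_CNN_CRC_ADDRESS,
+		    dev_addr & VHA_CR_OS2_CNN_CRC_ADDRESS_MASKFULL);
+}
+#endif
+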
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS                      (0x30110U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (7U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (128U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS2_CNN_DEBUG_SIZE                         (0x30118U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL                      (0x30120U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS2_CNN_DEBUG_STATUS                       (0x30128U)
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL                    (0x30130U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
+
+
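+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   Each field's enumerated values above are already pre-shifted into place,
+   so composing a whole-register value is a single OR of one value per field.
+   The particular request depths chosen below are arbitrary examples.
+*/
+#if 0 /* illustration only */
+static const uint64_t example_preload_ctrl =
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE  |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64    |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64    |
+	VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128;
+#endif
+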
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8                       (0x30140U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9                       (0x30148U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10                      (0x30150U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11                      (0x30158U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12                      (0x30160U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13                      (0x30168U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14                      (0x30170U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15                      (0x30178U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS2_CNN_PERFORMANCE                        (0x301A0U)
+#define VHA_CR_OS2_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS2_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS2_OCM_BASE_ADDR
+*/
+#define VHA_CR_OS2_OCM_BASE_ADDR                          (0x30200U)
+#define VHA_CR_OS2_OCM_BASE_ADDR_MASKFULL                 (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_OCM_BASE_ADDR_BASE_ADDR_SHIFT          (0U)
+#define VHA_CR_OS2_OCM_BASE_ADDR_BASE_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_SAVE_RESTORE_BUFFER_BASE_ADDR
+*/
+#define VHA_CR_OS2_SAVE_RESTORE_BUFFER_BASE_ADDR          (0x30208U)
+#define VHA_CR_OS2_SAVE_RESTORE_BUFFER_BASE_ADDR_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_SHIFT (0U)
+#define VHA_CR_OS2_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_SAVE_RESTORE_CTRL
+*/
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL                      (0x30210U)
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_SHIFT         (1U)
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_DISABLE       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_64          (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_128         (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_192         (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_256         (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_320         (IMG_UINT64_C(0x000000000000000a))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_384         (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_N_REQS_N_448         (IMG_UINT64_C(0x000000000000000e))  
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_SHIFT (0U)
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS2_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS2_CNN_CRC_MASK_CTRL
+*/
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL                      (0x30300U)
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_MASKFULL             (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_SHIFT          (0U)
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_NO_MASK        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_MASK_L1        (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_MASK_L2        (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_CNN_CRC_MASK_CTRL_LEVEL_MASK_L3        (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS2_MMU_CTRL_INVAL                         (0x3E000U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS                  (0x3E038U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000080000001))
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PARITY_SHIFT     (31U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PARITY_CLRMSK    (0X7FFFFFFFU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PARITY_EN        (0X80000000U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
+
+
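+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   A plausible invalidation sequence: request a full invalidate, then poll
+   STATUS until PENDING drops. That PENDING stays set for the duration of
+   the flush is an assumption, as are the reg_read64()/reg_write64() helpers.
+*/
+#if 0 /* illustration only */
+static void example_mmu_invalidate_all(volatile void *base)
+{
+	reg_write64(base, VHA_CR_OS2_MMU_CTRL_INVAL,
+		    VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+		    VHA_CR_OS2_MMU_CTRL_INVAL_PC_EN |
+		    VHA_CR_OS2_MMU_CTRL_INVAL_PD_EN |
+		    VHA_CR_OS2_MMU_CTRL_INVAL_PT_EN);
+	while (reg_read64(base, VHA_CR_OS2_MMU_CTRL_INVAL_STATUS) &
+	       VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_EN)
+		; /* spin; a real driver would bound this with a timeout */
+}
+#endif
+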
+/*
+    Register VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT              (0x3E008U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS2_MMU_CBASE_MAPPING                      (0x3E010U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS2_MMU_FAULT_STATUS1                      (0x3E018U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
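+/*
+   Editorial note -- usage sketch only, not part of the autogenerated header.
+   Decoding follows directly from the macro pattern: a field's value is
+   (reg & ~FIELD_CLRMSK) >> FIELD_SHIFT. Whether the extracted ADDRESS field
+   must be shifted back up by its low bit position to recover a byte address
+   is left open here.
+*/
+#if 0 /* illustration only */
+static void example_decode_fault(uint64_t s1)
+{
+	if (s1 & VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_EN) {
+		uint64_t addr  = (s1 & ~VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_CLRMSK)
+				 >> VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_SHIFT;
+		uint64_t ctx   = (s1 & ~VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_CLRMSK)
+				 >> VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_SHIFT;
+		uint64_t level = (s1 & ~VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_CLRMSK)
+				 >> VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_SHIFT;
+		(void)addr; (void)ctx; (void)level; /* report/log as appropriate */
+	}
+}
+#endif
+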
+/*
+    Register VHA_CR_OS2_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS2_MMU_FAULT_STATUS2                      (0x3E020U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS2_MMU_CTRL_LEGACY                        (0x3E040U)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK               (0x00000007U)
+/*
+Unprivileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_DATA (0x00000000U)
+/*
+Privileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_DATA (0x00000001U)
+/*
+Unprivileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_DATA (0x00000002U)
+/*
+Privileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_DATA (0x00000003U)
+/*
+Unprivileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_INSTRUCTION (0x00000004U)
+/*
+Privileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_INSTRUCTION (0x00000005U)
+/*
+Unprivileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_INSTRUCTION (0x00000006U)
+/*
+Privileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_INSTRUCTION (0x00000007U)
+
+
+/*
+    Register VHA_CR_ACE_PROT_CTRL
+*/
+#define VHA_CR_ACE_PROT_CTRL                              (0x40000U)
+#define VHA_CR_ACE_PROT_CTRL_MASKFULL                     (IMG_UINT64_C(0x0707070707070707))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_SHIFT                  (56U)
+#define VHA_CR_ACE_PROT_CTRL_OSID7_CLRMSK                 (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0300000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0400000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0500000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0600000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0700000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_SHIFT                  (48U)
+#define VHA_CR_ACE_PROT_CTRL_OSID6_CLRMSK                 (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0003000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0005000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0006000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0007000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_SHIFT                  (40U)
+#define VHA_CR_ACE_PROT_CTRL_OSID5_CLRMSK                 (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000030000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000050000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000060000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000070000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_SHIFT                  (32U)
+#define VHA_CR_ACE_PROT_CTRL_OSID4_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000300000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000500000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000600000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000700000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_SHIFT                  (24U)
+#define VHA_CR_ACE_PROT_CTRL_OSID3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000005000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000006000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000007000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_SHIFT                  (16U)
+#define VHA_CR_ACE_PROT_CTRL_OSID2_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_SHIFT                  (8U)
+#define VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_SHIFT                  (0U)
+#define VHA_CR_ACE_PROT_CTRL_OSID0_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000007))  
+
+
+/*
+    Register VHA_CR_REQ_CTXT_OVERRIDE
+*/
+#define VHA_CR_REQ_CTXT_OVERRIDE                          (0x40010U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_SHIFT       (2U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_EN          (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_SHIFT       (1U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_SHIFT       (0U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_EN          (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_CMD_PRIORITY_LIMITS
+*/
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS                    (0x40018U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_MASKFULL           (IMG_UINT64_C(0x000000000000019B))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_SHIFT    (7U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFE7F))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_SHIFT    (3U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_SHIFT    (0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL
+*/
+#define VHA_CR_OS0_MMU_CTRL                               (0x40020U)
+#define VHA_CR_OS0_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL
+*/
+#define VHA_CR_OS1_MMU_CTRL                               (0x40028U)
+#define VHA_CR_OS1_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL
+*/
+#define VHA_CR_OS2_MMU_CTRL                               (0x40030U)
+#define VHA_CR_OS2_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_SOCIF_BUS_SECURE
+*/
+#define VHA_CR_SOCIF_BUS_SECURE                           (0x4A100U)
+#define VHA_CR_SOCIF_BUS_SECURE_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_SHIFT              (0U)
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_EN                 (0X00000001U)
+
+
+#endif /* _VHA_CR_AURA_H_ */
+
+/*****************************************************************************
+ End of file (vha_cr_aura.h)
+*****************************************************************************/
+

+ 4998 - 0
driver/include/hwdefs/vha_cr_gyrus.h

@@ -0,0 +1,4998 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file vha_cr_gyrus.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _VHA_CR_GYRUS_H_
+#define _VHA_CR_GYRUS_H_
+
+#define VHA_CR_GYRUS_REVISION 1
+
+#define VHA_CR_MH_CONTROL_PERSISTENCE_TYPE_MASK           (0x00000003U)
+
+
+
+
+
+
+#define VHA_CR_MH_CONTROL_MAX_BURST_LENGTH_MASK           (0x00000003U)
+
+
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_CLK_STATUS0_MODE_GATED                     (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_CLK_STATUS0_MODE_RUNNING                   (0x00000001U)
+
+
+/*
+    Register VHA_CR_CLK_STATUS0
+*/
+#define VHA_CR_CLK_STATUS0                                (0x0008U)
+#define VHA_CR_CLK_STATUS0_MASKFULL                       (IMG_UINT64_C(0x00000017FFD00104))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_SHIFT                  (36U)
+#define VHA_CR_CLK_STATUS0_CNN_MMM_CLRMSK                 (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_MMM_RUNNING                (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_EWO_SHIFT                  (34U)
+#define VHA_CR_CLK_STATUS0_CNN_EWO_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_EWO_RUNNING                (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_SHIFT                 (33U)
+#define VHA_CR_CLK_STATUS0_CNN_PACK_CLRMSK                (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_RUNNING               (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_SHIFT                  (32U)
+#define VHA_CR_CLK_STATUS0_CNN_OIN_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_RUNNING                (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_SHIFT                 (31U)
+#define VHA_CR_CLK_STATUS0_CNN_POOL_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_RUNNING               (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_SHIFT                   (30U)
+#define VHA_CR_CLK_STATUS0_CNN_SB_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_SB_GATED                   (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_RUNNING                 (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_SHIFT                 (29U)
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_RUNNING               (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_STATUS0_CNN_NORM_SHIFT                 (28U)
+#define VHA_CR_CLK_STATUS0_CNN_NORM_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_RUNNING               (IMG_UINT64_C(0x0000000010000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_SHIFT                  (27U)
+#define VHA_CR_CLK_STATUS0_CNN_ACT_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_RUNNING                (IMG_UINT64_C(0x0000000008000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_SHIFT                (26U)
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_RUNNING              (IMG_UINT64_C(0x0000000004000000))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_SHIFT                  (25U)
+#define VHA_CR_CLK_STATUS0_CNN_CNV_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_RUNNING                (IMG_UINT64_C(0x0000000002000000))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_SHIFT                 (24U)
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_RUNNING               (IMG_UINT64_C(0x0000000001000000))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_SHIFT                 (23U)
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_RUNNING               (IMG_UINT64_C(0x0000000000800000))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_SHIFT                  (22U)
+#define VHA_CR_CLK_STATUS0_CNN_CMD_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_RUNNING                (IMG_UINT64_C(0x0000000000400000))
+#define VHA_CR_CLK_STATUS0_CNN_SHIFT                      (20U)
+#define VHA_CR_CLK_STATUS0_CNN_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_RUNNING                    (IMG_UINT64_C(0x0000000000100000))
+#define VHA_CR_CLK_STATUS0_SLC_SHIFT                      (8U)
+#define VHA_CR_CLK_STATUS0_SLC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CLK_STATUS0_SLC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_SLC_RUNNING                    (IMG_UINT64_C(0x0000000000000100))
+#define VHA_CR_CLK_STATUS0_BIF_SHIFT                      (2U)
+#define VHA_CR_CLK_STATUS0_BIF_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CLK_STATUS0_BIF_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_BIF_RUNNING                    (IMG_UINT64_C(0x0000000000000004))
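+
+/* Editor's sketch (not generator output): each CLK_STATUS0 field above is a
+   single bit, so a module's clock state can be tested by masking the read
+   value with the complement of the field's CLRMSK and comparing against the
+   field's RUNNING value. A minimal helper, assuming only this header: */
+static inline int vha_clk_status0_cnn_running(unsigned long long status0)
+{
+        return (status0 & ~VHA_CR_CLK_STATUS0_CNN_CLRMSK) ==
+               VHA_CR_CLK_STATUS0_CNN_RUNNING;
+}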
+
+
+/*
+    Register VHA_CR_PRODUCT_ID
+*/
+#define VHA_CR_PRODUCT_ID                                 (0x0018U)
+#define VHA_CR_PRODUCT_ID_MASKFULL                        (IMG_UINT64_C(0x00000000FFFF0000))
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT            (16U)
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register VHA_CR_CORE_ID
+*/
+#define VHA_CR_CORE_ID                                    (0x0020U)
+#define VHA_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_BRANCH_ID_SHIFT                    (48U)
+#define VHA_CR_CORE_ID_BRANCH_ID_CLRMSK                   (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_VERSION_ID_SHIFT                   (32U)
+#define VHA_CR_CORE_ID_VERSION_ID_CLRMSK                  (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT     (16U)
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK    (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_CORE_ID_CONFIG_ID_SHIFT                    (0U)
+#define VHA_CR_CORE_ID_CONFIG_ID_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
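+
+/* Editor's sketch: multi-bit fields follow the same CLRMSK/SHIFT pattern;
+   clearing the other bits and shifting down recovers the raw field value,
+   here the 16-bit BRANCH_ID of the core identification register. */
+static inline unsigned int vha_core_id_branch(unsigned long long core_id)
+{
+        return (unsigned int)((core_id & ~VHA_CR_CORE_ID_BRANCH_ID_CLRMSK) >>
+                              VHA_CR_CORE_ID_BRANCH_ID_SHIFT);
+}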
+
+
+/*
+    Register VHA_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define VHA_CR_CORE_IP_INTEGRATOR_ID                      (0x0028U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT          (0U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CHANGELIST
+*/
+#define VHA_CR_CORE_IP_CHANGELIST                         (0x0030U)
+#define VHA_CR_CORE_IP_CHANGELIST_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_SHIFT             (0U)
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CONFIG
+*/
+#define VHA_CR_CORE_IP_CONFIG                             (0x0038U)
+#define VHA_CR_CORE_IP_CONFIG_MASKFULL                    (IMG_UINT64_C(0x0000000000000F03))
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_SHIFT (11U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_EN (0X00000800U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_SHIFT (10U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_EN (0X00000400U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_EN (0X00000200U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_SHIFT         (8U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_CLRMSK        (0XFFFFFEFFU)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_EN            (0X00000100U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_SHIFT         (1U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_EN            (0X00000002U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_SHIFT         (0U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_TIMER
+*/
+#define VHA_CR_CNN_MEM_WDT_TIMER                          (0x0048U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_SHIFT              (0U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_CLRMSK             (0x00000000U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_TIMER
+*/
+#define VHA_CR_CNN_HL_WDT_TIMER                           (0x0050U)
+#define VHA_CR_CNN_HL_WDT_TIMER_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_HL_WDT_TIMER_VALUE_SHIFT               (0U)
+#define VHA_CR_CNN_HL_WDT_TIMER_VALUE_CLRMSK              (0x00000000U)
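+
+/* Editor's sketch: both watchdog timers hold a plain 32-bit value occupying
+   bits 0..31, so no shifting is needed when programming them. vha_write64()
+   is a hypothetical MMIO helper (offset, value) standing in for whatever
+   iowrite wrapper the driver actually provides; it is assumed, not defined
+   by this header. */
+extern void vha_write64(unsigned long long offset, unsigned long long value);
+
+static inline void vha_set_mem_wdt(unsigned int cycles)
+{
+        vha_write64(VHA_CR_CNN_MEM_WDT_TIMER, cycles);
+}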
+
+
+/*
+    Register VHA_CR_RTM_CTRL
+*/
+#define VHA_CR_RTM_CTRL                                   (0x0058U)
+#define VHA_CR_RTM_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000CFFFFFF8))
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_SHIFT                  (31U)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_CLRMSK                 (0X7FFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_EN                     (0X80000000U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_SHIFT                   (30U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_CLRMSK                  (0XBFFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_EN                      (0X40000000U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT                (3U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK               (0XF0000007U)
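+
+/* Editor's sketch: updating a multi-bit field without disturbing its
+   neighbours is a read-modify-write: clear the field with its CLRMSK, then
+   OR in the shifted value, re-masked so an oversized argument cannot spill
+   into adjacent bits. */
+static inline unsigned int vha_rtm_ctrl_set_selector(unsigned int ctrl,
+                                                     unsigned int selector)
+{
+        ctrl &= VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK;
+        ctrl |= (selector << VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT) &
+                ~VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK;
+        return ctrl;
+}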
+
+
+/*
+    Register VHA_CR_RTM_DATA
+*/
+#define VHA_CR_RTM_DATA                                   (0x0060U)
+#define VHA_CR_RTM_DATA_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_RTM_DATA_RTM_DATA_SHIFT                    (0U)
+#define VHA_CR_RTM_DATA_RTM_DATA_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG0
+*/
+#define VHA_CR_CNN_IP_CONFIG0                             (0x0068U)
+#define VHA_CR_CNN_IP_CONFIG0_MASKFULL                    (IMG_UINT64_C(0x00000000007FFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_SHIFT (22U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_EN    (IMG_UINT64_C(0X0000000000400000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_SHIFT (21U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000200000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_SHIFT      (20U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_EN         (IMG_UINT64_C(0X0000000000100000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_SHIFT (19U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000080000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_SHIFT (18U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000040000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_SHIFT (17U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_EN (IMG_UINT64_C(0X0000000000020000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000010000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_SHIFT (15U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_SHIFT (14U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_SHIFT (13U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_SHIFT (11U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_SHIFT (10U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_SHIFT (7U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_SHIFT (6U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_SHIFT (5U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_SHIFT (3U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_SHIFT (2U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_SHIFT (1U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_EN   (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG1
+*/
+#define VHA_CR_CNN_IP_CONFIG1                             (0x0070U)
+#define VHA_CR_CNN_IP_CONFIG1_MASKFULL                    (IMG_UINT64_C(0x0FFFFFFF3F0FFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_SHIFT (52U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_CLRMSK (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_SHIFT (39U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFF0007FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_SHIFT (37U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFF9FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_SHIFT (32U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF00F))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
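+
+/* Editor's sketch: the _MIN1 suffix reads as "minus one" (an assumption
+   based on the naming convention, not stated in this header), so the usable
+   value is the extracted field plus one. Example for the convolution-engine
+   coefficient count: */
+static inline unsigned int vha_cnn_num_coeffs(unsigned long long cfg1)
+{
+        return 1u + (unsigned int)((cfg1 &
+                ~VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK) >>
+                VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT);
+}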
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG2
+*/
+#define VHA_CR_CNN_IP_CONFIG2                             (0x0078U)
+#define VHA_CR_CNN_IP_CONFIG2_MASKFULL                    (IMG_UINT64_C(0x00000FFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFF003FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC0FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_SHIFT (20U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0000F))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG3
+*/
+#define VHA_CR_CNN_IP_CONFIG3                             (0x0080U)
+#define VHA_CR_CNN_IP_CONFIG3_MASKFULL                    (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_SHIFT (38U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFC3FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_SHIFT (29U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC1FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFE0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_GATED                 (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_RUNNING               (0x00000001U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_STATUS0
+*/
+#define VHA_CR_SYS_CLK_STATUS0                            (0x0088U)
+#define VHA_CR_SYS_CLK_STATUS0_MASKFULL                   (IMG_UINT64_C(0x0000000000000004))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_SHIFT                  (2U)
+#define VHA_CR_SYS_CLK_STATUS0_SLC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_RUNNING                (IMG_UINT64_C(0x0000000000000004))
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ
+*/
+#define VHA_CR_PERF_SLC0_READ                             (0x0200U)
+#define VHA_CR_PERF_SLC0_READ_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_COUNT_SHIFT                 (0U)
+#define VHA_CR_PERF_SLC0_READ_COUNT_CLRMSK                (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE
+*/
+#define VHA_CR_PERF_SLC0_WRITE                            (0x0208U)
+#define VHA_CR_PERF_SLC0_WRITE_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_SHIFT                (0U)
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_CLRMSK               (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_DATA_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL                 (0x0210U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_SHIFT     (0U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_STALL                       (0x0218U)
+#define VHA_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_STALL                      (0x0220U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_ID_STALL                    (0x0228U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_SHIFT        (0U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_CLRMSK       (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL                   (0x0230U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1                   (0x0238U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1                   (0x0240U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2                   (0x0248U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2                   (0x0250U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3                   (0x0258U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3                   (0x0260U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4                   (0x0268U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4                   (0x0270U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_RESET_FULL
+*/
+#define VHA_CR_PERF_RESET_FULL                            (0x0278U)
+#define VHA_CR_PERF_RESET_FULL_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_RESET_FULL_RANGE_SHIFT                (0U)
+#define VHA_CR_PERF_RESET_FULL_RANGE_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_PERF_RESET_FULL_RANGE_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_ENABLE_FULL
+*/
+#define VHA_CR_PERF_ENABLE_FULL                           (0x0280U)
+#define VHA_CR_PERF_ENABLE_FULL_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_SHIFT               (0U)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_EN                  (0X00000001U)
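+
+/* Editor's sketch: one plausible profiling setup pulses the full-range
+   counter reset and then sets the enable bit; both are single-bit fields
+   whose _EN values can be written as-is. Uses the hypothetical vha_write64()
+   declared in the sketch further up. */
+static inline void vha_perf_full_restart(void)
+{
+        vha_write64(VHA_CR_PERF_RESET_FULL, VHA_CR_PERF_RESET_FULL_RANGE_EN);
+        vha_write64(VHA_CR_PERF_ENABLE_FULL, VHA_CR_PERF_ENABLE_FULL_RANGE_EN);
+}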
+
+
+/*
+    Register VHA_CR_MMU_STATUS
+*/
+#define VHA_CR_MMU_STATUS                                 (0x0288U)
+#define VHA_CR_MMU_STATUS_MASKFULL                        (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_SHIFT               (40U)
+#define VHA_CR_MMU_STATUS_MMU_STALLED_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define VHA_CR_MMU_STATUS_PM_WRITES_SHIFT                 (38U)
+#define VHA_CR_MMU_STATUS_PM_WRITES_CLRMSK                (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PM_READS_SHIFT                  (36U)
+#define VHA_CR_MMU_STATUS_PM_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PC_READS_SHIFT                  (24U)
+#define VHA_CR_MMU_STATUS_PC_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_MMU_STATUS_PD_READS_SHIFT                  (12U)
+#define VHA_CR_MMU_STATUS_PD_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_MMU_STATUS_PT_READS_SHIFT                  (0U)
+#define VHA_CR_MMU_STATUS_PT_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS1
+*/
+#define VHA_CR_SLC_STATUS1                                (0x0290U)
+#define VHA_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT        (48U)
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK       (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS2
+*/
+#define VHA_CR_SLC_STATUS2                                (0x0298U)
+#define VHA_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_IDLE
+*/
+#define VHA_CR_SLC_IDLE                                   (0x02A0U)
+#define VHA_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT              (12U)
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK             (0XFFFF0FFFU)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (4U)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFF00FU)
+#define VHA_CR_SLC_IDLE_MMU_SHIFT                         (3U)
+#define VHA_CR_SLC_IDLE_MMU_CLRMSK                        (0XFFFFFFF7U)
+#define VHA_CR_SLC_IDLE_MMU_EN                            (0X00000008U)
+#define VHA_CR_SLC_IDLE_CCM_SHIFT                         (2U)
+#define VHA_CR_SLC_IDLE_CCM_CLRMSK                        (0XFFFFFFFBU)
+#define VHA_CR_SLC_IDLE_CCM_EN                            (0X00000004U)
+#define VHA_CR_SLC_IDLE_RDI_SHIFT                         (1U)
+#define VHA_CR_SLC_IDLE_RDI_CLRMSK                        (0XFFFFFFFDU)
+#define VHA_CR_SLC_IDLE_RDI_EN                            (0X00000002U)
+#define VHA_CR_SLC_IDLE_XBAR_SHIFT                        (0U)
+#define VHA_CR_SLC_IDLE_XBAR_CLRMSK                       (0XFFFFFFFEU)
+#define VHA_CR_SLC_IDLE_XBAR_EN                           (0X00000001U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS3
+*/
+#define VHA_CR_SLC_STATUS3                                (0x02A8U)
+#define VHA_CR_SLC_STATUS3_MASKFULL                       (IMG_UINT64_C(0x0FFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_SHIFT (50U)
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XF003FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_SHIFT (40U)
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFC00FFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_SHIFT (30U)
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_SHIFT (20U)
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC00FFFFF))
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_SHIFT (10U)
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF003FF))
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_SHIFT (0U)
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFC00))
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_STATUS
+*/
+#define VHA_CR_SLC_FAULT_STOP_STATUS                      (0x02B0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT            (0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK           (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS_DEBUG
+*/
+#define VHA_CR_SLC_STATUS_DEBUG                           (0x02B8U)
+#define VHA_CR_SLC_STATUS_DEBUG_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT         (16U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK        (0X0000FFFFU)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT      (0U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_OUTSTANDING_READ
+*/
+#define VHA_CR_BIF_OUTSTANDING_READ                       (0x02C0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT         (0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_PAGE_FAULT_STALL
+*/
+#define VHA_CR_BIF_PAGE_FAULT_STALL                       (0x02C8U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_SHIFT          (0U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_CLRMSK         (0XFFFFFFFEU)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_EN             (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC
+*/
+#define VHA_CR_PERF_SLC                                   (0x02D0U)
+#define VHA_CR_PERF_SLC_MASKFULL                          (IMG_UINT64_C(0x000000000FEFFEFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_SHIFT (27U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XF7FFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_EN  (0X08000000U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_SHIFT    (26U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_CLRMSK   (0XFBFFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_EN       (0X04000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_SHIFT (25U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_EN  (0X02000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_SHIFT    (24U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_CLRMSK   (0XFEFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_EN       (0X01000000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_SHIFT (23U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_EN  (0X00800000U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (22U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00400000U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (21U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFDFFFFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00200000U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_SHIFT (19U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFF7FFFFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_EN  (0X00080000U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_SHIFT (18U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFBFFFFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_EN  (0X00040000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_SHIFT (17U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFDFFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_EN (0X00020000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_SHIFT (16U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_EN (0X00010000U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_SHIFT (15U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFF7FFFU)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_EN (0X00008000U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_SHIFT (14U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_EN (0X00004000U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_SHIFT (13U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFDFFFU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_EN (0X00002000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_SHIFT (12U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFFEFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_EN  (0X00001000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_SHIFT    (11U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_CLRMSK   (0XFFFFF7FFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_EN       (0X00000800U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_SHIFT   (10U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFBFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_EN      (0X00000400U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_SHIFT   (9U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFDFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_EN      (0X00000200U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_SHIFT    (7U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_CLRMSK   (0XFFFFFF7FU)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_EN       (0X00000080U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_SHIFT    (6U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_CLRMSK   (0XFFFFFFBFU)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_EN       (0X00000040U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_SHIFT  (5U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_CLRMSK (0XFFFFFFDFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_EN     (0X00000020U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_SHIFT (4U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_EN    (0X00000010U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_SHIFT (3U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_EN   (0X00000008U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_SHIFT (2U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_EN   (0X00000004U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_SHIFT (1U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_EN   (0X00000002U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN       (0X00000001U)
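+
+/* Editor's sketch: each bit of VHA_CR_PERF_SLC resets one SLC request/word
+   counter, so several _RESET_EN values can be OR-ed into a single write to
+   clear a group of counters at once (again via the assumed vha_write64()). */
+static inline void vha_perf_slc_reset_cmd_counters(void)
+{
+        vha_write64(VHA_CR_PERF_SLC,
+                    VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN |
+                    VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_EN |
+                    VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_EN);
+}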
+
+
+/*
+    Register VHA_CR_PERF_SLC_REQ_COUNT
+*/
+#define VHA_CR_PERF_SLC_REQ_COUNT                         (0x02D8U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_MASKFULL                (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD                        (0x02E0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR                    (0x02E8U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR                    (0x02F0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR                    (0x02F8U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE                     (0x0300U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_SHIFT       (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD                      (0x0308U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_CLRMSK       (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD                        (0x0310U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR                        (0x0318U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD                       (0x0328U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_CLRMSK        (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD                       (0x0330U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_CLRMSK        (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR                        (0x0338U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD                   (0x0340U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD               (0x0348U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD               (0x0350U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD               (0x0358U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD                (0x0360U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_SHIFT  (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD                 (0x0368U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_CLRMSK  (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD                   (0x0370U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD                   (0x0378U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD                  (0x0388U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_CLRMSK   (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD                  (0x0390U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_CLRMSK   (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD                   (0x0398U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD                        (0x03A0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD                   (0x03A8U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD                        (0x03B0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD                   (0x03B8U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_BIF_RTN_FIFO_WORD_COUNT
+*/
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT                    (0x03C0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_MASKFULL           (IMG_UINT64_C(0x00000000000001FF))
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_SHIFT      (0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_CLRMSK     (0XFFFFFE00U)
+
+
+#define VHA_CR_CLK_CTRL0_MODE_MASK                        (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_CLK_CTRL0_MODE_OFF                         (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_CLK_CTRL0_MODE_ON                          (0x00000001U)
+/*
+Automatic clock gating is active, the domain clock is only on whilst data is being processed */
+#define VHA_CR_CLK_CTRL0_MODE_AUTO                        (0x00000002U)
+
+
+/*
+    Register VHA_CR_CLK_CTRL0
+*/
+#define VHA_CR_CLK_CTRL0                                  (0x2000U)
+#define VHA_CR_CLK_CTRL0_MASKFULL                         (IMG_UINT64_C(0x33FFFFFF30000330))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_SHIFT                    (60U)
+#define VHA_CR_CLK_CTRL0_CNN_MMM_CLRMSK                   (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_ON                       (IMG_UINT64_C(0x1000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_AUTO                     (IMG_UINT64_C(0x2000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_SHIFT                    (56U)
+#define VHA_CR_CLK_CTRL0_CNN_EWO_CLRMSK                   (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_ON                       (IMG_UINT64_C(0x0100000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_AUTO                     (IMG_UINT64_C(0x0200000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_SHIFT                   (54U)
+#define VHA_CR_CLK_CTRL0_CNN_PACK_CLRMSK                  (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_ON                      (IMG_UINT64_C(0x0040000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_AUTO                    (IMG_UINT64_C(0x0080000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_SHIFT                    (52U)
+#define VHA_CR_CLK_CTRL0_CNN_OIN_CLRMSK                   (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_ON                       (IMG_UINT64_C(0x0010000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_AUTO                     (IMG_UINT64_C(0x0020000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_SHIFT                   (50U)
+#define VHA_CR_CLK_CTRL0_CNN_POOL_CLRMSK                  (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_ON                      (IMG_UINT64_C(0x0004000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_AUTO                    (IMG_UINT64_C(0x0008000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_SB_SHIFT                     (48U)
+#define VHA_CR_CLK_CTRL0_CNN_SB_CLRMSK                    (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_SB_OFF                       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_SB_ON                        (IMG_UINT64_C(0x0001000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_SB_AUTO                      (IMG_UINT64_C(0x0002000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_SHIFT                   (46U)
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_CLRMSK                  (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_ON                      (IMG_UINT64_C(0x0000400000000000))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_AUTO                    (IMG_UINT64_C(0x0000800000000000))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_SHIFT                   (44U)
+#define VHA_CR_CLK_CTRL0_CNN_NORM_CLRMSK                  (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_ON                      (IMG_UINT64_C(0x0000100000000000))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_AUTO                    (IMG_UINT64_C(0x0000200000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_SHIFT                    (42U)
+#define VHA_CR_CLK_CTRL0_CNN_ACT_CLRMSK                   (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_ON                       (IMG_UINT64_C(0x0000040000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_AUTO                     (IMG_UINT64_C(0x0000080000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_SHIFT                  (40U)
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_CLRMSK                 (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_ON                     (IMG_UINT64_C(0x0000010000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_AUTO                   (IMG_UINT64_C(0x0000020000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_SHIFT                    (38U)
+#define VHA_CR_CLK_CTRL0_CNN_CNV_CLRMSK                   (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_ON                       (IMG_UINT64_C(0x0000004000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_AUTO                     (IMG_UINT64_C(0x0000008000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_SHIFT                   (36U)
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_ON                      (IMG_UINT64_C(0x0000001000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_AUTO                    (IMG_UINT64_C(0x0000002000000000))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_SHIFT                   (34U)
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_ON                      (IMG_UINT64_C(0x0000000400000000))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_AUTO                    (IMG_UINT64_C(0x0000000800000000))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_SHIFT                    (32U)
+#define VHA_CR_CLK_CTRL0_CNN_CMD_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_ON                       (IMG_UINT64_C(0x0000000100000000))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_AUTO                     (IMG_UINT64_C(0x0000000200000000))
+#define VHA_CR_CLK_CTRL0_CNN_SHIFT                        (28U)
+#define VHA_CR_CLK_CTRL0_CNN_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ON                           (IMG_UINT64_C(0x0000000010000000))
+#define VHA_CR_CLK_CTRL0_CNN_AUTO                         (IMG_UINT64_C(0x0000000020000000))
+#define VHA_CR_CLK_CTRL0_SLC_SHIFT                        (8U)
+#define VHA_CR_CLK_CTRL0_SLC_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_CLK_CTRL0_SLC_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_SLC_ON                           (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_CTRL0_SLC_AUTO                         (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_CLK_CTRL0_BIF_SHIFT                        (4U)
+#define VHA_CR_CLK_CTRL0_BIF_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_CLK_CTRL0_BIF_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_BIF_ON                           (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_CLK_CTRL0_BIF_AUTO                         (IMG_UINT64_C(0x0000000000000020))  
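+
+/* Editorial sketch: each field above follows the generated pattern of a
+   _SHIFT constant, a _CLRMSK constant (all bits set except the field) and
+   pre-shifted value encodings (_OFF/_ON/_AUTO), so a clock domain is
+   reprogrammed with a plain read-modify-write. The function name and the
+   volatile-pointer register base below are illustrative assumptions (a real
+   driver would use its own MMIO accessors); <stdint.h> types and the
+   VHA_CR_CLK_CTRL0 offset defined earlier in this header are assumed. */
+static inline void vha_example_cnn_clock_auto(volatile uint64_t *regbase)
+{
+    volatile uint64_t *reg = (volatile uint64_t *)
+        ((volatile uint8_t *)regbase + VHA_CR_CLK_CTRL0);
+    uint64_t v = *reg;
+
+    v &= VHA_CR_CLK_CTRL0_CNN_CLRMSK; /* zero the 2-bit CNN clock mode field */
+    v |= VHA_CR_CLK_CTRL0_CNN_AUTO;   /* encodings are already shifted into place */
+    *reg = v;
+}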
+
+
+/*
+    Register VHA_CR_VHA_AXI_RESET_CTRL
+*/
+#define VHA_CR_VHA_AXI_RESET_CTRL                         (0x2008U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_SHIFT (0U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_RESET_CTRL
+*/
+#define VHA_CR_RESET_CTRL                                 (0x2010U)
+#define VHA_CR_RESET_CTRL_MASKFULL                        (IMG_UINT64_C(0x00000000C0000107))
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_SHIFT        (31U)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_CLRMSK       (0X7FFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_EN           (0X80000000U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_SHIFT        (30U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_CLRMSK       (0XBFFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_EN           (0X40000000U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_SHIFT       (8U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_CLRMSK      (0XFFFFFEFFU)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_EN          (0X00000100U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_SHIFT        (2U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_CLRMSK       (0XFFFFFFFBU)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_EN           (0X00000004U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_SHIFT        (1U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_EN           (0X00000002U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_SHIFT            (0U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_CMD_MH_CONTROL
+*/
+#define VHA_CR_CNN_CMD_MH_CONTROL                         (0x2018U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x000000000000003F))
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (4U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFCFU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (3U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_EN    (0X00000008U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_SHIFT  (2U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_EN     (0X00000004U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_PERSISTENCE_SHIFT       (0U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_PERSISTENCE_CLRMSK      (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_IBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_IBUF_MH_CONTROL                        (0x2020U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_CBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_CBUF_MH_CONTROL                        (0x2028U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_ABUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_ABUF_MH_CONTROL                        (0x2030U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_OUTPACK_MH_CONTROL
+*/
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL                     (0x2038U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MASKFULL            (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_EN (0X00000004U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_PERSISTENCE_SHIFT   (0U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_PERSISTENCE_CLRMSK  (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_ELEMENTOPS_MH_CONTROL
+*/
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL                  (0x2040U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MASKFULL         (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_EN (0X00000004U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_PERSISTENCE_SHIFT (0U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_PERSISTENCE_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_MMM_MH_CONTROL
+*/
+#define VHA_CR_CNN_MMM_MH_CONTROL                         (0x2048U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (3U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_MMM_MH_CONTROL_GPU_PIPE_COHERENT_EN    (0X00000004U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_PERSISTENCE_SHIFT       (0U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_PERSISTENCE_CLRMSK      (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_PM_VFP_TRAN_EN
+*/
+#define VHA_CR_PM_VFP_TRAN_EN                             (0x2100U)
+#define VHA_CR_PM_VFP_TRAN_EN_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PM_VFP_TRAN_EN_OP_SHIFT                    (0U)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_CLRMSK                   (0XFFFFFFFEU)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_EN                       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH                   (0x2118U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_SHIFT         (0U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_CLRMSK        (0X00000000U)
+
+
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_MASK     (0x00000003U)
+/*
+WDT is Disabled */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_NONE     (0x00000000U)
+/*
+WDT is Cleared when CMD Parser starts a pass or CMD Parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS (0x00000001U)
+/*
+WDT is Cleared when CMD Parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK     (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_CTRL
+*/
+#define VHA_CR_CNN_MEM_WDT_CTRL                           (0x2120U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MASKFULL                  (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_SHIFT                (0U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_NONE                 (0X00000000U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK_PASS            (0X00000001U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK                 (0X00000002U)
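+
+/* Editorial sketch: the memory watchdog is configured by loading a 32-bit
+   cycle count into the compare-match register and selecting one of the clear
+   modes described above. The helper name and volatile register base are
+   illustrative assumptions, not part of this header; MODE is the only field
+   in the control register (MASKFULL is 0x3), so it can be written directly. */
+static inline void vha_example_mem_wdt_kick_pass(volatile uint64_t *regbase,
+                                                 uint32_t cycles)
+{
+    volatile uint64_t *cmp = (volatile uint64_t *)
+        ((volatile uint8_t *)regbase + VHA_CR_CNN_MEM_WDT_COMPAREMATCH);
+    volatile uint64_t *ctrl = (volatile uint64_t *)
+        ((volatile uint8_t *)regbase + VHA_CR_CNN_MEM_WDT_CTRL);
+
+    *cmp  = cycles;                                 /* timeout in cycles */
+    *ctrl = VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK_PASS; /* clear on pass start or kick */
+}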
+
+
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_MASK       (0x00000003U)
+/*
+WDT is Cleared when CMD Parser starts a pass or CMD Parser is kicked */
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_KICK_PASS  (0x00000001U)
+/*
+WDT is Cleared when CMD Parser is kicked */
+#define VHA_CR_CNN_HL_WDT_CTRL_CNN_HL_WDT_CTRL_KICK       (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_CTRL
+*/
+#define VHA_CR_CNN_HL_WDT_CTRL                            (0x2128U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_SHIFT                 (0U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_CLRMSK                (0XFFFFFFFCU)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_KICK_PASS             (0X00000001U)
+#define VHA_CR_CNN_HL_WDT_CTRL_MODE_KICK                  (0X00000002U)
+
+
+/*
+    Register VHA_CR_CNN_HL_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH                    (0x2130U)
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_REG_SHIFT          (0U)
+#define VHA_CR_CNN_HL_WDT_COMPAREMATCH_REG_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define VHA_CR_IDLE_HYSTERESIS_COUNT                      (0x2140U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_MASKFULL             (IMG_UINT64_C(0x0000001F00001F1F))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_SHIFT        (32U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_CLRMSK       (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_SHIFT        (8U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFE0FF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_SHIFT       (0U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register VHA_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define VHA_CR_SOCIF_WAKEUP_ENABLE                        (0x2148U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT           (0U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_RESET_CLK_CTRL
+*/
+#define VHA_CR_RESET_CLK_CTRL                             (0x2150U)
+#define VHA_CR_RESET_CLK_CTRL_MASKFULL                    (IMG_UINT64_C(0x00000000000003FF))
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_SHIFT               (8U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_CLRMSK              (0XFFFFFCFFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_SHIFT                (6U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_CLRMSK               (0XFFFFFF3FU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_SHIFT                (4U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_CLRMSK               (0XFFFFFFCFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_SHIFT               (2U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_CLRMSK              (0XFFFFFFF3U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_SHIFT              (0U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_CLRMSK             (0XFFFFFFFCU)
+
+
+#define VHA_CR_SYS_CLK_CTRL0_MODE_MASK                    (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_OFF                     (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_ON                      (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_AUTO                    (0x00000002U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_CTRL0
+*/
+#define VHA_CR_SYS_CLK_CTRL0                              (0x2158U)
+#define VHA_CR_SYS_CLK_CTRL0_MASKFULL                     (IMG_UINT64_C(0x0000000000000030))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_SHIFT                    (4U)
+#define VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_ON                       (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_AUTO                     (IMG_UINT64_C(0x0000000000000020))  
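+
+/* Editorial sketch: the generic VHA_CR_SYS_CLK_CTRL0_MODE_* values above are
+   the unshifted encodings; the per-field constants are the same values
+   pre-shifted, e.g. VHA_CR_SYS_CLK_CTRL0_SLC_AUTO ==
+   (VHA_CR_SYS_CLK_CTRL0_MODE_AUTO << VHA_CR_SYS_CLK_CTRL0_SLC_SHIFT).
+   Function name and register base are illustrative assumptions. */
+static inline void vha_example_sys_slc_clock_auto(volatile uint64_t *regbase)
+{
+    volatile uint64_t *reg = (volatile uint64_t *)
+        ((volatile uint8_t *)regbase + VHA_CR_SYS_CLK_CTRL0);
+
+    *reg = (*reg & VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK) |
+           VHA_CR_SYS_CLK_CTRL0_SLC_AUTO;
+}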
+
+
+/*
+    Register VHA_CR_AXI_EXACCESS
+*/
+#define VHA_CR_AXI_EXACCESS                               (0x2168U)
+#define VHA_CR_AXI_EXACCESS_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT            (0U)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_REGBANK_REQUEST_INVALID
+*/
+#define VHA_CR_REGBANK_REQUEST_INVALID                    (0x2170U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT         (0U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY
+*/
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY             (0x2180U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_MASKFULL    (IMG_UINT64_C(0x000000000000019B))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS2_LIMIT_SHIFT (7U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS2_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE7F))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS1_LIMIT_SHIFT (3U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS1_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS0_LIMIT_SHIFT (0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_LEGACY_OS0_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL
+*/
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL               (0x2188U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_MASKFULL      (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_SHIFT (2U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_SHIFT (1U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_RAM_SCRUB_ON_SWITCH_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_SHIFT (0U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_RAM_SCRUB_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_ARB_STALL_RATIO
+*/
+#define VHA_CR_CNN_ARB_STALL_RATIO                        (0x2200U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_MASKFULL               (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_SHIFT           (32U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_CLRMSK          (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_SHIFT      (28U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_SHIFT      (24U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_SHIFT      (20U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_SHIFT      (16U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_SHIFT      (12U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_SHIFT      (8U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_SHIFT      (4U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_SHIFT      (0U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_FE
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE                (0x2208U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CNV_ABUF_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CNV_ABUF_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_CNV_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_CNV_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_ACT_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_ACT_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ACT_NORM_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ACT_NORM_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_NORM_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_NORM_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_CNV_SHIFT (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_CNV_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_OUTPACK_SHIFT (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_OUTPACK_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_ABUF_SHIFT (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_ABUF_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_BE
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE                (0x2210U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_XBAR_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_XBAR_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_OIN_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_OIN_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_OUTPACK_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_OUTPACK_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_XBAR_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_XBAR_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_POOL_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_POOL_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_SB_SHIFT  (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_SB_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_SB_SHIFT  (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_SB_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_SB_SHIFT   (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_SB_CLRMSK  (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_CNN_ARB_MAX_PAGE
+*/
+#define VHA_CR_CNN_ARB_MAX_PAGE                           (0x2218U)
+#define VHA_CR_CNN_ARB_MAX_PAGE_MASKFULL                  (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_CNN_ARB_MAX_PAGE_MAX_PAGE_COUNT_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_ARB_MAX_PAGE_MAX_PAGE_COUNT_MIN1_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_HITS
+*/
+#define VHA_CR_PERF_SLC_SNOOP_HITS                        (0x61A0U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_CLRMSK           (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_MISSES
+*/
+#define VHA_CR_PERF_SLC_SNOOP_MISSES                      (0x61A8U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0                 (0xE008U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1                 (0xE010U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2                 (0xE018U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3                 (0xE020U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0                  (0xE028U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1                  (0xE030U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2                  (0xE038U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3                  (0xE040U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0                     (0xE048U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0                 (0xE050U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1                 (0xE058U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2                 (0xE060U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3                 (0xE068U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0                  (0xE070U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1                  (0xE078U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2                  (0xE080U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3                  (0xE088U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC001))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1                     (0xE090U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF001))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST            (0xE098U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST            (0xE0A0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST            (0xE0A8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST            (0xE0B0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST             (0xE0B8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST             (0xE0C0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST             (0xE0C8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST             (0xE0D0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST                (0xE0D8U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST            (0xE0E0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST            (0xE0E8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST            (0xE0F0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST            (0xE0F8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST             (0xE100U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST             (0xE108U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST             (0xE110U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST             (0xE118U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST                (0xE120U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS_META                      (0xE160U)
+#define VHA_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (62U)
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (56U)
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (48U)
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (4U)
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
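+
+/* Editorial sketch: fields of a read status value are extracted by masking
+   with the inverted _CLRMSK and shifting down by _SHIFT. The helper names
+   are illustrative assumptions, as is interpreting RNW with the usual
+   read-not-write convention (1 == read); the raw ADDRESS field value is
+   returned without any granularity scaling. */
+static inline uint64_t vha_example_fault_addr_field(uint64_t status)
+{
+    return (status & ~VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK) >>
+           VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT;
+}
+
+static inline int vha_example_fault_is_read(uint64_t status)
+{
+    return (status & VHA_CR_MMU_FAULT_STATUS_META_RNW_EN) != 0;
+}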
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS2_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS2_META                     (0xE198U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_MASKFULL            (IMG_UINT64_C(0x0000000000003FFF))
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT     (13U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK    (0XFFFFDFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN        (0X00002000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT   (12U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK  (0XFFFFEFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN      (0X00001000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT          (8U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT     (0U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_PM
+*/
+#define VHA_CR_MMU_FAULT_STATUS_PM                        (0xE130U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_MASKFULL               (IMG_UINT64_C(0x0000000007FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_SHIFT               (24U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT              (23U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_EN                 (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT          (3U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFF800007))
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT            (1U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT            (0U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_CTRL
+*/
+#define VHA_CR_MMU_ABORT_PM_CTRL                          (0xE188U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_SHIFT             (0U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_CLRMSK            (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_EN                (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_STATUS
+*/
+#define VHA_CR_MMU_ABORT_PM_STATUS                        (0xE190U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_SHIFT            (0U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_HOST_IRQ_ENABLE
+*/
+#define VHA_CR_MMU_HOST_IRQ_ENABLE                        (0xE1A0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_SHIFT         (0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_ONE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE                    (0xE350U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U)
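+
+/* Editorial sketch: _ALIGNSHIFT/_ALIGNSIZE indicate the address fields hold
+   byte addresses divided by 2 MB (i.e. shifted right by 21) before being
+   placed at _SHIFT. The helper below packs only the BASE/END fields and
+   assumes the shifted addresses fit their 19-bit fields; the PAGE_SIZE
+   encoding (bits 40:38) is not described here, so the caller would OR it in
+   separately. Helper name is an illustrative assumption. */
+static inline uint64_t vha_example_range_one_addrs(uint64_t base_byte_addr,
+                                                   uint64_t end_byte_addr)
+{
+    uint64_t v = 0;
+
+    v |= (base_byte_addr >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT)
+         << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT;
+    v |= (end_byte_addr >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)
+         << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT;
+    return v;
+}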
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_TWO
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO                    (0xE358U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_THREE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE                  (0xE360U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL         (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT  (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT   (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT  (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR                   (0xE368U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL          (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT   (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK  (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT    (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT   (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_MASK               (0x00000003U)
+/*
+Reserved value */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_RESERVED           (0x00000000U)
+/*
+Addresses are interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING     (0x00000001U)
+/*
+Addresses are interleaved between Cache Banks on a Cacheline boundary */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_LINEAR             (0x00000002U)
+/*
+Addresses are interleaved between Cache Banks using an XOR hash of the address bits below the 4KB page granularity */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_IN_PAGE_HASH       (0x00000003U)
+
+
+/*
+    Register VHA_CR_SLC_CTRL
+*/
+#define VHA_CR_SLC_CTRL                                   (0xE200U)
+#define VHA_CR_SLC_CTRL_MASKFULL                          (IMG_UINT64_C(0x000000000001FFF3))
+#define VHA_CR_SLC_CTRL_ISCHED_CREDIT_THRESHOLD_SHIFT     (13U)
+#define VHA_CR_SLC_CTRL_ISCHED_CREDIT_THRESHOLD_CLRMSK    (0XFFFE1FFFU)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_SHIFT          (12U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_CLRMSK         (0XFFFFEFFFU)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_EN             (0X00001000U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_SHIFT          (8U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_SHIFT    (7U)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_CLRMSK   (0XFFFFFF7FU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_EN       (0X00000080U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_SHIFT                  (4U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_CLRMSK                 (0XFFFFFF8FU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_SHIFT                   (0U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK                  (0XFFFFFFFCU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_RESERVED                (0X00000000U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_PVR_V3_HASHING          (0X00000001U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_LINEAR                  (0X00000002U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_IN_PAGE_HASH            (0X00000003U)
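+
+/* Editorial sketch: selecting a bank hash mode is the usual read-modify-write
+   on the 2-bit HASH_MODE field. Note the _CLRMSK here is a 32-bit constant;
+   that is safe for this register because MASKFULL shows no bits above bit 16.
+   Helper name and register base are illustrative assumptions. */
+static inline void vha_example_slc_hash_linear(volatile uint64_t *regbase)
+{
+    volatile uint64_t *reg = (volatile uint64_t *)
+        ((volatile uint8_t *)regbase + VHA_CR_SLC_CTRL);
+
+    *reg = (*reg & VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK) |
+           VHA_CR_SLC_CTRL_HASH_MODE_LINEAR;
+}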
+
+
+/*
+    Register VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS
+*/
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS             (0xE220U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_MASKFULL    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK3_SHIFT (48U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK2_SHIFT (32U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK1_SHIFT (16U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK0_SHIFT (0U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES
+*/
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES            (0xE228U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_MASKFULL   (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK3_SHIFT (48U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK2_SHIFT (32U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK1_SHIFT (16U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK0_SHIFT (0U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_CTRL
+*/
+#define VHA_CR_SLC_FAULT_STOP_CTRL                        (0xE248U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_MASKFULL               (IMG_UINT64_C(0x000000000003FFFF))
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_SHIFT              (17U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_CLRMSK             (0XFFFDFFFFU)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_EN                 (0X00020000U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_SHIFT           (0U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING0
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0                     (0xE280U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_SHIFT        (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_CLRMSK       (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_SHIFT        (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_CLRMSK       (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_SHIFT        (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_SHIFT        (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_SHIFT        (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_SHIFT        (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_SHIFT        (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_SHIFT        (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_SHIFT        (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_SHIFT        (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING1
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1                     (0xE288U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_SHIFT       (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_CLRMSK      (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_SHIFT       (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_CLRMSK      (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_SHIFT       (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_SHIFT       (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_SHIFT       (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_SHIFT       (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_SHIFT       (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_SHIFT       (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_SHIFT       (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_SHIFT       (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
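+
+/*
+ * Usage sketch (illustrative only): the two mapping registers use a
+ * regular layout - one 4-bit slot per OSID with the low 3 bits valid,
+ * per the 0x7777... MASKFULL - so the field position can be computed
+ * instead of switching over the per-OSID macros.  OSIDs 0-15 live in
+ * MAPPING0 and 16-31 in MAPPING1.  vha_read64()/vha_write64() are
+ * hypothetical MMIO accessors.
+ */
+#if 0 /* example only, excluded from compilation */
+static void example_map_osid_to_ctxt(void *regs, unsigned int osid,
+				     uint64_t ctxt)
+{
+	extern uint64_t vha_read64(void *regs, uint32_t offset);
+	extern void vha_write64(void *regs, uint32_t offset, uint64_t val);
+	uint32_t reg = (osid < 16U) ? VHA_CR_MMU_OSID_CTXT_MAPPING0 :
+				      VHA_CR_MMU_OSID_CTXT_MAPPING1;
+	unsigned int shift = (osid & 15U) * 4U;
+	uint64_t v = vha_read64(regs, reg);
+
+	v &= ~(IMG_UINT64_C(0x7) << shift);         /* clear the slot  */
+	v |= (ctxt & IMG_UINT64_C(0x7)) << shift;   /* insert context  */
+	vha_write64(regs, reg, v);
+}
+#endif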
+
+
+/*
+    Register VHA_CR_SLC_CCM_CTRL
+*/
+#define VHA_CR_SLC_CCM_CTRL                               (0xE300U)
+#define VHA_CR_SLC_CCM_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000FF00FF))
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_SHIFT            (16U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_SHIFT         (0U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_CLRMSK        (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_CCM_STATUS
+*/
+#define VHA_CR_SLC_CCM_STATUS                             (0xE308U)
+#define VHA_CR_SLC_CCM_STATUS_MASKFULL                    (IMG_UINT64_C(0x0FFFFFFFF10FF0FF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_SHIFT          (52U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_CLRMSK         (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_SHIFT          (44U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_CLRMSK         (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_SHIFT          (36U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_CLRMSK         (IMG_UINT64_C(0XFFFFF00FFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_SHIFT          (28U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_CLRMSK         (IMG_UINT64_C(0XFFFFFFF00FFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_SHIFT              (24U)
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_EN                 (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_SHIFT         (12U)
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_SHIFT        (0U)
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register VHA_CR_ACE_QOS_CTRL
+*/
+#define VHA_CR_ACE_QOS_CTRL                               (0xE310U)
+#define VHA_CR_ACE_QOS_CTRL_MASKFULL                      (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_SHIFT                (12U)
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_CLRMSK               (0XFFFF0FFFU)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_SHIFT                    (8U)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_CLRMSK                   (0XFFFFF0FFU)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_SHIFT                  (4U)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_CLRMSK                 (0XFFFFFF0FU)
+#define VHA_CR_ACE_QOS_CTRL_LOW_SHIFT                     (0U)
+#define VHA_CR_ACE_QOS_CTRL_LOW_CLRMSK                    (0XFFFFFFF0U)
+
+
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MASK (0x00000003U)
+/*
+Low */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_LOW (0x00000000U)
+/*
+Medium */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_HIGH (0x00000002U)
+/*
+Critical */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
+/*
+    Register VHA_CR_ACE_PRIORITY_MAPPING_CTRL
+*/
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL                  (0xE318U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MASKFULL         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_SHIFT        (62U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_MEDIUM       (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH         (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CRITICAL     (IMG_UINT64_C(0xc000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_SHIFT   (26U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_CLRMSK  (IMG_UINT64_C(0XC000000003FFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_SHIFT    (24U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_MEDIUM   (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_HIGH     (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CRITICAL (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_SHIFT     (22U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_MEDIUM    (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_HIGH      (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CRITICAL  (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_SHIFT     (20U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_MEDIUM    (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_HIGH      (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CRITICAL  (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_SHIFT        (18U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_MEDIUM       (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_HIGH         (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CRITICAL     (IMG_UINT64_C(0x00000000000c0000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_SHIFT    (16U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_MEDIUM   (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_HIGH     (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CRITICAL (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_SHIFT       (14U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_LOW         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_MEDIUM      (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_HIGH        (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CRITICAL    (IMG_UINT64_C(0x000000000000c000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_SHIFT       (12U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_LOW         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_MEDIUM      (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_HIGH        (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CRITICAL    (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_SHIFT     (10U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_LOW       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_MEDIUM    (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_HIGH      (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CRITICAL  (IMG_UINT64_C(0x0000000000000c00))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_SHIFT      (8U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_MEDIUM     (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_HIGH       (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CRITICAL   (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_SHIFT      (6U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_MEDIUM     (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_HIGH       (IMG_UINT64_C(0x0000000000000080))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CRITICAL   (IMG_UINT64_C(0x00000000000000c0))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_SHIFT      (4U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_MEDIUM     (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_HIGH       (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CRITICAL   (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_SHIFT      (2U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_MEDIUM     (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_HIGH       (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CRITICAL   (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_SHIFT      (0U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_MEDIUM     (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_HIGH       (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CRITICAL   (IMG_UINT64_C(0x0000000000000003))  
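+
+/*
+ * Usage sketch (illustrative only): the ENUM_PRIORITIES values above are
+ * unshifted 2-bit codes, whereas the per-field _LOW/_MEDIUM/_HIGH/_CRITICAL
+ * macros are already shifted into place; either form works.  This sketch
+ * assigns one priority (e.g. ..._ENUM_PRIORITIES_HIGH) to all five CMD
+ * requestors.  vha_read64()/vha_write64() are hypothetical MMIO accessors.
+ */
+#if 0 /* example only, excluded from compilation */
+static void example_set_cmd_priorities(void *regs, uint64_t prio /* 0..3 */)
+{
+	extern uint64_t vha_read64(void *regs, uint32_t offset);
+	extern void vha_write64(void *regs, uint32_t offset, uint64_t val);
+	uint64_t v = vha_read64(regs, VHA_CR_ACE_PRIORITY_MAPPING_CTRL);
+
+	v &= VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CLRMSK &
+	     VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CLRMSK &
+	     VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CLRMSK &
+	     VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CLRMSK &
+	     VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CLRMSK;
+	v |= (prio << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_SHIFT) |
+	     (prio << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_SHIFT) |
+	     (prio << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_SHIFT) |
+	     (prio << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_SHIFT) |
+	     (prio << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_SHIFT);
+	vha_write64(regs, VHA_CR_ACE_PRIORITY_MAPPING_CTRL, v);
+}
+#endif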
+
+
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x00000006U)
+/*
+Write-through Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_WRITE_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x00000007U)
+/*
+Write-back Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_WRITE_ALLOCATE (0x0000000fU)
+
+
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x0000000aU)
+/*
+Write-through Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_READ_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x0000000bU)
+/*
+Write-back Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_READ_ALLOCATE (0x0000000fU)
+
+
+/*
+Non-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_NON_SHAREABLE    (0x00000000U)
+/*
+System */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_SYSTEM           (0x00000001U)
+
+
+/*
+Inner-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_INNER_SHAREABLE   (0x00000000U)
+/*
+Outer-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_OUTER_SHAREABLE   (0x00000001U)
+
+
+/*
+    Register VHA_CR_ACE_CTRL
+*/
+#define VHA_CR_ACE_CTRL                                   (0xE320U)
+#define VHA_CR_ACE_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000007FCFFF))
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_SHIFT                   (19U)
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_CLRMSK                  (0XFF87FFFFU)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT              (15U)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK             (0XFFF87FFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN    (0X00004000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_SHIFT                 (8U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK                (0XFFFFF0FFU)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE     (0X00000100U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000200U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE  (0X00000300U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0X00000600U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0X00000E00U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0X00000700U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0X00000F00U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_SHIFT                 (4U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK                (0XFFFFFF0FU)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE     (0X00000010U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000020U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE  (0X00000030U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0X000000A0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0X000000E0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0X000000B0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0X000000F0U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_SHIFT                  (2U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK                 (0XFFFFFFF3U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_SHIFT                  (1U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_CLRMSK                 (0XFFFFFFFDU)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE        (0x00000000U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE        (0X00000002U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT              (0U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE      (0x00000000U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM             (0X00000001U)
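+
+/*
+ * Usage sketch (illustrative only): the AWCACHE/ARCACHE value macros are
+ * pre-shifted, so configuring the MMU write and read cacheability is a
+ * matter of clearing each field with its _CLRMSK and ORing the chosen
+ * encoding in.  vha_read32()/vha_write32() are hypothetical MMIO accessors.
+ */
+#if 0 /* example only, excluded from compilation */
+static void example_ace_set_mmu_writeback(void *regs)
+{
+	extern uint32_t vha_read32(void *regs, uint32_t offset);
+	extern void vha_write32(void *regs, uint32_t offset, uint32_t val);
+	uint32_t v = vha_read32(regs, VHA_CR_ACE_CTRL);
+
+	v &= VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK;
+	v |= VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE;
+	v &= VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK;
+	v |= VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE;
+	vha_write32(regs, VHA_CR_ACE_CTRL, v);
+}
+#endif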
+
+
+/*
+    Register VHA_CR_ACE_STATUS
+*/
+#define VHA_CR_ACE_STATUS                                 (0xE330U)
+#define VHA_CR_ACE_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_SHIFT             (28U)
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_CLRMSK            (0X0FFFFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_SHIFT             (24U)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_CLRMSK            (0XF0FFFFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_SHIFT             (20U)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_CLRMSK            (0XFF0FFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_SHIFT             (16U)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_CLRMSK            (0XFFF0FFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_SHIFT             (12U)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_CLRMSK            (0XFFFF0FFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_SHIFT             (8U)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_CLRMSK            (0XFFFFF0FFU)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_SHIFT             (4U)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_CLRMSK            (0XFFFFFF0FU)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_SHIFT             (0U)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_CLRMSK            (0XFFFFFFF0U)
+
+
+#define VHA_CR_SOC_AXI_ENUM_COH_MASK                      (0x00000003U)
+/*
+The SoC does not support any form of Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_NO_COHERENCY              (0x00000000U)
+/*
+The SoC supports ACE-Lite or I/O Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_ACE_LITE_COHERENCY        (0x00000001U)
+/*
+The SoC supports full ACE or 2-Way Coherency */
+#define VHA_CR_SOC_AXI_ENUM_COH_FULL_ACE_COHERENCY        (0x00000002U)
+
+
+/*
+    Register VHA_CR_SOC_AXI
+*/
+#define VHA_CR_SOC_AXI                                    (0xE338U)
+#define VHA_CR_SOC_AXI_MASKFULL                           (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000008U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000004U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT            (0U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK           (0XFFFFFFFCU)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY     (0x00000000U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0X00000001U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0X00000002U)
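+
+/*
+ * Usage sketch (illustrative only): VHA_CR_SOC_AXI is a capability
+ * register, so it is only read.  The low two bits decode against the
+ * VHA_CR_SOC_AXI_ENUM_COH_* values above; the burst-support bits are
+ * plain flags.  vha_read32() is a hypothetical MMIO accessor.
+ */
+#if 0 /* example only, excluded from compilation */
+static int example_soc_is_fully_coherent(void *regs)
+{
+	extern uint32_t vha_read32(void *regs, uint32_t offset);
+	uint32_t v = vha_read32(regs, VHA_CR_SOC_AXI);
+	uint32_t coh = v & VHA_CR_SOC_AXI_ENUM_COH_MASK;
+
+	return coh == VHA_CR_SOC_AXI_ENUM_COH_FULL_ACE_COHERENCY;
+}
+#endif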
+
+
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_MASK            (0x00000003U)
+/*
+Addresses interleaved between Cache Banks using a weaved XOR hash of address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_WEAVED_HASH     (0x00000000U)
+/*
+Addresses interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING  (0x00000001U)
+
+
+/*
+    Register VHA_CR_L1_GLB_CTRL
+*/
+#define VHA_CR_L1_GLB_CTRL                                (0xE400U)
+#define VHA_CR_L1_GLB_CTRL_MASKFULL                       (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_SHIFT                (0U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_WEAVED_HASH          (0x00000000U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_PVR_V3_HASHING       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING2
+*/
+#define VHA_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define VHA_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING3
+*/
+#define VHA_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define VHA_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_FIX
+*/
+#define VHA_CR_SLC_FIX                                    (0xF0D8U)
+#define VHA_CR_SLC_FIX_MASKFULL                           (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_FIX_DISABLE_SHIFT                      (0U)
+#define VHA_CR_SLC_FIX_DISABLE_CLRMSK                     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_PWR_MAN_HYSTERESIS
+*/
+#define VHA_CR_PWR_MAN_HYSTERESIS                         (0xF100U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_SHIFT             (0U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_CLRMSK            (0XFFFFFFE0U)
+
+
+#define VHA_CR_CNN_DEBUG_CTRL_MASK                        (0x00000003U)
+/*
+Debug is switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_DISABLE                     (0x00000000U)
+/*
+Debug is output at the end of each stream */
+#define VHA_CR_CNN_DEBUG_CTRL_STREAM                      (0x00000001U)
+/*
+Debug is output at the end of each layer */
+#define VHA_CR_CNN_DEBUG_CTRL_LAYER                       (0x00000002U)
+/*
+Debug is output at the end of each pass */
+#define VHA_CR_CNN_DEBUG_CTRL_PASS                        (0x00000003U)
+
+
+#define VHA_CR_CNN_PRELOAD_CTRL_MASK                      (0x00000007U)
+/*
+Preloads are switched off */
+#define VHA_CR_CNN_PRELOAD_CTRL_DISABLE                   (0x00000000U)
+/*
+Preloads are triggered 64 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_64                      (0x00000001U)
+/*
+Preloads are triggered 128 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_128                     (0x00000002U)
+/*
+Preloads are triggered 192 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_192                     (0x00000003U)
+/*
+Preloads are triggered 256 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_256                     (0x00000004U)
+/*
+Preloads are triggered 320 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_320                     (0x00000005U)
+/*
+Preloads are triggered 384 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_384                     (0x00000006U)
+/*
+Preloads are triggered 448 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_448                     (0x00000007U)
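+
+/*
+ * Worked example (illustrative only): the N_64..N_448 codes above equal
+ * the request count divided by 64 (DISABLE = 0, N_64 = 1, ..., N_448 = 7),
+ * so a trigger point expressed in requests past the 32k boundary converts
+ * with a single division.
+ */
+#if 0 /* example only, excluded from compilation */
+static uint32_t example_preload_code(unsigned int n_reqs /* 0, 64, ..., 448 */)
+{
+	return (uint32_t)(n_reqs / 64U) & VHA_CR_CNN_PRELOAD_CTRL_MASK;
+}
+#endif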
+
+
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_SHIFT             (21U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_CLRMSK            (0XFFDFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_READY_EN                (0X00200000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_SHIFT             (20U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_CLRMSK            (0XFFEFFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_ERROR_EN                (0X00100000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_SHIFT            (19U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_CLRMSK           (0XFFF7FFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_HL_WDT_EN               (0X00080000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_SHIFT         (18U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_CLRMSK        (0XFFFBFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_EN            (0X00040000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_SHIFT    (16U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_CLRMSK   (0XFFFEFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_EN       (0X00010000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_SHIFT      (3U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_CLRMSK     (0XFFFFFFF7U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_EN         (0X00000008U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_SHIFT        (1U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_EN           (0X00000002U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_SHIFT     (0U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_CLRMSK    (0XFFFFFFFEU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_EN        (0X00000001U)
+
+
+/*
+Memory buffer will be used for MODEL only (CBUF, CMD, DEBUG, PERF) */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_MODEL_ONLY               (0x00000000U)
+/*
+Memory buffer will be used for IO only (OUTPACK, IBUF, ABUF, EWO) or for both MODEL and IO */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_IO_OR_SHARED             (0x00000001U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CONTROL                            (0x10000U)
+#define VHA_CR_OS0_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS0_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS0_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_CNN_CONTROL_START_EN                   (0X00000001U)
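+
+/*
+ * Usage sketch (illustrative only): kicking off a command stream for host
+ * OS0.  The _MIN1 suffix is read here as "value minus one", which is an
+ * assumption based on the field name, and cmd_size is the command-stream
+ * size in whatever unit the hardware expects.  vha_write32() is a
+ * hypothetical MMIO accessor.
+ */
+#if 0 /* example only, excluded from compilation */
+static void example_cnn_start(void *regs, uint32_t prio, uint32_t cmd_size)
+{
+	extern void vha_write32(void *regs, uint32_t offset, uint32_t val);
+	uint32_t v = 0;
+
+	v |= (prio << VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT)
+	     & ~VHA_CR_OS0_CNN_CONTROL_PRIORITY_CLRMSK;
+	v |= ((cmd_size - 1U) << VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT)
+	     & ~VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK;
+	v |= VHA_CR_OS0_CNN_CONTROL_START_EN;   /* start bit */
+	vha_write32(regs, VHA_CR_OS0_CNN_CONTROL, v);
+}
+#endif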
+
+
+#define VHA_CR_OS0_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed */
+#define VHA_CR_OS0_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher priority request from another host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not been started */
+#define VHA_CR_OS0_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_STATUS
+*/
+#define VHA_CR_OS0_CNN_STATUS                             (0x10008U)
+#define VHA_CR_OS0_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C0FFFFFF))
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_IDLE          (0x00000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS0_CNN_STATUS_PASS_COUNT_SHIFT            (16U)
+#define VHA_CR_OS0_CNN_STATUS_PASS_COUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFFFF00FFU)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
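+
+/*
+ * Usage sketch (illustrative only): decoding the OS0 status word.  The
+ * two state bits compare directly against the pre-shifted
+ * _CURRENT_STATE_* values; the pass/layer/stream counters unpack with
+ * mask-and-shift.  vha_read32() is a hypothetical MMIO accessor.
+ */
+#if 0 /* example only, excluded from compilation */
+static int example_cnn_is_idle(void *regs)
+{
+	extern uint32_t vha_read32(void *regs, uint32_t offset);
+	uint32_t v = vha_read32(regs, VHA_CR_OS0_CNN_STATUS);
+	uint32_t state = v & ~VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK;
+	uint32_t layers = (v & ~VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK)
+			  >> VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT;
+
+	(void)layers; /* the other counters unpack the same way */
+	return state == VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_IDLE;
+}
+#endif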
+
+
+/*
+    Register VHA_CR_OS0_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS                   (0x10020U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (8U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED                   (0x10038U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
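+
+/*
+ * Usage sketch (illustrative only): the layout is regular - bit n flags
+ * alternative address n as used, and bit 16+n gives its buffer type
+ * (0 = MODEL_ONLY, 1 = IO_OR_SHARED, matching the ALT_ADDR_BUF_TYPE
+ * values above) - so the 16 slots can be walked in a loop instead of
+ * through the per-slot macros.  vha_read64() is a hypothetical MMIO
+ * accessor.
+ */
+#if 0 /* example only, excluded from compilation */
+static uint32_t example_io_slots(void *regs)
+{
+	extern uint64_t vha_read64(void *regs, uint32_t offset);
+	uint64_t v = vha_read64(regs, VHA_CR_OS0_CNN_ALT_ADDRESS_USED);
+	uint32_t io_slots = 0;
+	unsigned int i;
+
+	for (i = 0; i < 16U; i++) {
+		int used = !!(v & (IMG_UINT64_C(1) << i));          /* bit n    */
+		int io   = !!(v & (IMG_UINT64_C(1) << (16U + i)));  /* bit 16+n */
+
+		if (used && io)
+			io_slots |= 1U << i;
+	}
+	return io_slots; /* bitmask of used IO/shared slots */
+}
+#endif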
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0                       (0x10040U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1                       (0x10048U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2                       (0x10050U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3                       (0x10058U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4                       (0x10060U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5                       (0x10068U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6                       (0x10070U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7                       (0x10078U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL                  (0x10080U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS0_VHA_EVENT_ENABLE                       (0x10088U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS0_VHA_EVENT_STATUS                       (0x10090U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS0_VHA_EVENT_CLEAR                        (0x10098U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
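+
+/*
+ * Usage sketch (illustrative only): the ENABLE/STATUS/CLEAR trio share one
+ * bit layout, which suggests the usual pattern - unmask sources via ENABLE,
+ * read STATUS in the handler, then write the handled bits back to CLEAR.
+ * Write-one-to-clear semantics are assumed here from the register name.
+ * vha_read32()/vha_write32() are hypothetical MMIO accessors.
+ */
+#if 0 /* example only, excluded from compilation */
+static void example_handle_events(void *regs)
+{
+	extern uint32_t vha_read32(void *regs, uint32_t offset);
+	extern void vha_write32(void *regs, uint32_t offset, uint32_t val);
+	uint32_t events = vha_read32(regs, VHA_CR_OS0_VHA_EVENT_STATUS);
+
+	if (events & VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN)
+		; /* ... handle stream completion ... */
+	if (events & VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN)
+		; /* ... handle page fault ... */
+
+	vha_write32(regs, VHA_CR_OS0_VHA_EVENT_CLEAR, events); /* ack all */
+}
+#endif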
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CRC_CONTROL                        (0x10100U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CRC_ADDRESS                        (0x10108U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (8U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (256U)
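+
+/*
+ * Usage sketch (illustrative only): the _ALIGNSHIFT/_ALIGNSIZE macros say
+ * the CRC buffer address must be 256-byte aligned, and the field occupies
+ * bits 8..39.  The register is read here as holding the byte address with
+ * its low eight bits implied zero; treat that encoding as an assumption.
+ * vha_write64() is a hypothetical MMIO accessor.
+ */
+#if 0 /* example only, excluded from compilation */
+static int example_set_crc_address(void *regs, uint64_t addr)
+{
+	extern void vha_write64(void *regs, uint32_t offset, uint64_t val);
+
+	if (addr & (VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE - 1U))
+		return -1; /* not 256-byte aligned */
+	vha_write64(regs, VHA_CR_OS0_CNN_CRC_ADDRESS,
+		    addr & ~VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK);
+	return 0;
+}
+#endif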
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS                      (0x10110U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (8U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS0_CNN_DEBUG_SIZE                         (0x10118U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL                      (0x10120U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (0x00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_STATUS                       (0x10128U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL                    (0x10130U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
+
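+/*
+   Illustrative sketch only: each *_N_REQS enum above is pre-shifted to its
+   field position, so a full preload configuration can be composed by OR-ing
+   one value per field (the particular choices here are arbitrary).
+   vha_write64() is a placeholder MMIO accessor.
+
+     vha_write64(VHA_CR_OS0_CNN_PRELOAD_CONTROL,
+                 VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 |
+                 VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64  |
+                 VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 |
+                 VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE);
+*/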
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8                       (0x10140U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9                       (0x10148U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10                      (0x10150U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11                      (0x10158U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12                      (0x10160U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13                      (0x10168U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14                      (0x10170U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15                      (0x10178U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS0_CNN_PERFORMANCE                        (0x101A0U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL                         (0x1E000U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS                  (0x1E038U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
+
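+/*
+   Illustrative sketch only: one plausible invalidation sequence is to request
+   an invalidate of every page-table level across all contexts, then poll the
+   PENDING bit until the walk finishes. vha_read64() and vha_write64() are
+   placeholder MMIO accessors; real code should bound the poll with a timeout.
+
+     vha_write64(VHA_CR_OS0_MMU_CTRL_INVAL,
+                 VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN |
+                 VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN);
+     while (vha_read64(VHA_CR_OS0_MMU_CTRL_INVAL_STATUS) &
+            VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_EN)
+             ;  // spin until the invalidation completes
+*/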
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT              (0x1E008U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING                      (0x1E010U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
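+/*
+   Illustrative sketch only: BASE_ADDR has SHIFT 0 but ALIGNSHIFT 12, which
+   reads as the register holding the 4 KB-aligned page-catalogue physical
+   address right-shifted by 12 (a page-frame number). The context to map is
+   selected first through CBASE_MAPPING_CONTEXT. vha_write64() is a
+   placeholder MMIO accessor; ctx_id (uint32_t) and pc_phys (uint64_t) are
+   hypothetical inputs.
+
+     vha_write64(VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT,
+                 ctx_id & ~VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK);
+     vha_write64(VHA_CR_OS0_MMU_CBASE_MAPPING,
+                 (uint32_t)(pc_phys >> VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+                 & ~VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+*/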
+
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS1                      (0x1E018U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
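+/*
+   Illustrative sketch only: decoding a fault record by masking each field in
+   place and shifting it down. The ADDRESS field is kept masked in place here
+   (bits 4..47); its exact granularity is a hardware detail this header does
+   not state. vha_read64() is a placeholder MMIO accessor.
+
+     uint64_t fs1 = vha_read64(VHA_CR_OS0_MMU_FAULT_STATUS1);
+     if (fs1 & VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_EN) {
+             unsigned level = (unsigned)((fs1 & ~VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_CLRMSK)
+                              >> VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_SHIFT);
+             unsigned ctx   = (unsigned)((fs1 & ~VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK)
+                              >> VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_SHIFT);
+             uint64_t addr  = fs1 & ~VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK;
+             int read_not_write = !!(fs1 & VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN);
+             pr_err("VHA MMU fault: level=%u ctx=%u addr=0x%llx %s\n",
+                    level, ctx, (unsigned long long)addr,
+                    read_not_write ? "read" : "write");
+     }
+*/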
+
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS2                      (0x1E020U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS0_MMU_CTRL_LEGACY                        (0x1E040U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_CONTROL
+*/
+#define VHA_CR_OS1_CNN_CONTROL                            (0x20000U)
+#define VHA_CR_OS1_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS1_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS1_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS1_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS1_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS1_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS1_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS1_CNN_CONTROL_START_EN                   (0X00000001U)
+
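+/*
+   Illustrative sketch only (not part of the generated register map):
+   submitting a command stream via CNN_CONTROL. The CMD_SIZE_MIN1 field
+   carries the command buffer size minus one, and START_EN kicks the core.
+   vha_write64() is a placeholder MMIO accessor and cmd_size a hypothetical
+   uint32_t input.
+
+     uint32_t ctrl = ((cmd_size - 1U) << VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT)
+                     & ~VHA_CR_OS1_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK;
+     ctrl |= VHA_CR_OS1_CNN_CONTROL_START_EN;
+     vha_write64(VHA_CR_OS1_CNN_CONTROL, ctrl);
+*/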
+
+#define VHA_CR_OS1_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host*/
+#define VHA_CR_OS1_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed*/
+#define VHA_CR_OS1_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher-priority request from another host*/
+#define VHA_CR_OS1_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not yet been started */
+#define VHA_CR_OS1_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_STATUS
+*/
+#define VHA_CR_OS1_CNN_STATUS                             (0x20008U)
+#define VHA_CR_OS1_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C0FFFFFF))
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_IDLE          (00000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS1_CNN_STATUS_PASS_COUNT_SHIFT            (16U)
+#define VHA_CR_OS1_CNN_STATUS_PASS_COUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFFFF00FFU)
+#define VHA_CR_OS1_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS1_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
+
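+/*
+   Illustrative sketch only: CURRENT_STATE is a two-bit field; shifting it
+   down and masking with the unshifted STATE_MASK above yields one of the
+   STATE_IDLE/RUN/SUSPEND/PENDING encodings documented before this register.
+   vha_read64() is a placeholder MMIO accessor.
+
+     uint32_t status = (uint32_t)vha_read64(VHA_CR_OS1_CNN_STATUS);
+     uint32_t state  = (status >> VHA_CR_OS1_CNN_STATUS_CURRENT_STATE_SHIFT)
+                       & VHA_CR_OS1_CNN_STATUS_STATE_MASK;
+     uint32_t layers = (status & ~VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_CLRMSK)
+                       >> VHA_CR_OS1_CNN_STATUS_LAYER_COUNT_SHIFT;
+     if (state == VHA_CR_OS1_CNN_STATUS_STATE_SUSPEND) {
+             // this host was pre-empted by a higher-priority host
+     }
+*/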
+
+/*
+    Register VHA_CR_OS1_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS                   (0x20020U)
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (8U)
+#define VHA_CR_OS1_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED                   (0x20038U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
+
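+/*
+   Illustrative sketch only: each ALT_ADDRn has a USED flag and a BUF_TYPE
+   flag, so a driver can tell which alternate addresses the command stream
+   referenced and whether each buffer is model-only or IO/shared (ADDR8 shown
+   here; the other indices follow the same pattern). vha_read64() is a
+   placeholder MMIO accessor.
+
+     uint64_t used = vha_read64(VHA_CR_OS1_CNN_ALT_ADDRESS_USED);
+     if (used & VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN) {
+             int io_or_shared = !!(used &
+                     VHA_CR_OS1_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED);
+             // io_or_shared == 0 means the ADDR8 buffer is model-only
+     }
+*/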
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0                       (0x20040U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1                       (0x20048U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2                       (0x20050U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3                       (0x20058U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4                       (0x20060U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5                       (0x20068U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6                       (0x20070U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7                       (0x20078U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL                  (0x20080U)
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS1_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS1_VHA_EVENT_ENABLE                       (0x20088U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS1_VHA_EVENT_STATUS                       (0x20090U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS1_VHA_EVENT_CLEAR                        (0x20098U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS1_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
+
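+/*
+   Illustrative sketch only: a typical interrupt-service pattern with this
+   ENABLE/STATUS/CLEAR triple is to read the latched status, handle the bits
+   of interest, and write the same bits back to EVENT_CLEAR so that nothing
+   observed is lost. vha_read64()/vha_write64() are placeholder MMIO
+   accessors and the handlers are hypothetical.
+
+     uint32_t events = (uint32_t)vha_read64(VHA_CR_OS1_VHA_EVENT_STATUS);
+     if (events & VHA_CR_OS1_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN)
+             handle_cnn_complete();   // hypothetical handler
+     if (events & VHA_CR_OS1_VHA_EVENT_STATUS_VHA_ERROR_EN)
+             handle_core_error();     // hypothetical handler
+     vha_write64(VHA_CR_OS1_VHA_EVENT_CLEAR, events);  // ack what was seen
+*/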
+
+/*
+    Register VHA_CR_OS1_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS1_CNN_CRC_CONTROL                        (0x20100U)
+#define VHA_CR_OS1_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS1_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_CRC_ADDRESS                        (0x20108U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (8U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS1_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS                      (0x20110U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (8U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS1_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS1_CNN_DEBUG_SIZE                         (0x20118U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS1_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL                      (0x20120U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS1_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS1_CNN_DEBUG_STATUS                       (0x20128U)
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS1_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS1_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL                    (0x20130U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS1_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8                       (0x20140U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9                       (0x20148U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10                      (0x20150U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11                      (0x20158U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12                      (0x20160U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13                      (0x20168U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14                      (0x20170U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15                      (0x20178U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS1_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS1_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS1_CNN_PERFORMANCE                        (0x201A0U)
+#define VHA_CR_OS1_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS1_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS1_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS1_MMU_CTRL_INVAL                         (0x2E000U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS                  (0x2E038U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT              (0x2E008U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS1_MMU_CBASE_MAPPING                      (0x2E010U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS1_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS1_MMU_FAULT_STATUS1                      (0x2E018U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS1_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS1_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS1_MMU_FAULT_STATUS2                      (0x2E020U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS1_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS1_MMU_CTRL_LEGACY                        (0x2E040U)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_CONTROL
+*/
+#define VHA_CR_OS2_CNN_CONTROL                            (0x30000U)
+#define VHA_CR_OS2_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000337F))
+#define VHA_CR_OS2_CNN_CONTROL_CTXT_PASID_SHIFT           (12U)
+#define VHA_CR_OS2_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFFFCFFFU)
+#define VHA_CR_OS2_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS2_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS2_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS2_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS2_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS2_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS2_CNN_CONTROL_START_EN                   (0X00000001U)
+
+
+#define VHA_CR_OS2_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher priority request from another host*/
+#define VHA_CR_OS2_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not yet been started */
+#define VHA_CR_OS2_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_STATUS
+*/
+#define VHA_CR_OS2_CNN_STATUS                             (0x30008U)
+#define VHA_CR_OS2_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C0FFFFFF))
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_IDLE          (00000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS2_CNN_STATUS_PASS_COUNT_SHIFT            (16U)
+#define VHA_CR_OS2_CNN_STATUS_PASS_COUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_OS2_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS2_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFFFF00FFU)
+#define VHA_CR_OS2_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS2_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
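+
+/*
+    Editorial usage note (not part of the autogenerated output): the
+    SHIFT and CLRMSK pairs in this header follow one convention: ANDing
+    with CLRMSK clears a field, ANDing with its inverse selects it. A
+    minimal sketch of decoding the per-host state, assuming a
+    hypothetical 32-bit register read accessor vha_readl():
+
+        uint32_t status = vha_readl(VHA_CR_OS2_CNN_STATUS);
+        uint32_t state  = (status & ~VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_CLRMSK)
+                          >> VHA_CR_OS2_CNN_STATUS_CURRENT_STATE_SHIFT;
+
+    state then holds one of the VHA_CR_OS2_CNN_STATUS_STATE_ values
+    (IDLE, RUN, SUSPEND or PENDING) documented above.
+*/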
+
+
+/*
+    Register VHA_CR_OS2_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS                   (0x30020U)
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (8U)
+#define VHA_CR_OS2_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED                   (0x30038U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0                       (0x30040U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1                       (0x30048U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2                       (0x30050U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3                       (0x30058U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4                       (0x30060U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5                       (0x30068U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6                       (0x30070U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7                       (0x30078U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL                  (0x30080U)
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS2_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS2_VHA_EVENT_ENABLE                       (0x30088U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS2_VHA_EVENT_STATUS                       (0x30090U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_SHIFT       (21U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_CLRMSK      (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_READY_EN          (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_SHIFT       (20U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_CLRMSK      (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_ERROR_EN          (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_SHIFT      (19U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_CLRMSK     (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_HL_WDT_EN         (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS2_VHA_EVENT_CLEAR                        (0x30098U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x00000000003D000B))
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_SHIFT        (21U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_CLRMSK       (0XFFDFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_READY_EN           (0X00200000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_SHIFT        (20U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_CLRMSK       (0XFFEFFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_ERROR_EN           (0X00100000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_SHIFT       (19U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_CLRMSK      (0XFFF7FFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_HL_WDT_EN          (0X00080000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS2_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
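+
+/*
+    Editorial usage note (not part of the autogenerated output): the
+    EVENT_ENABLE/EVENT_STATUS/EVENT_CLEAR registers form the usual
+    interrupt triple. A hedged handler sketch, assuming hypothetical
+    vha_readl() and vha_writel(value, offset) accessors and assuming
+    EVENT_CLEAR is write-one-to-clear (the generated header does not
+    state this explicitly):
+
+        uint32_t events = vha_readl(VHA_CR_OS2_VHA_EVENT_STATUS);
+        if (events & VHA_CR_OS2_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN)
+            handle_cnn_complete();
+        vha_writel(events, VHA_CR_OS2_VHA_EVENT_CLEAR);
+
+    handle_cnn_complete() is a placeholder for driver-specific work.
+*/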
+
+
+/*
+    Register VHA_CR_OS2_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS2_CNN_CRC_CONTROL                        (0x30100U)
+#define VHA_CR_OS2_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS2_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_CRC_ADDRESS                        (0x30108U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (8U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS2_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS                      (0x30110U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (8U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS2_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS2_CNN_DEBUG_SIZE                         (0x30118U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (5U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS2_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL                      (0x30120U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS2_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS2_CNN_DEBUG_STATUS                       (0x30128U)
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS2_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS2_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL                    (0x30130U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS2_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8                       (0x30140U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9                       (0x30148U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10                      (0x30150U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11                      (0x30158U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12                      (0x30160U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13                      (0x30168U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14                      (0x30170U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15                      (0x30178U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS2_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS2_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS2_CNN_PERFORMANCE                        (0x301A0U)
+#define VHA_CR_OS2_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS2_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS2_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS2_MMU_CTRL_INVAL                         (0x3E000U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS                  (0x3E038U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
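+
+/*
+    Editorial usage note (not part of the autogenerated output): a
+    plausible invalidate-and-wait sequence, assuming hypothetical
+    vha_readl()/vha_writel(value, offset) accessors and assuming that
+    PENDING remains set while the invalidation is in flight. A real
+    driver would bound this poll with a timeout:
+
+        vha_writel(VHA_CR_OS2_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+                   VHA_CR_OS2_MMU_CTRL_INVAL_PC_EN |
+                   VHA_CR_OS2_MMU_CTRL_INVAL_PD_EN |
+                   VHA_CR_OS2_MMU_CTRL_INVAL_PT_EN,
+                   VHA_CR_OS2_MMU_CTRL_INVAL);
+        while (vha_readl(VHA_CR_OS2_MMU_CTRL_INVAL_STATUS) &
+               VHA_CR_OS2_MMU_CTRL_INVAL_STATUS_PENDING_EN)
+            ;
+*/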
+
+
+/*
+    Register VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT              (0x3E008U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS2_MMU_CBASE_MAPPING                      (0x3E010U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
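+
+/*
+    Editorial usage note (not part of the autogenerated output):
+    ALIGNSHIFT (12) and ALIGNSIZE (4096) indicate that BASE_ADDR holds
+    a 4 KiB aligned address stored pre-shifted. A sketch, assuming a
+    hypothetical vha_writel(value, offset) accessor and a page-aligned
+    page-catalogue physical address pc_phys:
+
+        uint32_t v = (uint32_t)(pc_phys >>
+                VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+                << VHA_CR_OS2_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT;
+        vha_writel(v, VHA_CR_OS2_MMU_CBASE_MAPPING);
+*/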
+
+
+/*
+    Register VHA_CR_OS2_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS2_MMU_FAULT_STATUS1                      (0x3E018U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS2_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS2_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS2_MMU_FAULT_STATUS2                      (0x3E020U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS2_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS2_MMU_CTRL_LEGACY                        (0x3E040U)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK               (0x00000007U)
+/*
+Unprivileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_DATA (0x00000000U)
+/*
+Privileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_DATA (0x00000001U)
+/*
+Unprivileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_DATA (0x00000002U)
+/*
+Privileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_DATA (0x00000003U)
+/*
+Unprivileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_INSTRUCTION (0x00000004U)
+/*
+Privileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_INSTRUCTION (0x00000005U)
+/*
+Unprivileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_INSTRUCTION (0x00000006U)
+/*
+Privileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_INSTRUCTION (0x00000007U)
+
+
+/*
+    Register VHA_CR_ACE_PROT_CTRL
+*/
+#define VHA_CR_ACE_PROT_CTRL                              (0x40000U)
+#define VHA_CR_ACE_PROT_CTRL_MASKFULL                     (IMG_UINT64_C(0x0707070707070707))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_SHIFT                  (56U)
+#define VHA_CR_ACE_PROT_CTRL_OSID7_CLRMSK                 (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0300000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0400000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0500000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0600000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0700000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_SHIFT                  (48U)
+#define VHA_CR_ACE_PROT_CTRL_OSID6_CLRMSK                 (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0003000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0005000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0006000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0007000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_SHIFT                  (40U)
+#define VHA_CR_ACE_PROT_CTRL_OSID5_CLRMSK                 (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000030000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000050000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000060000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000070000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_SHIFT                  (32U)
+#define VHA_CR_ACE_PROT_CTRL_OSID4_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000300000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000500000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000600000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000700000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_SHIFT                  (24U)
+#define VHA_CR_ACE_PROT_CTRL_OSID3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000005000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000006000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000007000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_SHIFT                  (16U)
+#define VHA_CR_ACE_PROT_CTRL_OSID2_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_SHIFT                  (8U)
+#define VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_SHIFT                  (0U)
+#define VHA_CR_ACE_PROT_CTRL_OSID0_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000007))  
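+
+/*
+    Editorial usage note (not part of the autogenerated output): each
+    OSIDn field is updated read-modify-write, clearing with its CLRMSK
+    and ORing in one of the enumerated protection values above. A
+    sketch, assuming hypothetical 64-bit vha_readq()/vha_writeq(value,
+    offset) accessors:
+
+        uint64_t v = vha_readq(VHA_CR_ACE_PROT_CTRL);
+        v &= VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK;
+        v |= VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_DATA;
+        vha_writeq(v, VHA_CR_ACE_PROT_CTRL);
+*/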
+
+
+/*
+    Register VHA_CR_REQ_CTXT_OVERRIDE
+*/
+#define VHA_CR_REQ_CTXT_OVERRIDE                          (0x40010U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_SHIFT       (2U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_EN          (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_SHIFT       (1U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_SHIFT       (0U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_EN          (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_CMD_PRIORITY_LIMITS
+*/
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS                    (0x40018U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_MASKFULL           (IMG_UINT64_C(0x000000000000019B))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_SHIFT    (7U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFE7F))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_SHIFT    (3U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_SHIFT    (0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL
+*/
+#define VHA_CR_OS0_MMU_CTRL                               (0x40020U)
+#define VHA_CR_OS0_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL
+*/
+#define VHA_CR_OS1_MMU_CTRL                               (0x40028U)
+#define VHA_CR_OS1_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL
+*/
+#define VHA_CR_OS2_MMU_CTRL                               (0x40030U)
+#define VHA_CR_OS2_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_SOCIF_BUS_SECURE
+*/
+#define VHA_CR_SOCIF_BUS_SECURE                           (0x4A100U)
+#define VHA_CR_SOCIF_BUS_SECURE_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_SHIFT              (0U)
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_BUS_SECURE_ENABLE_EN                 (0X00000001U)
+
+
+#endif /* _VHA_CR_GYRUS_H_ */
+
+/*****************************************************************************
+ End of file (vha_cr_gyrus.h)
+*****************************************************************************/
+
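The *_SHIFT, *_CLRMSK and *_EN macros above support the usual read-modify-write idiom: AND with a field's CLRMSK to clear it, then OR in either the _EN constant (single-bit fields) or a value shifted left by _SHIFT (multi-bit fields). A minimal sketch of that idiom, assuming the header above is on the include path; vha_reg_read64()/vha_reg_write64() are illustrative stand-ins for whatever MMIO accessors the driver really uses, not SDK functions:

#include <stdint.h>

#ifndef IMG_UINT64_C
#define IMG_UINT64_C(c) c##ULL /* fallback; normally supplied by the SDK's types headers */
#endif
#include "vha_cr_gyrus.h"

/* Illustrative 64-bit MMIO accessors over a mapped register base. */
static inline uint64_t vha_reg_read64(volatile void *base, uint32_t offset)
{
    return *(volatile uint64_t *)((volatile uint8_t *)base + offset);
}

static inline void vha_reg_write64(volatile void *base, uint32_t offset, uint64_t val)
{
    *(volatile uint64_t *)((volatile uint8_t *)base + offset) = val;
}

/* Single-bit field: enable the OS1 request-context override while
 * preserving the OS0/OS2 bits. */
static void vha_enable_os1_override(volatile void *regs)
{
    uint64_t v = vha_reg_read64(regs, VHA_CR_REQ_CTXT_OVERRIDE);

    v &= VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_CLRMSK; /* clear the bit */
    v |= VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_EN;     /* then set it   */
    vha_reg_write64(regs, VHA_CR_REQ_CTXT_OVERRIDE, v);
}

/* Multi-bit field: program the OS1 CNN command priority limit. */
static void vha_set_os1_priority_limit(volatile void *regs, uint64_t limit)
{
    uint64_t v = vha_reg_read64(regs, VHA_CR_CNN_CMD_PRIORITY_LIMITS);

    v &= VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_CLRMSK;
    v |= limit << VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_SHIFT;
    vha_reg_write64(regs, VHA_CR_CNN_CMD_PRIORITY_LIMITS, v);
}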

+ 6553 - 0
driver/include/hwdefs/vha_cr_magna.h

@@ -0,0 +1,6553 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file vha_cr_magna.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _VHA_CR_MAGNA_H_
+#define _VHA_CR_MAGNA_H_
+
+#define VHA_CR_MAGNA_REVISION 1
+
+/*
+    Register VHA_CR_TLC_WM_INDIRECT
+*/
+#define VHA_CR_TLC_WM_INDIRECT                            (0x0908U)
+#define VHA_CR_TLC_WM_INDIRECT_MASKFULL                   (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_TLC_WM_INDIRECT_ADDRESS_SHIFT              (0U)
+#define VHA_CR_TLC_WM_INDIRECT_ADDRESS_CLRMSK             (0XFFFFFFF8U)
+
+
+/*
+    Register VHA_CR_TLC_MH_CORE_INDIRECT
+*/
+#define VHA_CR_TLC_MH_CORE_INDIRECT                       (0x0900U)
+#define VHA_CR_TLC_MH_CORE_INDIRECT_MASKFULL              (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_TLC_MH_CORE_INDIRECT_MASK_SHIFT            (0U)
+#define VHA_CR_TLC_MH_CORE_INDIRECT_MASK_CLRMSK           (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_IC_CORE_INDIRECT
+*/
+#define VHA_CR_IC_CORE_INDIRECT                           (0x0900U)
+#define VHA_CR_IC_CORE_INDIRECT_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_IC_CORE_INDIRECT_MASK_SHIFT                (0U)
+#define VHA_CR_IC_CORE_INDIRECT_MASK_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CORE_CTRL_INDIRECT
+*/
+#define VHA_CR_CORE_CTRL_INDIRECT                         (0x0900U)
+#define VHA_CR_CORE_CTRL_INDIRECT_MASKFULL                (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CORE_CTRL_INDIRECT_MASK_SHIFT              (0U)
+#define VHA_CR_CORE_CTRL_INDIRECT_MASK_CLRMSK             (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CORE_MH_INDIRECT
+*/
+#define VHA_CR_CORE_MH_INDIRECT                           (0x0900U)
+#define VHA_CR_CORE_MH_INDIRECT_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CORE_MH_INDIRECT_MASK_SHIFT                (0U)
+#define VHA_CR_CORE_MH_INDIRECT_MASK_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CNN_TOP_INDIRECT
+*/
+#define VHA_CR_CNN_TOP_INDIRECT                           (0x0900U)
+#define VHA_CR_CNN_TOP_INDIRECT_MASKFULL                  (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CNN_TOP_INDIRECT_MASK_SHIFT                (0U)
+#define VHA_CR_CNN_TOP_INDIRECT_MASK_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CNN_FE_INDIRECT
+*/
+#define VHA_CR_CNN_FE_INDIRECT                            (0x0900U)
+#define VHA_CR_CNN_FE_INDIRECT_MASKFULL                   (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CNN_FE_INDIRECT_MASK_SHIFT                 (0U)
+#define VHA_CR_CNN_FE_INDIRECT_MASK_CLRMSK                (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CNN_BE_INDIRECT
+*/
+#define VHA_CR_CNN_BE_INDIRECT                            (0x0900U)
+#define VHA_CR_CNN_BE_INDIRECT_MASKFULL                   (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CNN_BE_INDIRECT_MASK_SHIFT                 (0U)
+#define VHA_CR_CNN_BE_INDIRECT_MASK_CLRMSK                (0XFFFFFF00U)
+
+
+#define VHA_CR_DOMAINSPLIT_TYPE_MASK                      (0x0000FFFFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_RESERVED_SHIFT            (9U)
+#define VHA_CR_DOMAINSPLIT_TYPE_RESERVED_CLRMSK           (0XFFFF01FFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE7_SHIFT               (8U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE7_CLRMSK              (0XFFFFFEFFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE7_EN                  (0X00000100U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE6_SHIFT               (7U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE6_CLRMSK              (0XFFFFFF7FU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE6_EN                  (0X00000080U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE5_SHIFT               (6U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE5_CLRMSK              (0XFFFFFFBFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE5_EN                  (0X00000040U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE4_SHIFT               (5U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE4_CLRMSK              (0XFFFFFFDFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE4_EN                  (0X00000020U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE3_SHIFT               (4U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE3_CLRMSK              (0XFFFFFFEFU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE3_EN                  (0X00000010U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE2_SHIFT               (3U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE2_CLRMSK              (0XFFFFFFF7U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE2_EN                  (0X00000008U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE1_SHIFT               (2U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE1_CLRMSK              (0XFFFFFFFBU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE1_EN                  (0X00000004U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE0_SHIFT               (1U)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE0_CLRMSK              (0XFFFFFFFDU)
+#define VHA_CR_DOMAINSPLIT_TYPE_CORE0_EN                  (0X00000002U)
+#define VHA_CR_DOMAINSPLIT_TYPE_TLC_SHIFT                 (0U)
+#define VHA_CR_DOMAINSPLIT_TYPE_TLC_CLRMSK                (0XFFFFFFFEU)
+#define VHA_CR_DOMAINSPLIT_TYPE_TLC_EN                    (0X00000001U)
+
+
+/*
+    Register VHA_CR_PRODUCT_ID
+*/
+#define VHA_CR_PRODUCT_ID                                 (0x0018U)
+#define VHA_CR_PRODUCT_ID_MASKFULL                        (IMG_UINT64_C(0x00000000FFFF0000))
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT            (16U)
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register VHA_CR_CORE_ID
+*/
+#define VHA_CR_CORE_ID                                    (0x0020U)
+#define VHA_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_BRANCH_ID_SHIFT                    (48U)
+#define VHA_CR_CORE_ID_BRANCH_ID_CLRMSK                   (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_VERSION_ID_SHIFT                   (32U)
+#define VHA_CR_CORE_ID_VERSION_ID_CLRMSK                  (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT     (16U)
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK    (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_CORE_ID_CONFIG_ID_SHIFT                    (0U)
+#define VHA_CR_CORE_ID_CONFIG_ID_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register VHA_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define VHA_CR_CORE_IP_INTEGRATOR_ID                      (0x0028U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT          (0U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CHANGELIST
+*/
+#define VHA_CR_CORE_IP_CHANGELIST                         (0x0030U)
+#define VHA_CR_CORE_IP_CHANGELIST_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_SHIFT             (0U)
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CONFIG
+*/
+#define VHA_CR_CORE_IP_CONFIG                             (0x0038U)
+#define VHA_CR_CORE_IP_CONFIG_MASKFULL                    (IMG_UINT64_C(0x00000000000FFF01))
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_SHIFT (19U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_CLRMSK (0XFFF7FFFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SAFETY_EN (0X00080000U)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_SHIFT          (18U)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_CLRMSK         (0XFFFBFFFFU)
+#define VHA_CR_CORE_IP_CONFIG_OCM_SECURITY_EN             (0X00040000U)
+#define VHA_CR_CORE_IP_CONFIG_ECC_RAMS_SHIFT              (16U)
+#define VHA_CR_CORE_IP_CONFIG_ECC_RAMS_CLRMSK             (0XFFFCFFFFU)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_SHIFT      (15U)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_CLRMSK     (0XFFFF7FFFU)
+#define VHA_CR_CORE_IP_CONFIG_PARITY_REGISTERS_EN         (0X00008000U)
+#define VHA_CR_CORE_IP_CONFIG_MMU_VERSION_SHIFT           (12U)
+#define VHA_CR_CORE_IP_CONFIG_MMU_VERSION_CLRMSK          (0XFFFF8FFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_SHIFT (11U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_EN (0X00000800U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_SHIFT (10U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_EN (0X00000400U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_EN (0X00000200U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_SHIFT         (8U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_CLRMSK        (0XFFFFFEFFU)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_EN            (0X00000100U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_SHIFT         (0U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_EN            (0X00000001U)
+
+
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MASK             (0x00000007U)
+/*
+8-byte memory interface */
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MEM_WORD_8_BYTES (0x00000000U)
+/*
+16-byte memory interface */
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MEM_WORD_16_BYTES (0x00000001U)
+/*
+32-byte memory interface */
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MEM_WORD_32_BYTES (0x00000002U)
+/*
+64-byte memory interface */
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MEM_WORD_64_BYTES (0x00000003U)
+/*
+128-byte memory interface */
+#define VHA_CR_CORE_IP_CONFIG1_BUS_WIDTH_MEM_WORD_128_BYTES (0x00000004U)
+
+
+/*
+    Register VHA_CR_CORE_IP_CONFIG1
+*/
+#define VHA_CR_CORE_IP_CONFIG1                            (0x0040U)
+#define VHA_CR_CORE_IP_CONFIG1_MASKFULL                   (IMG_UINT64_C(0x00001FFFCFFF9FFF))
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_SHIFT    (42U)
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_CLRMSK   (IMG_UINT64_C(0XFFFFE3FFFFFFFFFF))
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_MEM_WORD_8_BYTES (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_MEM_WORD_16_BYTES (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_MEM_WORD_32_BYTES (IMG_UINT64_C(0x0000080000000000))  
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_MEM_WORD_64_BYTES (IMG_UINT64_C(0x00000c0000000000))  
+#define VHA_CR_CORE_IP_CONFIG1_EXT_MEM_BUS_WIDTH_MEM_WORD_128_BYTES (IMG_UINT64_C(0x0000100000000000))  
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_SUBBANKS_LOG2_SHIFT (39U)
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_SUBBANKS_LOG2_CLRMSK (IMG_UINT64_C(0XFFFFFC7FFFFFFFFF))
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_BANK_ARRAYS_MIN1_SHIFT (36U)
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_BANK_ARRAYS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_BANK_GROUPS_MIN1_SHIFT (33U)
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_NUM_BANK_GROUPS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFF1FFFFFFFF))
+#define VHA_CR_CORE_IP_CONFIG1_NUM_CORES_MIN1_SHIFT       (30U)
+#define VHA_CR_CORE_IP_CONFIG1_NUM_CORES_MIN1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFE3FFFFFFF))
+#define VHA_CR_CORE_IP_CONFIG1_CORE_OCM_RAM_SIZE_4KB_SHIFT (15U)
+#define VHA_CR_CORE_IP_CONFIG1_CORE_OCM_RAM_SIZE_4KB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0007FFF))
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_RAM_SIZE_4KB_SHIFT (0U)
+#define VHA_CR_CORE_IP_CONFIG1_SYS_OCM_RAM_SIZE_4KB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFE000))
+
+
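As the suffix suggests, fields named *_MIN1 in these configuration registers hold the hardware value minus one, so software adds one back after extraction; a field is isolated by ANDing with the complement of its _CLRMSK and shifting down by _SHIFT. A short sketch using macros from this header and the illustrative vha_reg_read64() accessor assumed earlier:

/* Decode the core count from CORE_IP_CONFIG1 (a *_MIN1 field). */
uint64_t cfg = vha_reg_read64(regs, VHA_CR_CORE_IP_CONFIG1);
unsigned int num_cores =
    (unsigned int)((cfg & ~VHA_CR_CORE_IP_CONFIG1_NUM_CORES_MIN1_CLRMSK)
                   >> VHA_CR_CORE_IP_CONFIG1_NUM_CORES_MIN1_SHIFT) + 1;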
+/*
+    Register VHA_CR_CNN_IP_CONFIG0
+*/
+#define VHA_CR_CNN_IP_CONFIG0                             (0x0068U)
+#define VHA_CR_CNN_IP_CONFIG0_MASKFULL                    (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CMD_CRC_FOOTER_EN   (IMG_UINT64_C(0X0000000010000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_SHIFT (27U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MEM_REQ_PRIORITISATION_EN (IMG_UINT64_C(0X0000000008000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_SHIFT (26U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PIPELINE_ORDER_XBAR_EN (IMG_UINT64_C(0X0000000004000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_SHIFT (25U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DEPTHWISE_POOLING_ENGINE_EN (IMG_UINT64_C(0X0000000002000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_PARALLEL_MMM_SUPPORTED_EN (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_SHIFT  (23U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMU_PRELOADS_EN     (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_SHIFT (22U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MMM_SUPPORTED_EN    (IMG_UINT64_C(0X0000000000400000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_SHIFT (21U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SCHEDULING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000200000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_SHIFT      (20U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_SECURITY_EN         (IMG_UINT64_C(0X0000000000100000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_SHIFT (19U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CBUF_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000080000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_SHIFT (18U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MIN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000040000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_SHIFT (17U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_UNSIGNED_SUPPORTED_EN (IMG_UINT64_C(0X0000000000020000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_MIRROR_PADDING_SUPPORTED_EN (IMG_UINT64_C(0X0000000000010000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_SHIFT (15U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_EWO_BROADCAST_SUPPORTED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_SHIFT (14U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_HALF_16BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_SHIFT (13U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_SHIFT (11U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_SHIFT (10U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_SHIFT (7U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_SHIFT (6U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_SHIFT (5U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_SHIFT (3U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_SHIFT (2U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_SHIFT (1U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_EN   (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG1
+*/
+#define VHA_CR_CNN_IP_CONFIG1                             (0x0070U)
+#define VHA_CR_CNN_IP_CONFIG1_MASKFULL                    (IMG_UINT64_C(0xFFFFFFFF3F0FFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ADDRESS_ALIGNMENT_BYTES_LOG2_SHIFT (60U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ADDRESS_ALIGNMENT_BYTES_LOG2_CLRMSK (IMG_UINT64_C(0X0FFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_SHIFT (52U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_PER_SET_MIN1_CLRMSK (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_SHIFT (39U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_MAX_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFF0007FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_SHIFT (37U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_PRIORITY_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFF9FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_SHIFT (32U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF00F))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_SCHEDULING_NUM_HOSTS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG2
+*/
+#define VHA_CR_CNN_IP_CONFIG2                             (0x0078U)
+#define VHA_CR_CNN_IP_CONFIG2_MASKFULL                    (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_CONCAT_MIN1_SHIFT (51U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_CONCAT_MIN1_CLRMSK (IMG_UINT64_C(0XFF07FFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_SPLIT_MIN1_SHIFT (48U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_TENSOR_SPLIT_MIN1_CLRMSK (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_BANKS_MIN1_SHIFT (44U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_MAX_FILTERS_WITH_BIAS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFF003FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC0FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_SHIFT (20U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0000F))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG3
+*/
+#define VHA_CR_CNN_IP_CONFIG3                             (0x0080U)
+#define VHA_CR_CNN_IP_CONFIG3_MASKFULL                    (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_SHIFT (38U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L1_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_SHIFT (34U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_PARALLELISM_L0_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFC3FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_SHIFT (29U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFC1FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_MMM_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFE0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+#define VHA_CR_SYS_CLK_CTRL0_MODE_MASK                    (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_OFF                     (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_ON                      (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_AUTO                    (0x00000002U)
+
+
+#define VHA_CR_SYS_CLK_CTRL0_MODE_NOTOFF_MASK             (0x00000003U)
+/*
+The domain clock is forced on */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_NOTOFF_ON               (0x00000001U)
+/*
+Automatic clock gating is active, the domain clock is only on whilst data is being processed */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_NOTOFF_AUTO             (0x00000002U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_CTRL0
+*/
+#define VHA_CR_SYS_CLK_CTRL0                              (0x0200U)
+#define VHA_CR_SYS_CLK_CTRL0_MASKFULL                     (IMG_UINT64_C(0xFFFFFFFFFFFF0FFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE7_SHIFT                  (62U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE7_CLRMSK                 (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE7_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE7_ON                     (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE7_AUTO                   (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE6_SHIFT                  (60U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE6_CLRMSK                 (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE6_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE6_ON                     (IMG_UINT64_C(0x1000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE6_AUTO                   (IMG_UINT64_C(0x2000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE5_SHIFT                  (58U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE5_CLRMSK                 (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE5_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE5_ON                     (IMG_UINT64_C(0x0400000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE5_AUTO                   (IMG_UINT64_C(0x0800000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE4_SHIFT                  (56U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE4_CLRMSK                 (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE4_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE4_ON                     (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE4_AUTO                   (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE3_SHIFT                  (54U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE3_CLRMSK                 (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE3_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE3_ON                     (IMG_UINT64_C(0x0040000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE3_AUTO                   (IMG_UINT64_C(0x0080000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE2_SHIFT                  (52U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE2_CLRMSK                 (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE2_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE2_ON                     (IMG_UINT64_C(0x0010000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE2_AUTO                   (IMG_UINT64_C(0x0020000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE1_SHIFT                  (50U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE1_CLRMSK                 (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE1_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE1_ON                     (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE1_AUTO                   (IMG_UINT64_C(0x0008000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE0_SHIFT                  (48U)
+#define VHA_CR_SYS_CLK_CTRL0_CORE0_CLRMSK                 (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_CORE0_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE0_ON                     (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_CORE0_AUTO                   (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC7_SHIFT                   (46U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC7_CLRMSK                  (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC7_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC7_ON                      (IMG_UINT64_C(0x0000400000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC7_AUTO                    (IMG_UINT64_C(0x0000800000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC6_SHIFT                   (44U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC6_CLRMSK                  (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC6_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC6_ON                      (IMG_UINT64_C(0x0000100000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC6_AUTO                    (IMG_UINT64_C(0x0000200000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC5_SHIFT                   (42U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC5_CLRMSK                  (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC5_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC5_ON                      (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC5_AUTO                    (IMG_UINT64_C(0x0000080000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC4_SHIFT                   (40U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC4_CLRMSK                  (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC4_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC4_ON                      (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC4_AUTO                    (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC3_SHIFT                   (38U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC3_CLRMSK                  (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC3_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC3_ON                      (IMG_UINT64_C(0x0000004000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC3_AUTO                    (IMG_UINT64_C(0x0000008000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC2_SHIFT                   (36U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC2_CLRMSK                  (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC2_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC2_ON                      (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC2_AUTO                    (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC1_SHIFT                   (34U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC1_CLRMSK                  (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC1_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC1_ON                      (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC1_AUTO                    (IMG_UINT64_C(0x0000000800000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC0_SHIFT                   (32U)
+#define VHA_CR_SYS_CLK_CTRL0_NOC0_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_NOC0_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC0_ON                      (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_SYS_CLK_CTRL0_NOC0_AUTO                    (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM7_SHIFT                    (30U)
+#define VHA_CR_SYS_CLK_CTRL0_WM7_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM7_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM7_ON                       (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM7_AUTO                     (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM6_SHIFT                    (28U)
+#define VHA_CR_SYS_CLK_CTRL0_WM6_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM6_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM6_ON                       (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM6_AUTO                     (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM5_SHIFT                    (26U)
+#define VHA_CR_SYS_CLK_CTRL0_WM5_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM5_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM5_ON                       (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM5_AUTO                     (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM4_SHIFT                    (24U)
+#define VHA_CR_SYS_CLK_CTRL0_WM4_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM4_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM4_ON                       (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM4_AUTO                     (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM3_SHIFT                    (22U)
+#define VHA_CR_SYS_CLK_CTRL0_WM3_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM3_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM3_ON                       (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM3_AUTO                     (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM2_SHIFT                    (20U)
+#define VHA_CR_SYS_CLK_CTRL0_WM2_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM2_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM2_ON                       (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM2_AUTO                     (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM1_SHIFT                    (18U)
+#define VHA_CR_SYS_CLK_CTRL0_WM1_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM1_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM1_ON                       (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM1_AUTO                     (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM0_SHIFT                    (16U)
+#define VHA_CR_SYS_CLK_CTRL0_WM0_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define VHA_CR_SYS_CLK_CTRL0_WM0_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM0_ON                       (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_SYS_CLK_CTRL0_WM0_AUTO                     (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_SYS_CLK_CTRL0_INTERCONNECT_SHIFT           (10U)
+#define VHA_CR_SYS_CLK_CTRL0_INTERCONNECT_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_SYS_CLK_CTRL0_INTERCONNECT_OFF             (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_INTERCONNECT_ON              (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_SYS_CLK_CTRL0_INTERCONNECT_AUTO            (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_SYS_CLK_CTRL0_AXI_SHIFT                    (8U)
+#define VHA_CR_SYS_CLK_CTRL0_AXI_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_SYS_CLK_CTRL0_AXI_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_AXI_ON                       (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_SYS_CLK_CTRL0_AXI_AUTO                     (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_SHIFT                    (6U)
+#define VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_ON                       (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_AUTO                     (IMG_UINT64_C(0x0000000000000080))  
+#define VHA_CR_SYS_CLK_CTRL0_LSYNC_SHIFT                  (4U)
+#define VHA_CR_SYS_CLK_CTRL0_LSYNC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_SYS_CLK_CTRL0_LSYNC_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_LSYNC_ON                     (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_SYS_CLK_CTRL0_LSYNC_AUTO                   (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_SYS_CLK_CTRL0_SOCM_SHIFT                   (2U)
+#define VHA_CR_SYS_CLK_CTRL0_SOCM_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define VHA_CR_SYS_CLK_CTRL0_SOCM_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_CTRL0_SOCM_ON                      (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_SYS_CLK_CTRL0_SOCM_AUTO                    (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_SYS_CLK_CTRL0_REGBANK_SHIFT                (0U)
+#define VHA_CR_SYS_CLK_CTRL0_REGBANK_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_SYS_CLK_CTRL0_REGBANK_ON                   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_SYS_CLK_CTRL0_REGBANK_AUTO                 (IMG_UINT64_C(0x0000000000000002))  
+
+
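Each clock domain above occupies a 2-bit MODE field, and the generated OFF/ON/AUTO constants are already positioned at the correct bit offsets, so a complete control word can be composed by ORing them together. A sketch (an illustration of the encoding, not a recommended power sequence) that leaves core 0 under automatic gating, forces the register bank clock on, and writes OFF to every other domain:

uint64_t clk = 0;                       /* all unlisted domains: OFF (2'b00) */

clk |= VHA_CR_SYS_CLK_CTRL0_CORE0_AUTO; /* core 0: gated automatically */
clk |= VHA_CR_SYS_CLK_CTRL0_REGBANK_ON; /* register bank: forced on    */
vha_reg_write64(regs, VHA_CR_SYS_CLK_CTRL0, clk);

Note that the REGBANK field only defines ON and AUTO constants (the NOTOFF encoding above), which is why the sketch sets it explicitly rather than leaving it at zero.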
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_GATED                 (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_RUNNING               (0x00000001U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_STATUS0
+*/
+#define VHA_CR_SYS_CLK_STATUS0                            (0x0208U)
+#define VHA_CR_SYS_CLK_STATUS0_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFF3F))
+#define VHA_CR_SYS_CLK_STATUS0_CORE7_SHIFT                (31U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE7_CLRMSK               (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE7_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE7_RUNNING              (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE6_SHIFT                (30U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE6_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE6_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE6_RUNNING              (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE5_SHIFT                (29U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE5_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE5_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE5_RUNNING              (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE4_SHIFT                (28U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE4_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE4_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE4_RUNNING              (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE3_SHIFT                (27U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE3_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE3_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE3_RUNNING              (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE2_SHIFT                (26U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE2_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE2_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE2_RUNNING              (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE1_SHIFT                (25U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE1_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE1_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE1_RUNNING              (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE0_SHIFT                (24U)
+#define VHA_CR_SYS_CLK_STATUS0_CORE0_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_CORE0_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_CORE0_RUNNING              (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC7_SHIFT                 (23U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC7_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC7_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC7_RUNNING               (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC6_SHIFT                 (22U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC6_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC6_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC6_RUNNING               (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC5_SHIFT                 (21U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC5_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC5_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC5_RUNNING               (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC4_SHIFT                 (20U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC4_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC4_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC4_RUNNING               (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC3_SHIFT                 (19U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC3_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC3_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC3_RUNNING               (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC2_SHIFT                 (18U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC2_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC2_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC2_RUNNING               (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC1_SHIFT                 (17U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC1_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC1_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC1_RUNNING               (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC0_SHIFT                 (16U)
+#define VHA_CR_SYS_CLK_STATUS0_NOC0_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_SYS_CLK_STATUS0_NOC0_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_NOC0_RUNNING               (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM7_SHIFT                  (15U)
+#define VHA_CR_SYS_CLK_STATUS0_WM7_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM7_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM7_RUNNING                (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM6_SHIFT                  (14U)
+#define VHA_CR_SYS_CLK_STATUS0_WM6_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM6_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM6_RUNNING                (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM5_SHIFT                  (13U)
+#define VHA_CR_SYS_CLK_STATUS0_WM5_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM5_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM5_RUNNING                (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM4_SHIFT                  (12U)
+#define VHA_CR_SYS_CLK_STATUS0_WM4_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM4_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM4_RUNNING                (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM3_SHIFT                  (11U)
+#define VHA_CR_SYS_CLK_STATUS0_WM3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_SYS_CLK_STATUS0_WM3_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM3_RUNNING                (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_SYS_CLK_STATUS0_WM2_SHIFT                  (10U)
+#define VHA_CR_SYS_CLK_STATUS0_WM2_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM2_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM2_RUNNING                (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_SYS_CLK_STATUS0_WM1_SHIFT                  (9U)
+#define VHA_CR_SYS_CLK_STATUS0_WM1_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM1_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM1_RUNNING                (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_SYS_CLK_STATUS0_WM0_SHIFT                  (8U)
+#define VHA_CR_SYS_CLK_STATUS0_WM0_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_SYS_CLK_STATUS0_WM0_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_WM0_RUNNING                (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_SYS_CLK_STATUS0_INTERCONNECT_SHIFT         (5U)
+#define VHA_CR_SYS_CLK_STATUS0_INTERCONNECT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_SYS_CLK_STATUS0_INTERCONNECT_GATED         (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_INTERCONNECT_RUNNING       (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_SYS_CLK_STATUS0_AXI_SHIFT                  (4U)
+#define VHA_CR_SYS_CLK_STATUS0_AXI_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_SYS_CLK_STATUS0_AXI_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_AXI_RUNNING                (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_SYS_CLK_STATUS0_SLC_SHIFT                  (3U)
+#define VHA_CR_SYS_CLK_STATUS0_SLC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_SLC_RUNNING                (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_SYS_CLK_STATUS0_LSYNC_SHIFT                (2U)
+#define VHA_CR_SYS_CLK_STATUS0_LSYNC_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_SYS_CLK_STATUS0_LSYNC_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_LSYNC_RUNNING              (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_SYS_CLK_STATUS0_SOCM_SHIFT                 (1U)
+#define VHA_CR_SYS_CLK_STATUS0_SOCM_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_SYS_CLK_STATUS0_SOCM_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_SOCM_RUNNING               (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_SYS_CLK_STATUS0_REGBANK_SHIFT              (0U)
+#define VHA_CR_SYS_CLK_STATUS0_REGBANK_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_SYS_CLK_STATUS0_REGBANK_GATED              (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_SYS_CLK_STATUS0_REGBANK_RUNNING            (IMG_UINT64_C(0x0000000000000001))  
+
+
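After reprogramming VHA_CR_SYS_CLK_CTRL0, the status register reports each domain's clock as GATED or RUNNING, so the RUNNING constants can be tested directly. Continuing the sketch above:

/* Busy-wait until core 0's clock reports RUNNING; a real driver would
 * bound this loop with a timeout. */
while (!(vha_reg_read64(regs, VHA_CR_SYS_CLK_STATUS0) &
         VHA_CR_SYS_CLK_STATUS0_CORE0_RUNNING))
    ;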
+/*
+    Register VHA_CR_SYS_RESET_CTRL
+*/
+#define VHA_CR_SYS_RESET_CTRL                             (0x0210U)
+#define VHA_CR_SYS_RESET_CTRL_MASKFULL                    (IMG_UINT64_C(0x0000000000FFFF1E))
+#define VHA_CR_SYS_RESET_CTRL_CORE_SHIFT                  (16U)
+#define VHA_CR_SYS_RESET_CTRL_CORE_CLRMSK                 (0XFF00FFFFU)
+#define VHA_CR_SYS_RESET_CTRL_WM_SHIFT                    (8U)
+#define VHA_CR_SYS_RESET_CTRL_WM_CLRMSK                   (0XFFFF00FFU)
+#define VHA_CR_SYS_RESET_CTRL_INTERCONNECT_SHIFT          (4U)
+#define VHA_CR_SYS_RESET_CTRL_INTERCONNECT_CLRMSK         (0XFFFFFFEFU)
+#define VHA_CR_SYS_RESET_CTRL_INTERCONNECT_EN             (0X00000010U)
+#define VHA_CR_SYS_RESET_CTRL_SLC_SHIFT                   (3U)
+#define VHA_CR_SYS_RESET_CTRL_SLC_CLRMSK                  (0XFFFFFFF7U)
+#define VHA_CR_SYS_RESET_CTRL_SLC_EN                      (0X00000008U)
+#define VHA_CR_SYS_RESET_CTRL_MH_SHIFT                    (2U)
+#define VHA_CR_SYS_RESET_CTRL_MH_CLRMSK                   (0XFFFFFFFBU)
+#define VHA_CR_SYS_RESET_CTRL_MH_EN                       (0X00000004U)
+#define VHA_CR_SYS_RESET_CTRL_REGBANK_SHIFT               (1U)
+#define VHA_CR_SYS_RESET_CTRL_REGBANK_CLRMSK              (0XFFFFFFFDU)
+#define VHA_CR_SYS_RESET_CTRL_REGBANK_EN                  (0X00000002U)
+
+
+/*
+    Register VHA_CR_HOST_EVENT_SOURCE
+*/
+#define VHA_CR_HOST_EVENT_SOURCE                          (0x0218U)
+#define VHA_CR_HOST_EVENT_SOURCE_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFF01))
+#define VHA_CR_HOST_EVENT_SOURCE_IC_SHIFT                 (24U)
+#define VHA_CR_HOST_EVENT_SOURCE_IC_CLRMSK                (0X00FFFFFFU)
+#define VHA_CR_HOST_EVENT_SOURCE_CORE_SHIFT               (16U)
+#define VHA_CR_HOST_EVENT_SOURCE_CORE_CLRMSK              (0XFF00FFFFU)
+#define VHA_CR_HOST_EVENT_SOURCE_WM_SHIFT                 (8U)
+#define VHA_CR_HOST_EVENT_SOURCE_WM_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_HOST_EVENT_SOURCE_SYS_SHIFT                (0U)
+#define VHA_CR_HOST_EVENT_SOURCE_SYS_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_HOST_EVENT_SOURCE_SYS_EN                   (0X00000001U)
+
+
+#define VHA_CR_SYS_EVENT_TYPE_LOGIC_ERROR_SHIFT           (30U)
+#define VHA_CR_SYS_EVENT_TYPE_LOGIC_ERROR_CLRMSK          (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_LOGIC_ERROR_EN              (0X40000000U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_CORRECTION_SHIFT        (29U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_CORRECTION_CLRMSK       (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_CORRECTION_EN           (0X20000000U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_DETECTION_SHIFT         (28U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_DETECTION_CLRMSK        (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_DETECTION_EN            (0X10000000U)
+#define VHA_CR_SYS_EVENT_TYPE_LSYNC_INV_REQ_SHIFT         (27U)
+#define VHA_CR_SYS_EVENT_TYPE_LSYNC_INV_REQ_CLRMSK        (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_LSYNC_INV_REQ_EN            (0X08000000U)
+#define VHA_CR_SYS_EVENT_TYPE_SOCM_SCRUB_DONE_SHIFT       (26U)
+#define VHA_CR_SYS_EVENT_TYPE_SOCM_SCRUB_DONE_CLRMSK      (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_SOCM_SCRUB_DONE_EN          (0X04000000U)
+#define VHA_CR_SYS_EVENT_TYPE_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_TYPE_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_SYS_MEM_WDT_SHIFT           (20U)
+#define VHA_CR_SYS_EVENT_TYPE_SYS_MEM_WDT_CLRMSK          (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_SYS_MEM_WDT_EN              (0X00100000U)
+#define VHA_CR_SYS_EVENT_TYPE_MMU_PARITY_ERROR_SHIFT      (16U)
+#define VHA_CR_SYS_EVENT_TYPE_MMU_PARITY_ERROR_CLRMSK     (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_TYPE_MMU_PAGE_FAULT_SHIFT        (8U)
+#define VHA_CR_SYS_EVENT_TYPE_MMU_PAGE_FAULT_CLRMSK       (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_TYPE_AXI_ERROR_SHIFT             (4U)
+#define VHA_CR_SYS_EVENT_TYPE_AXI_ERROR_CLRMSK            (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_INIT_DONE_SHIFT         (3U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_INIT_DONE_CLRMSK        (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_TYPE_RAM_INIT_DONE_EN            (0X00000008U)
+#define VHA_CR_SYS_EVENT_TYPE_MEMBUS_RESET_DONE_SHIFT     (2U)
+#define VHA_CR_SYS_EVENT_TYPE_MEMBUS_RESET_DONE_CLRMSK    (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_TYPE_MEMBUS_RESET_DONE_EN        (0X00000004U)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_ABORT_SHIFT           (1U)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_ABORT_CLRMSK          (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_ABORT_EN              (0X00000002U)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_COMPLETE_SHIFT        (0U)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_COMPLETE_CLRMSK       (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_TYPE_POWER_COMPLETE_EN           (0X00000001U)
+
+
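The event-type layout mixes single-bit flags, tested directly against their _EN constants, with multi-bit fields such as the eight MMU_PAGE_FAULT bits, extracted with the complement-of-CLRMSK idiom (the per-bit meaning of the fault field is hardware-specific). A sketch of a handler over a raw event word in this layout:

static void vha_handle_sys_event(uint32_t evt)
{
    if (evt & VHA_CR_SYS_EVENT_TYPE_POWER_COMPLETE_EN) {
        /* power-state transition finished */
    }

    /* Isolate the MMU page-fault bits, if any were raised. */
    uint32_t faults = (evt & ~VHA_CR_SYS_EVENT_TYPE_MMU_PAGE_FAULT_CLRMSK)
                      >> VHA_CR_SYS_EVENT_TYPE_MMU_PAGE_FAULT_SHIFT;
    (void)faults; /* dispatch fault handling here */
}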
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_PARITY_SHIFT         (31U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_PARITY_CLRMSK        (0X7FFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_PARITY_EN            (0X80000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LOGIC_ERROR_SHIFT    (30U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LOGIC_ERROR_CLRMSK   (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LOGIC_ERROR_EN       (0X40000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_CORRECTION_EN    (0X20000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_DETECTION_SHIFT  (28U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_DETECTION_EN     (0X10000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LSYNC_INV_REQ_SHIFT  (27U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LSYNC_INV_REQ_CLRMSK (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_LSYNC_INV_REQ_EN     (0X08000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SOCM_SCRUB_DONE_SHIFT (26U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SOCM_SCRUB_DONE_CLRMSK (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SOCM_SCRUB_DONE_EN   (0X04000000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SYS_MEM_WDT_SHIFT    (20U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SYS_MEM_WDT_CLRMSK   (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_SYS_MEM_WDT_EN       (0X00100000U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MMU_PARITY_ERROR_SHIFT (16U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MMU_PARITY_ERROR_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MMU_PAGE_FAULT_SHIFT (8U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MMU_PAGE_FAULT_CLRMSK (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_AXI_ERROR_SHIFT      (4U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_AXI_ERROR_CLRMSK     (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_INIT_DONE_SHIFT  (3U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_RAM_INIT_DONE_EN     (0X00000008U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_ABORT_SHIFT    (1U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_ABORT_CLRMSK   (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_ABORT_EN       (0X00000002U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_COMPLETE_SHIFT (0U)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_STATUS_TYPE_POWER_COMPLETE_EN    (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_ENABLE
+*/
+#define VHA_CR_SYS_EVENT_ENABLE                           (0x0220U)
+#define VHA_CR_SYS_EVENT_ENABLE_MASKFULL                  (IMG_UINT64_C(0x000000007DFFFFFF))
+#define VHA_CR_SYS_EVENT_ENABLE_LOGIC_ERROR_SHIFT         (30U)
+#define VHA_CR_SYS_EVENT_ENABLE_LOGIC_ERROR_CLRMSK        (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_LOGIC_ERROR_EN            (0X40000000U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_CORRECTION_SHIFT      (29U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_CORRECTION_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_CORRECTION_EN         (0X20000000U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_DETECTION_SHIFT       (28U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_DETECTION_CLRMSK      (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_DETECTION_EN          (0X10000000U)
+#define VHA_CR_SYS_EVENT_ENABLE_LSYNC_INV_REQ_SHIFT       (27U)
+#define VHA_CR_SYS_EVENT_ENABLE_LSYNC_INV_REQ_CLRMSK      (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_LSYNC_INV_REQ_EN          (0X08000000U)
+#define VHA_CR_SYS_EVENT_ENABLE_SOCM_SCRUB_DONE_SHIFT     (26U)
+#define VHA_CR_SYS_EVENT_ENABLE_SOCM_SCRUB_DONE_CLRMSK    (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_SOCM_SCRUB_DONE_EN        (0X04000000U)
+#define VHA_CR_SYS_EVENT_ENABLE_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_ENABLE_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_SYS_MEM_WDT_SHIFT         (20U)
+#define VHA_CR_SYS_EVENT_ENABLE_SYS_MEM_WDT_CLRMSK        (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_SYS_MEM_WDT_EN            (0X00100000U)
+#define VHA_CR_SYS_EVENT_ENABLE_MMU_PARITY_ERROR_SHIFT    (16U)
+#define VHA_CR_SYS_EVENT_ENABLE_MMU_PARITY_ERROR_CLRMSK   (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT      (8U)
+#define VHA_CR_SYS_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK     (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_ENABLE_AXI_ERROR_SHIFT           (4U)
+#define VHA_CR_SYS_EVENT_ENABLE_AXI_ERROR_CLRMSK          (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_INIT_DONE_SHIFT       (3U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_INIT_DONE_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_ENABLE_RAM_INIT_DONE_EN          (0X00000008U)
+#define VHA_CR_SYS_EVENT_ENABLE_MEMBUS_RESET_DONE_SHIFT   (2U)
+#define VHA_CR_SYS_EVENT_ENABLE_MEMBUS_RESET_DONE_CLRMSK  (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_ENABLE_MEMBUS_RESET_DONE_EN      (0X00000004U)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_ABORT_SHIFT         (1U)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_ABORT_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_ABORT_EN            (0X00000002U)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_COMPLETE_SHIFT      (0U)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_COMPLETE_CLRMSK     (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_ENABLE_POWER_COMPLETE_EN         (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_STATUS
+*/
+#define VHA_CR_SYS_EVENT_STATUS                           (0x0228U)
+#define VHA_CR_SYS_EVENT_STATUS_MASKFULL                  (IMG_UINT64_C(0x00000000FDFFFFFF))
+#define VHA_CR_SYS_EVENT_STATUS_PARITY_SHIFT              (31U)
+#define VHA_CR_SYS_EVENT_STATUS_PARITY_CLRMSK             (0X7FFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_PARITY_EN                 (0X80000000U)
+#define VHA_CR_SYS_EVENT_STATUS_LOGIC_ERROR_SHIFT         (30U)
+#define VHA_CR_SYS_EVENT_STATUS_LOGIC_ERROR_CLRMSK        (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_LOGIC_ERROR_EN            (0X40000000U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_CORRECTION_SHIFT      (29U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_CORRECTION_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_CORRECTION_EN         (0X20000000U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_DETECTION_SHIFT       (28U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_DETECTION_CLRMSK      (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_DETECTION_EN          (0X10000000U)
+#define VHA_CR_SYS_EVENT_STATUS_LSYNC_INV_REQ_SHIFT       (27U)
+#define VHA_CR_SYS_EVENT_STATUS_LSYNC_INV_REQ_CLRMSK      (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_LSYNC_INV_REQ_EN          (0X08000000U)
+#define VHA_CR_SYS_EVENT_STATUS_SOCM_SCRUB_DONE_SHIFT     (26U)
+#define VHA_CR_SYS_EVENT_STATUS_SOCM_SCRUB_DONE_CLRMSK    (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_SOCM_SCRUB_DONE_EN        (0X04000000U)
+#define VHA_CR_SYS_EVENT_STATUS_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_STATUS_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_SYS_MEM_WDT_SHIFT         (20U)
+#define VHA_CR_SYS_EVENT_STATUS_SYS_MEM_WDT_CLRMSK        (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_SYS_MEM_WDT_EN            (0X00100000U)
+#define VHA_CR_SYS_EVENT_STATUS_MMU_PARITY_ERROR_SHIFT    (16U)
+#define VHA_CR_SYS_EVENT_STATUS_MMU_PARITY_ERROR_CLRMSK   (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT      (8U)
+#define VHA_CR_SYS_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK     (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_STATUS_AXI_ERROR_SHIFT           (4U)
+#define VHA_CR_SYS_EVENT_STATUS_AXI_ERROR_CLRMSK          (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_INIT_DONE_SHIFT       (3U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_INIT_DONE_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_STATUS_RAM_INIT_DONE_EN          (0X00000008U)
+#define VHA_CR_SYS_EVENT_STATUS_MEMBUS_RESET_DONE_SHIFT   (2U)
+#define VHA_CR_SYS_EVENT_STATUS_MEMBUS_RESET_DONE_CLRMSK  (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_STATUS_MEMBUS_RESET_DONE_EN      (0X00000004U)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_ABORT_SHIFT         (1U)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_ABORT_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_ABORT_EN            (0X00000002U)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_COMPLETE_SHIFT      (0U)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_COMPLETE_CLRMSK     (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_STATUS_POWER_COMPLETE_EN         (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_STATUS_DISABLE
+*/
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE                   (0x0290U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MASKFULL          (IMG_UINT64_C(0x000000007DFFFFFF))
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LOGIC_ERROR_EN    (0X40000000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_CORRECTION_EN (0X20000000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_DETECTION_EN  (0X10000000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LSYNC_INV_REQ_SHIFT (27U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LSYNC_INV_REQ_CLRMSK (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_LSYNC_INV_REQ_EN  (0X08000000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SOCM_SCRUB_DONE_SHIFT (26U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SOCM_SCRUB_DONE_CLRMSK (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SOCM_SCRUB_DONE_EN (0X04000000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SYS_MEM_WDT_SHIFT (20U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SYS_MEM_WDT_CLRMSK (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_SYS_MEM_WDT_EN    (0X00100000U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MMU_PARITY_ERROR_SHIFT (16U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MMU_PARITY_ERROR_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MMU_PAGE_FAULT_SHIFT (8U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MMU_PAGE_FAULT_CLRMSK (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_AXI_ERROR_SHIFT   (4U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_AXI_ERROR_CLRMSK  (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_RAM_INIT_DONE_EN  (0X00000008U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_ABORT_SHIFT (1U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_ABORT_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_ABORT_EN    (0X00000002U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_COMPLETE_SHIFT (0U)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_STATUS_DISABLE_POWER_COMPLETE_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_CLEAR
+*/
+#define VHA_CR_SYS_EVENT_CLEAR                            (0x0230U)
+#define VHA_CR_SYS_EVENT_CLEAR_MASKFULL                   (IMG_UINT64_C(0x000000007DFFFFFF))
+#define VHA_CR_SYS_EVENT_CLEAR_LOGIC_ERROR_SHIFT          (30U)
+#define VHA_CR_SYS_EVENT_CLEAR_LOGIC_ERROR_CLRMSK         (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_LOGIC_ERROR_EN             (0X40000000U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_CORRECTION_SHIFT       (29U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_CORRECTION_CLRMSK      (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_CORRECTION_EN          (0X20000000U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_DETECTION_SHIFT        (28U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_DETECTION_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_DETECTION_EN           (0X10000000U)
+#define VHA_CR_SYS_EVENT_CLEAR_LSYNC_INV_REQ_SHIFT        (27U)
+#define VHA_CR_SYS_EVENT_CLEAR_LSYNC_INV_REQ_CLRMSK       (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_LSYNC_INV_REQ_EN           (0X08000000U)
+#define VHA_CR_SYS_EVENT_CLEAR_SOCM_SCRUB_DONE_SHIFT      (26U)
+#define VHA_CR_SYS_EVENT_CLEAR_SOCM_SCRUB_DONE_CLRMSK     (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_SOCM_SCRUB_DONE_EN         (0X04000000U)
+#define VHA_CR_SYS_EVENT_CLEAR_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_CLEAR_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_SYS_MEM_WDT_SHIFT          (20U)
+#define VHA_CR_SYS_EVENT_CLEAR_SYS_MEM_WDT_CLRMSK         (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_SYS_MEM_WDT_EN             (0X00100000U)
+#define VHA_CR_SYS_EVENT_CLEAR_MMU_PARITY_ERROR_SHIFT     (16U)
+#define VHA_CR_SYS_EVENT_CLEAR_MMU_PARITY_ERROR_CLRMSK    (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT       (8U)
+#define VHA_CR_SYS_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK      (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_CLEAR_AXI_ERROR_SHIFT            (4U)
+#define VHA_CR_SYS_EVENT_CLEAR_AXI_ERROR_CLRMSK           (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_INIT_DONE_SHIFT        (3U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_INIT_DONE_CLRMSK       (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_CLEAR_RAM_INIT_DONE_EN           (0X00000008U)
+#define VHA_CR_SYS_EVENT_CLEAR_MEMBUS_RESET_DONE_SHIFT    (2U)
+#define VHA_CR_SYS_EVENT_CLEAR_MEMBUS_RESET_DONE_CLRMSK   (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_CLEAR_MEMBUS_RESET_DONE_EN       (0X00000004U)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_ABORT_SHIFT          (1U)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_ABORT_CLRMSK         (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_ABORT_EN             (0X00000002U)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_COMPLETE_SHIFT       (0U)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_COMPLETE_CLRMSK      (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_CLEAR_POWER_COMPLETE_EN          (0X00000001U)
+
+
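+/*
+ * Editorial sketch of the enable/status/clear flow above, assuming the
+ * driver's MMIO accessors (not shown): write an enable mask to
+ * VHA_CR_SYS_EVENT_ENABLE, read VHA_CR_SYS_EVENT_STATUS in the handler,
+ * then acknowledge the serviced bits through VHA_CR_SYS_EVENT_CLEAR.
+ * The value composition is plain bit arithmetic:
+ */
+static inline unsigned int vha_cr_sys_event_enable_example(void)
+{
+        /* Illustrative selection: logic errors, all eight MMU page-fault
+         * bits, all four AXI error bits and the power handshake events. */
+        return VHA_CR_SYS_EVENT_ENABLE_LOGIC_ERROR_EN |
+               ~VHA_CR_SYS_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK |
+               ~VHA_CR_SYS_EVENT_ENABLE_AXI_ERROR_CLRMSK |
+               VHA_CR_SYS_EVENT_ENABLE_POWER_ABORT_EN |
+               VHA_CR_SYS_EVENT_ENABLE_POWER_COMPLETE_EN;
+}
+
+static inline unsigned int vha_cr_sys_event_ack(unsigned int status)
+{
+        /* CLEAR shares the STATUS layout minus the read-only PARITY bit;
+         * compare the two MASKFULL values (0x7DFFFFFF vs 0xFDFFFFFF). */
+        return status & (unsigned int)VHA_CR_SYS_EVENT_CLEAR_MASKFULL;
+}
+
+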
+/*
+    Register VHA_CR_SYS_EVENT_INJECT
+*/
+#define VHA_CR_SYS_EVENT_INJECT                           (0x0288U)
+#define VHA_CR_SYS_EVENT_INJECT_MASKFULL                  (IMG_UINT64_C(0x000000007DFFFFFF))
+#define VHA_CR_SYS_EVENT_INJECT_LOGIC_ERROR_SHIFT         (30U)
+#define VHA_CR_SYS_EVENT_INJECT_LOGIC_ERROR_CLRMSK        (0XBFFFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_LOGIC_ERROR_EN            (0X40000000U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_CORRECTION_SHIFT      (29U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_CORRECTION_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_CORRECTION_EN         (0X20000000U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_DETECTION_SHIFT       (28U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_DETECTION_CLRMSK      (0XEFFFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_DETECTION_EN          (0X10000000U)
+#define VHA_CR_SYS_EVENT_INJECT_LSYNC_INV_REQ_SHIFT       (27U)
+#define VHA_CR_SYS_EVENT_INJECT_LSYNC_INV_REQ_CLRMSK      (0XF7FFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_LSYNC_INV_REQ_EN          (0X08000000U)
+#define VHA_CR_SYS_EVENT_INJECT_SOCM_SCRUB_DONE_SHIFT     (26U)
+#define VHA_CR_SYS_EVENT_INJECT_SOCM_SCRUB_DONE_CLRMSK    (0XFBFFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_SOCM_SCRUB_DONE_EN        (0X04000000U)
+#define VHA_CR_SYS_EVENT_INJECT_AXI_MEMORY_PARITY_ERROR_SHIFT (21U)
+#define VHA_CR_SYS_EVENT_INJECT_AXI_MEMORY_PARITY_ERROR_CLRMSK (0XFE1FFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_SYS_MEM_WDT_SHIFT         (20U)
+#define VHA_CR_SYS_EVENT_INJECT_SYS_MEM_WDT_CLRMSK        (0XFFEFFFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_SYS_MEM_WDT_EN            (0X00100000U)
+#define VHA_CR_SYS_EVENT_INJECT_MMU_PARITY_ERROR_SHIFT    (16U)
+#define VHA_CR_SYS_EVENT_INJECT_MMU_PARITY_ERROR_CLRMSK   (0XFFF0FFFFU)
+#define VHA_CR_SYS_EVENT_INJECT_MMU_PAGE_FAULT_SHIFT      (8U)
+#define VHA_CR_SYS_EVENT_INJECT_MMU_PAGE_FAULT_CLRMSK     (0XFFFF00FFU)
+#define VHA_CR_SYS_EVENT_INJECT_AXI_ERROR_SHIFT           (4U)
+#define VHA_CR_SYS_EVENT_INJECT_AXI_ERROR_CLRMSK          (0XFFFFFF0FU)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_INIT_DONE_SHIFT       (3U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_INIT_DONE_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_SYS_EVENT_INJECT_RAM_INIT_DONE_EN          (0X00000008U)
+#define VHA_CR_SYS_EVENT_INJECT_MEMBUS_RESET_DONE_SHIFT   (2U)
+#define VHA_CR_SYS_EVENT_INJECT_MEMBUS_RESET_DONE_CLRMSK  (0XFFFFFFFBU)
+#define VHA_CR_SYS_EVENT_INJECT_MEMBUS_RESET_DONE_EN      (0X00000004U)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_ABORT_SHIFT         (1U)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_ABORT_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_ABORT_EN            (0X00000002U)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_COMPLETE_SHIFT      (0U)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_COMPLETE_CLRMSK     (0XFFFFFFFEU)
+#define VHA_CR_SYS_EVENT_INJECT_POWER_COMPLETE_EN         (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_THRESHOLD
+*/
+#define VHA_CR_SYS_EVENT_THRESHOLD                        (0x0238U)
+#define VHA_CR_SYS_EVENT_THRESHOLD_MASKFULL               (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SYS_EVENT_THRESHOLD_RAM_CORRECTION_SHIFT   (0U)
+#define VHA_CR_SYS_EVENT_THRESHOLD_RAM_CORRECTION_CLRMSK  (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_SYS_EVENT_THRESHOLD_VAL
+*/
+#define VHA_CR_SYS_EVENT_THRESHOLD_VAL                    (0x0240U)
+#define VHA_CR_SYS_EVENT_THRESHOLD_VAL_MASKFULL           (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SYS_EVENT_THRESHOLD_VAL_RAM_CORRECTION_SHIFT (0U)
+#define VHA_CR_SYS_EVENT_THRESHOLD_VAL_RAM_CORRECTION_CLRMSK (0XFFFF0000U)
+
+
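+/*
+ * Editorial sketch: judging by the field names, VHA_CR_SYS_EVENT_THRESHOLD
+ * programs the 16-bit RAM-correction count at which a RAM_CORRECTION event
+ * is raised, and VHA_CR_SYS_EVENT_THRESHOLD_VAL reads the current count
+ * back.  Packing the threshold is a single masked shift:
+ */
+static inline unsigned int vha_cr_ram_correction_threshold(unsigned int count)
+{
+        /* The field occupies bits [15:0]; out-of-range bits are masked off. */
+        return (count << VHA_CR_SYS_EVENT_THRESHOLD_RAM_CORRECTION_SHIFT) &
+               ~VHA_CR_SYS_EVENT_THRESHOLD_RAM_CORRECTION_CLRMSK;
+}
+
+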
+/*
+    Register VHA_CR_POWER_EVENT
+*/
+#define VHA_CR_POWER_EVENT                                (0x0248U)
+#define VHA_CR_POWER_EVENT_MASKFULL                       (IMG_UINT64_C(0x0000000000FFFF03))
+#define VHA_CR_POWER_EVENT_DOMAIN_SHIFT                   (8U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CLRMSK                  (0XFF0000FFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_RESERVED_SHIFT          (17U)
+#define VHA_CR_POWER_EVENT_DOMAIN_RESERVED_CLRMSK         (0XFF01FFFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE7_SHIFT             (16U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE7_CLRMSK            (0XFFFEFFFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE6_SHIFT             (15U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE6_CLRMSK            (0XFFFF7FFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE5_SHIFT             (14U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE5_CLRMSK            (0XFFFFBFFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE4_SHIFT             (13U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE4_CLRMSK            (0XFFFFDFFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE3_SHIFT             (12U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE3_CLRMSK            (0XFFFFEFFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE2_SHIFT             (11U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE2_CLRMSK            (0XFFFFF7FFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE1_SHIFT             (10U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE1_CLRMSK            (0XFFFFFBFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE0_SHIFT             (9U)
+#define VHA_CR_POWER_EVENT_DOMAIN_CORE0_CLRMSK            (0XFFFFFDFFU)
+#define VHA_CR_POWER_EVENT_DOMAIN_TLC_SHIFT               (8U)
+#define VHA_CR_POWER_EVENT_DOMAIN_TLC_CLRMSK              (0XFFFFFEFFU)
+#define VHA_CR_POWER_EVENT_REQ_SHIFT                      (1U)
+#define VHA_CR_POWER_EVENT_REQ_CLRMSK                     (0XFFFFFFFDU)
+#define VHA_CR_POWER_EVENT_REQ_EN                         (0X00000002U)
+#define VHA_CR_POWER_EVENT_TYPE_SHIFT                     (0U)
+#define VHA_CR_POWER_EVENT_TYPE_CLRMSK                    (0XFFFFFFFEU)
+#define VHA_CR_POWER_EVENT_TYPE_POWER_DOWN                (0X00000000U)
+#define VHA_CR_POWER_EVENT_TYPE_POWER_UP                  (0X00000001U)
+
+
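+/*
+ * Editorial sketch: composing a VHA_CR_POWER_EVENT request from the fields
+ * above.  DOMAIN is a per-domain bit mask (TLC plus CORE0..CORE7), TYPE
+ * selects the direction and REQ latches the request.  Example value for
+ * powering up the TLC together with cores 0 and 1 (the register write
+ * itself is platform code and not shown):
+ */
+static inline unsigned int vha_cr_power_up_tlc_core01(void)
+{
+        return (1U << VHA_CR_POWER_EVENT_DOMAIN_TLC_SHIFT) |
+               (1U << VHA_CR_POWER_EVENT_DOMAIN_CORE0_SHIFT) |
+               (1U << VHA_CR_POWER_EVENT_DOMAIN_CORE1_SHIFT) |
+               VHA_CR_POWER_EVENT_TYPE_POWER_UP |
+               VHA_CR_POWER_EVENT_REQ_EN;
+}
+
+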
+/*
+    Register VHA_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define VHA_CR_IDLE_HYSTERESIS_COUNT                      (0x0250U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_MASKFULL             (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VALUE_SHIFT          (0U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+#define VHA_CR_RESET_CLK_CTRL_MODE_MASK                   (0x00000003U)
+/*
+Clock always OFF during reset. */
+#define VHA_CR_RESET_CLK_CTRL_MODE_OFF                    (0x00000000U)
+/*
+Clock ON for 16 clock cycles during reset. */
+#define VHA_CR_RESET_CLK_CTRL_MODE_ON_16                  (0x00000001U)
+/*
+Clock ON for 64 clock cycles during reset. */
+#define VHA_CR_RESET_CLK_CTRL_MODE_ON_64                  (0x00000002U)
+/*
+Clock always ON during reset. */
+#define VHA_CR_RESET_CLK_CTRL_MODE_ON                     (0x00000003U)
+
+
+/*
+    Register VHA_CR_RESET_CLK_CTRL
+*/
+#define VHA_CR_RESET_CLK_CTRL                             (0x0258U)
+#define VHA_CR_RESET_CLK_CTRL_MASKFULL                    (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_RESET_CLK_CTRL_CTRL_SHIFT                  (0U)
+#define VHA_CR_RESET_CLK_CTRL_CTRL_CLRMSK                 (0XFFFFFFFCU)
+#define VHA_CR_RESET_CLK_CTRL_CTRL_OFF                    (0X00000000U)
+#define VHA_CR_RESET_CLK_CTRL_CTRL_ON_16                  (0X00000001U)
+#define VHA_CR_RESET_CLK_CTRL_CTRL_ON_64                  (0X00000002U)
+#define VHA_CR_RESET_CLK_CTRL_CTRL_ON                     (0X00000003U)
+
+
+/*
+    Register VHA_CR_VHA_AXI_RESET_CTRL
+*/
+#define VHA_CR_VHA_AXI_RESET_CTRL                         (0x0260U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_SHIFT (0U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_SYS_RAM_INIT
+*/
+#define VHA_CR_SYS_RAM_INIT                               (0x0268U)
+#define VHA_CR_SYS_RAM_INIT_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SYS_RAM_INIT_KICK_SHIFT                    (0U)
+#define VHA_CR_SYS_RAM_INIT_KICK_CLRMSK                   (0XFFFFFFFEU)
+#define VHA_CR_SYS_RAM_INIT_KICK_EN                       (0X00000001U)
+
+
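+/*
+ * Editorial sketch: VHA_CR_SYS_RAM_INIT exposes a single KICK bit, and
+ * completion appears to be reported through the RAM_INIT_DONE bit of
+ * VHA_CR_SYS_EVENT_STATUS defined earlier.  With hypothetical 32-bit MMIO
+ * accessors read32()/write32(), a polled sequence would be:
+ *
+ *   write32(base + VHA_CR_SYS_RAM_INIT, VHA_CR_SYS_RAM_INIT_KICK_EN);
+ *   while (!(read32(base + VHA_CR_SYS_EVENT_STATUS) &
+ *            VHA_CR_SYS_EVENT_STATUS_RAM_INIT_DONE_EN))
+ *           ;  // add a timeout in real code
+ */
+
+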
+/*
+    Register VHA_CR_WM_EVENT_SOURCE
+*/
+#define VHA_CR_WM_EVENT_SOURCE                            (0x0270U)
+#define VHA_CR_WM_EVENT_SOURCE_MASKFULL                   (IMG_UINT64_C(0x00000000FFFF0000))
+#define VHA_CR_WM_EVENT_SOURCE_IC_SHIFT                   (24U)
+#define VHA_CR_WM_EVENT_SOURCE_IC_CLRMSK                  (0X00FFFFFFU)
+#define VHA_CR_WM_EVENT_SOURCE_CORE_SHIFT                 (16U)
+#define VHA_CR_WM_EVENT_SOURCE_CORE_CLRMSK                (0XFF00FFFFU)
+
+
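+/*
+ * Editorial sketch: the field names suggest VHA_CR_WM_EVENT_SOURCE reports
+ * which interconnect(s) and core(s) sourced a pending WM event, as two
+ * 8-bit masks.  Decoding them:
+ */
+static inline unsigned int vha_cr_wm_event_ic_mask(unsigned int reg_val)
+{
+        return (reg_val & ~VHA_CR_WM_EVENT_SOURCE_IC_CLRMSK) >>
+               VHA_CR_WM_EVENT_SOURCE_IC_SHIFT;
+}
+
+static inline unsigned int vha_cr_wm_event_core_mask(unsigned int reg_val)
+{
+        return (reg_val & ~VHA_CR_WM_EVENT_SOURCE_CORE_CLRMSK) >>
+               VHA_CR_WM_EVENT_SOURCE_CORE_SHIFT;
+}
+
+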
+/*
+    Register VHA_CR_SYS_RTM_CTRL
+*/
+#define VHA_CR_SYS_RTM_CTRL                               (0x0278U)
+#define VHA_CR_SYS_RTM_CTRL_MASKFULL                      (IMG_UINT64_C(0x00000000C0FFFFF8))
+#define VHA_CR_SYS_RTM_CTRL_RTM_ENABLE_SHIFT              (31U)
+#define VHA_CR_SYS_RTM_CTRL_RTM_ENABLE_CLRMSK             (0X7FFFFFFFU)
+#define VHA_CR_SYS_RTM_CTRL_RTM_ENABLE_EN                 (0X80000000U)
+#define VHA_CR_SYS_RTM_CTRL_RTM_CHECK_SHIFT               (30U)
+#define VHA_CR_SYS_RTM_CTRL_RTM_CHECK_CLRMSK              (0XBFFFFFFFU)
+#define VHA_CR_SYS_RTM_CTRL_RTM_CHECK_EN                  (0X40000000U)
+#define VHA_CR_SYS_RTM_CTRL_RTM_SELECTOR_SHIFT            (3U)
+#define VHA_CR_SYS_RTM_CTRL_RTM_SELECTOR_CLRMSK           (0XFF000007U)
+
+
+/*
+    Register VHA_CR_SYS_RTM_DATA
+*/
+#define VHA_CR_SYS_RTM_DATA                               (0x0280U)
+#define VHA_CR_SYS_RTM_DATA_MASKFULL                      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SYS_RTM_DATA_RTM_DATA_SHIFT                (0U)
+#define VHA_CR_SYS_RTM_DATA_RTM_DATA_CLRMSK               (0X00000000U)
+
+
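+/*
+ * Editorial sketch: inferred from the field names, a register-transfer
+ * monitor (RTM) readout enables RTM in VHA_CR_SYS_RTM_CTRL, programs the
+ * SELECTOR field, then reads the 32-bit result from VHA_CR_SYS_RTM_DATA.
+ * Composing the control value:
+ */
+static inline unsigned int vha_cr_rtm_ctrl_value(unsigned int selector)
+{
+        return VHA_CR_SYS_RTM_CTRL_RTM_ENABLE_EN |
+               ((selector << VHA_CR_SYS_RTM_CTRL_RTM_SELECTOR_SHIFT) &
+                ~VHA_CR_SYS_RTM_CTRL_RTM_SELECTOR_CLRMSK);
+}
+
+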
+/*
+    Register VHA_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define VHA_CR_SOCIF_WAKEUP_ENABLE                        (0x0400U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT           (0U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_AXI_EXACCESS
+*/
+#define VHA_CR_AXI_EXACCESS                               (0x0408U)
+#define VHA_CR_AXI_EXACCESS_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT            (0U)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_REGBANK_REQUEST_INVALID
+*/
+#define VHA_CR_REGBANK_REQUEST_INVALID                    (0x0410U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT         (0U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_EN            (0X00000001U)
+
+
+#define VHA_CR_CORE_MAPPING_MASK                          (0x0000000FU)
+/*
+Core mapped to Workload Manager 0 */
+#define VHA_CR_CORE_MAPPING_WM0                           (0x00000000U)
+/*
+Core mapped to Workload Manager 1 */
+#define VHA_CR_CORE_MAPPING_WM1                           (0x00000001U)
+/*
+Core mapped to Workload Manager 2 */
+#define VHA_CR_CORE_MAPPING_WM2                           (0x00000002U)
+/*
+Core mapped to Workload Manager 3 */
+#define VHA_CR_CORE_MAPPING_WM3                           (0x00000003U)
+/*
+Core mapped to Workload Manager 4 */
+#define VHA_CR_CORE_MAPPING_WM4                           (0x00000004U)
+/*
+Core mapped to Workload Manager 5 */
+#define VHA_CR_CORE_MAPPING_WM5                           (0x00000005U)
+/*
+Core mapped to Workload Manager 6 */
+#define VHA_CR_CORE_MAPPING_WM6                           (0x00000006U)
+/*
+Core mapped to Workload Manager 7 */
+#define VHA_CR_CORE_MAPPING_WM7                           (0x00000007U)
+/*
+Dual lockstep with Core N-1 */
+#define VHA_CR_CORE_MAPPING_LOCKSTEP                      (0x00000008U)
+/*
+Unallocated */
+#define VHA_CR_CORE_MAPPING_UNALLOCATED                   (0x00000009U)
+
+
+/*
+    Register VHA_CR_CORE_ASSIGNMENT
+*/
+#define VHA_CR_CORE_ASSIGNMENT                            (0x0418U)
+#define VHA_CR_CORE_ASSIGNMENT_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_SHIFT    (28U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_CLRMSK   (0X0FFFFFFFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM1      (0X10000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM2      (0X20000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM3      (0X30000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM4      (0X40000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM5      (0X50000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM6      (0X60000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_WM7      (0X70000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_LOCKSTEP (0X80000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_UNALLOCATED (0X90000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_SHIFT    (24U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_CLRMSK   (0XF0FFFFFFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM1      (0X01000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM2      (0X02000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM3      (0X03000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM4      (0X04000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM5      (0X05000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM6      (0X06000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_WM7      (0X07000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_LOCKSTEP (0X08000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_UNALLOCATED (0X09000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_SHIFT    (20U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_CLRMSK   (0XFF0FFFFFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM1      (0X00100000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM2      (0X00200000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM3      (0X00300000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM4      (0X00400000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM5      (0X00500000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM6      (0X00600000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_WM7      (0X00700000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_LOCKSTEP (0X00800000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_UNALLOCATED (0X00900000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_SHIFT    (16U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_CLRMSK   (0XFFF0FFFFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM1      (0X00010000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM2      (0X00020000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM3      (0X00030000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM4      (0X00040000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM5      (0X00050000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM6      (0X00060000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_WM7      (0X00070000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_LOCKSTEP (0X00080000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_UNALLOCATED (0X00090000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_SHIFT    (12U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_CLRMSK   (0XFFFF0FFFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM1      (0X00001000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM2      (0X00002000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM3      (0X00003000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM4      (0X00004000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM5      (0X00005000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM6      (0X00006000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_WM7      (0X00007000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_LOCKSTEP (0X00008000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_UNALLOCATED (0X00009000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_SHIFT    (8U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_CLRMSK   (0XFFFFF0FFU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM1      (0X00000100U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM2      (0X00000200U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM3      (0X00000300U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM4      (0X00000400U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM5      (0X00000500U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM6      (0X00000600U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM7      (0X00000700U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_LOCKSTEP (0X00000800U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_UNALLOCATED (0X00000900U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_SHIFT    (4U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_CLRMSK   (0XFFFFFF0FU)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM1      (0X00000010U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM2      (0X00000020U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM3      (0X00000030U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM4      (0X00000040U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM5      (0X00000050U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM6      (0X00000060U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_WM7      (0X00000070U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_LOCKSTEP (0X00000080U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_UNALLOCATED (0X00000090U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_SHIFT    (0U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_CLRMSK   (0XFFFFFFF0U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM0      (0X00000000U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM1      (0X00000001U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM2      (0X00000002U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM3      (0X00000003U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM4      (0X00000004U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM5      (0X00000005U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM6      (0X00000006U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_WM7      (0X00000007U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_LOCKSTEP (0X00000008U)
+#define VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED (0X00000009U)
+
+
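+/*
+ * Editorial sketch: each core owns a 4-bit WM-mapping field in
+ * VHA_CR_CORE_ASSIGNMENT (core N at bits [4N+3:4N]) whose values come from
+ * the VHA_CR_CORE_MAPPING_* enumeration above.  A generic setter avoids
+ * spelling out the per-core constants:
+ */
+static inline unsigned int vha_cr_core_assignment_set(unsigned int reg_val,
+                                                      unsigned int core,
+                                                      unsigned int mapping)
+{
+        unsigned int shift = core * 4U; /* CORE_0 at bit 0 ... CORE_7 at bit 28 */
+
+        reg_val &= ~(VHA_CR_CORE_MAPPING_MASK << shift); /* clear old mapping */
+        reg_val |= (mapping & VHA_CR_CORE_MAPPING_MASK) << shift;
+        return reg_val;
+}
+
+/*
+ * Example: vha_cr_core_assignment_set(v, 2, VHA_CR_CORE_MAPPING_WM1) is
+ * equivalent to (v & VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_CLRMSK) |
+ * VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_WM1.  The same pattern applies
+ * to VHA_CR_SOCM_BUF_ASSIGNMENT below.
+ */
+
+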
+/*
+    Register VHA_CR_SOCM_BUF_ASSIGNMENT
+*/
+#define VHA_CR_SOCM_BUF_ASSIGNMENT                        (0x0420U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_SHIFT (28U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM1 (0X10000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM2 (0X20000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM3 (0X30000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM4 (0X40000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM5 (0X50000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM6 (0X60000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_WM7 (0X70000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_LOCKSTEP (0X80000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_UNALLOCATED (0X90000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_SHIFT (24U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM1 (0X01000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM2 (0X02000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM3 (0X03000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM4 (0X04000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM5 (0X05000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM6 (0X06000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_WM7 (0X07000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_LOCKSTEP (0X08000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_UNALLOCATED (0X09000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_SHIFT (20U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM1 (0X00100000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM2 (0X00200000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM3 (0X00300000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM4 (0X00400000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM5 (0X00500000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM6 (0X00600000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_WM7 (0X00700000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_LOCKSTEP (0X00800000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_UNALLOCATED (0X00900000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_SHIFT (16U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM1 (0X00010000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM2 (0X00020000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM3 (0X00030000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM4 (0X00040000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM5 (0X00050000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM6 (0X00060000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_WM7 (0X00070000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_LOCKSTEP (0X00080000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_UNALLOCATED (0X00090000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_SHIFT (12U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM1 (0X00001000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM2 (0X00002000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM3 (0X00003000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM4 (0X00004000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM5 (0X00005000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM6 (0X00006000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_WM7 (0X00007000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_LOCKSTEP (0X00008000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_UNALLOCATED (0X00009000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_SHIFT (8U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM1 (0X00000100U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM2 (0X00000200U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM3 (0X00000300U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM4 (0X00000400U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM5 (0X00000500U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM6 (0X00000600U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_WM7 (0X00000700U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_LOCKSTEP (0X00000800U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_UNALLOCATED (0X00000900U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_SHIFT (4U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM1 (0X00000010U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM2 (0X00000020U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM3 (0X00000030U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM4 (0X00000040U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM5 (0X00000050U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM6 (0X00000060U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_WM7 (0X00000070U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_LOCKSTEP (0X00000080U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_UNALLOCATED (0X00000090U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_SHIFT (0U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_CLRMSK (0XFFFFFFF0U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM0 (0X00000000U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM1 (0X00000001U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM2 (0X00000002U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM3 (0X00000003U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM4 (0X00000004U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM5 (0X00000005U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM6 (0X00000006U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_WM7 (0X00000007U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_LOCKSTEP (0X00000008U)
+#define VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_UNALLOCATED (0X00000009U)
+
+
+/*
+The host is assigned the same priority level as the rest of the internal system bus requestors */
+#define VHA_CR_SOCIF_ARBITER_CONFIG_ENUM_SOCIF_PRIORITIES_EQUAL (0x00000000U)
+/*
+The host is assigned a higher priority level than the rest of the internal system bus requestors */
+#define VHA_CR_SOCIF_ARBITER_CONFIG_ENUM_SOCIF_PRIORITIES_HIGHEST (0x00000001U)
+
+
+/*
+    Register VHA_CR_SOCIF_ARBITER_CONFIG
+*/
+#define VHA_CR_SOCIF_ARBITER_CONFIG                       (0x0428U)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_MASKFULL              (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_EXTRA_ALLOC_SHIFT (1U)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_EXTRA_ALLOC_CLRMSK (0XFFFFFFE1U)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_PRIORITY_SHIFT (0U)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_PRIORITY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_PRIORITY_EQUAL (0X00000000U)
+#define VHA_CR_SOCIF_ARBITER_CONFIG_SOCIF_HOST_PRIORITY_HIGHEST (0X00000001U)
+
+
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOGIC_ERROR_SHIFT  (30U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOGIC_ERROR_EN     (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_TYPE_LOCKSTEP_ERROR_EN  (0X00000001U)
+
+
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_PARITY_SHIFT (31U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_PARITY_CLRMSK (0X7FFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_PARITY_EN   (0X80000000U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_STATUS_TYPE_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE
+*/
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE             (0x0500U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_MASKFULL    (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_ENABLE_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_HOST_STATUS
+*/
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS             (0x0508U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_MASKFULL    (IMG_UINT64_C(0x00000000C0000111))
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_PARITY_SHIFT (31U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_PARITY_CLRMSK (0X7FFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_PARITY_EN   (0X80000000U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE
+*/
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE     (0x0530U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_MASKFULL (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_STATUS_DISABLE_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR
+*/
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR              (0x0510U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_MASKFULL     (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_WM_ENABLE
+*/
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE               (0x0518U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_MASKFULL      (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_ENABLE_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_WM_STATUS
+*/
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS               (0x0520U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_MASKFULL      (IMG_UINT64_C(0x00000000C0000111))
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_PARITY_SHIFT  (31U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_PARITY_CLRMSK (0X7FFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_PARITY_EN     (0X80000000U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE
+*/
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE       (0x0538U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_MASKFULL (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_STATUS_DISABLE_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_WM_CLEAR
+*/
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR                (0x0528U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_MASKFULL       (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_WM_CLEAR_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_INTERCONNECT_EVENT_INJECT
+*/
+#define VHA_CR_INTERCONNECT_EVENT_INJECT                  (0x0540U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_MASKFULL         (IMG_UINT64_C(0x0000000040000111))
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOGIC_ERROR_EN   (0X40000000U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_MISMATCH_SHIFT (8U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_MISMATCH_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_MISMATCH_EN (0X00000100U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_UNRESPONSIVE_SHIFT (4U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_UNRESPONSIVE_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_SOCIF_READ_UNRESPONSIVE_EN (0X00000010U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOCKSTEP_ERROR_SHIFT (0U)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOCKSTEP_ERROR_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_INTERCONNECT_EVENT_INJECT_LOCKSTEP_ERROR_EN (0X00000001U)
+
+
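+/*
+ * Editorial sketch: VHA_CR_INTERCONNECT_EVENT_INJECT shares its bit layout
+ * with the enable/status/clear registers above, so a fault-injection test
+ * can raise, observe and acknowledge the same event bit.  With hypothetical
+ * 32-bit MMIO accessors read32()/write32():
+ *
+ *   write32(base + VHA_CR_INTERCONNECT_EVENT_INJECT,
+ *           VHA_CR_INTERCONNECT_EVENT_INJECT_LOCKSTEP_ERROR_EN);
+ *   status = read32(base + VHA_CR_INTERCONNECT_EVENT_HOST_STATUS);
+ *   write32(base + VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR,
+ *           status & (unsigned int)VHA_CR_INTERCONNECT_EVENT_HOST_CLEAR_MASKFULL);
+ */
+
+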
+/*
+    Register VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL
+*/
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL                   (0x0800U)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL_MASKFULL          (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_SHIFT (0U)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_EN   (0X00000001U)
+
+
+/*
+    Register VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS
+*/
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS                 (0x0808U)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS_MASKFULL        (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_SHIFT (0U)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_IC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO
+*/
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO            (0x1000U)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_MASKFULL   (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_LSYNC_SOCIF_RTN_SHIFT (12U)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_LSYNC_SOCIF_RTN_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_LSYNC_SOCIF_SHIFT (8U)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_LSYNC_SOCIF_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_HOST_SOCIF_RTN_SHIFT (4U)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_HOST_SOCIF_RTN_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_HOST_SOCIF_SHIFT (0U)
+#define VHA_CR_NN_SYS2_SYSBUS_HOST_STALL_RATIO_HOST_SOCIF_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO
+*/
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO              (0x1008U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_MASKFULL     (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM7_SOCIF_RTN_SHIFT (60U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM7_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0X0FFFFFFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM7_SOCIF_SHIFT (56U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM7_SOCIF_CLRMSK (IMG_UINT64_C(0XF0FFFFFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM6_SOCIF_RTN_SHIFT (52U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM6_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFF0FFFFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM6_SOCIF_SHIFT (48U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM6_SOCIF_CLRMSK (IMG_UINT64_C(0XFFF0FFFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM5_SOCIF_RTN_SHIFT (44U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM5_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM5_SOCIF_SHIFT (40U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM5_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM4_SOCIF_RTN_SHIFT (36U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM4_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFFFF0FFFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM4_SOCIF_SHIFT (32U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM4_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM3_SOCIF_RTN_SHIFT (28U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM3_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM3_SOCIF_SHIFT (24U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM3_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM2_SOCIF_RTN_SHIFT (20U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM2_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM2_SOCIF_SHIFT (16U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM2_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM1_SOCIF_RTN_SHIFT (12U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM1_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM1_SOCIF_SHIFT (8U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM1_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_RTN_SHIFT (4U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_RTN_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_SHIFT (0U)
+#define VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
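+
+/*
+    Usage sketch (illustrative only): each WM contributes a 4-bit ratio field,
+    so a field is extracted by masking with the complement of its CLRMSK and
+    shifting down by its SHIFT. Shown here for WM0.
+
+        static inline unsigned int vha_wm0_socif_stall_ratio(void __iomem *base)
+        {
+            uint64_t v = readq(base + VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO);
+
+            return (unsigned int)
+                ((v & ~VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_CLRMSK) >>
+                 VHA_CR_NN_SYS2_SYSBUS_WM_STALL_RATIO_WM0_SOCIF_SHIFT);
+        }
+*/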
+
+
+/*
+    Register VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL
+*/
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL                  (0x1200U)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL_MASKFULL         (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_SHIFT (0U)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS
+*/
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS                (0x1208U)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS_MASKFULL       (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_SHIFT (0U)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_TLC_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_LOW_LEVEL_SYNC_BASE_ADDR
+*/
+#define VHA_CR_LOW_LEVEL_SYNC_BASE_ADDR                   (0x3000U)
+#define VHA_CR_LOW_LEVEL_SYNC_BASE_ADDR_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFFE0))
+#define VHA_CR_LOW_LEVEL_SYNC_BASE_ADDR_BASE_ADDR_SHIFT   (5U)
+#define VHA_CR_LOW_LEVEL_SYNC_BASE_ADDR_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+    Register VHA_CR_SOCM_BASE_ADDR
+*/
+#define VHA_CR_SOCM_BASE_ADDR                             (0x3008U)
+#define VHA_CR_SOCM_BASE_ADDR_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFE0))
+#define VHA_CR_SOCM_BASE_ADDR_BASE_ADDR_SHIFT             (5U)
+#define VHA_CR_SOCM_BASE_ADDR_BASE_ADDR_CLRMSK            (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+    Register VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE
+*/
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE                  (0x3010U)
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFF80))
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_SHIFT (7U)
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000007F))
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_ALIGNSHIFT (7U)
+#define VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_ALIGNSIZE (128U)
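+
+/*
+    Usage sketch (illustrative only, assuming the usual convention of these
+    generated headers: the field holds the value right-shifted by ALIGNSHIFT,
+    so the size must be a multiple of ALIGNSIZE, i.e. 128 bytes here).
+
+        static inline void vha_set_socm_circ_buf_size(void __iomem *base,
+                                                      uint64_t size_bytes)
+        {
+            uint64_t v = (size_bytes >>
+                VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_ALIGNSHIFT)
+                << VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_SOCM_CIRCULAR_BUFFER_SIZE_SHIFT;
+
+            writeq(v & VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE_MASKFULL,
+                   base + VHA_CR_SOCM_CIRCULAR_BUFFER_SIZE);
+        }
+*/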
+
+
+/*
+    Register VHA_CR_SOCM_B7_XOR_BITS
+*/
+#define VHA_CR_SOCM_B7_XOR_BITS                           (0x3018U)
+#define VHA_CR_SOCM_B7_XOR_BITS_MASKFULL                  (IMG_UINT64_C(0x0000000001FFFE00))
+#define VHA_CR_SOCM_B7_XOR_BITS_SOCM_B7_XOR_BITS_SHIFT    (9U)
+#define VHA_CR_SOCM_B7_XOR_BITS_SOCM_B7_XOR_BITS_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFE0001FF))
+
+
+/*
+    Register VHA_CR_SOCM_B8_XOR_BITS
+*/
+#define VHA_CR_SOCM_B8_XOR_BITS                           (0x3020U)
+#define VHA_CR_SOCM_B8_XOR_BITS_MASKFULL                  (IMG_UINT64_C(0x0000000001FFFE00))
+#define VHA_CR_SOCM_B8_XOR_BITS_SOCM_B8_XOR_BITS_SHIFT    (9U)
+#define VHA_CR_SOCM_B8_XOR_BITS_SOCM_B8_XOR_BITS_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFE0001FF))
+
+
+/*
+    Register VHA_CR_SOCM_MASKEDWRITE_STALL
+*/
+#define VHA_CR_SOCM_MASKEDWRITE_STALL                     (0x3028U)
+#define VHA_CR_SOCM_MASKEDWRITE_STALL_MASKFULL            (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_SOCM_MASKEDWRITE_STALL_IDLE_SHIFT          (6U)
+#define VHA_CR_SOCM_MASKEDWRITE_STALL_IDLE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define VHA_CR_SOCM_MASKEDWRITE_STALL_BUSY_SHIFT          (0U)
+#define VHA_CR_SOCM_MASKEDWRITE_STALL_BUSY_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFC0))
+
+
+/*
+    Register VHA_CR_BW_LIMIT_CTRL
+*/
+#define VHA_CR_BW_LIMIT_CTRL                              (0x3030U)
+#define VHA_CR_BW_LIMIT_CTRL_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_BW_LIMIT_CTRL_BW_LIMIT_ENABLE_SHIFT        (0U)
+#define VHA_CR_BW_LIMIT_CTRL_BW_LIMIT_ENABLE_CLRMSK       (0XFFFFFFFEU)
+#define VHA_CR_BW_LIMIT_CTRL_BW_LIMIT_ENABLE_EN           (0X00000001U)
+
+
+/*
+    Register VHA_CR_BW_LIMIT_CTRL1
+*/
+#define VHA_CR_BW_LIMIT_CTRL1                             (0x3038U)
+#define VHA_CR_BW_LIMIT_CTRL1_MASKFULL                    (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_FLEX_SHIFT     (24U)
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_FLEX_CLRMSK    (IMG_UINT64_C(0XFFFFFF0000FFFFFF))
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_SLOPE_SHIFT    (16U)
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_SLOPE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_REQ_INCR_SHIFT     (8U)
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_REQ_INCR_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_PERIOD_MIN1_SHIFT  (0U)
+#define VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_PERIOD_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
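+
+/*
+    Usage sketch (illustrative only): composing the four sub-fields into one
+    64-bit write; the parameters are raw field values whose meaning and legal
+    ranges are hardware-defined. The limiter itself is switched on via
+    VHA_CR_BW_LIMIT_CTRL above.
+
+        static inline void vha_set_bw_limit(void __iomem *base,
+                                            uint64_t max_flex, uint64_t max_slope,
+                                            uint64_t req_incr, uint64_t period_min1)
+        {
+            uint64_t v =
+                (max_flex    << VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_FLEX_SHIFT) |
+                (max_slope   << VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_MAX_SLOPE_SHIFT) |
+                (req_incr    << VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_REQ_INCR_SHIFT) |
+                (period_min1 << VHA_CR_BW_LIMIT_CTRL1_BW_LIMIT_PERIOD_MIN1_SHIFT);
+
+            writeq(v & VHA_CR_BW_LIMIT_CTRL1_MASKFULL, base + VHA_CR_BW_LIMIT_CTRL1);
+        }
+*/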
+
+
+/*
+    Register VHA_CR_NOC_BWM_CONTROL
+*/
+#define VHA_CR_NOC_BWM_CONTROL                            (0x3040U)
+#define VHA_CR_NOC_BWM_CONTROL_MASKFULL                   (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_NOC_BWM_CONTROL_NOC_BWM_REQUESTER_SHIFT    (1U)
+#define VHA_CR_NOC_BWM_CONTROL_NOC_BWM_REQUESTER_CLRMSK   (0XFFFFFF01U)
+#define VHA_CR_NOC_BWM_CONTROL_NOC_BWM_ENABLE_SHIFT       (0U)
+#define VHA_CR_NOC_BWM_CONTROL_NOC_BWM_ENABLE_CLRMSK      (0XFFFFFFFEU)
+#define VHA_CR_NOC_BWM_CONTROL_NOC_BWM_ENABLE_EN          (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_BW_SOCM_RD
+*/
+#define VHA_CR_CORE_BW_SOCM_RD                            (0x3100U)
+#define VHA_CR_CORE_BW_SOCM_RD_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_SOCM_RD_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_SOCM_RD_BW_CLRMSK                  (0x00000000U)
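+
+/*
+    Usage sketch (illustrative only): the VHA_CR_CORE_BW_* registers in this
+    group are plain 32-bit event counters, so bandwidth over an interval is
+    the delta between two reads (unsigned arithmetic absorbs one wrap).
+
+        static inline uint32_t vha_bw_counter_delta(void __iomem *base,
+                                                    unsigned long offset,
+                                                    uint32_t prev)
+        {
+            uint32_t now = (uint32_t)readq(base + offset);
+
+            return now - prev;
+        }
+*/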
+
+
+/*
+    Register VHA_CR_CORE_BW_SOCM_WR
+*/
+#define VHA_CR_CORE_BW_SOCM_WR                            (0x3108U)
+#define VHA_CR_CORE_BW_SOCM_WR_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_SOCM_WR_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_SOCM_WR_BW_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_SOCM_MWR
+*/
+#define VHA_CR_CORE_BW_SOCM_MWR                           (0x3110U)
+#define VHA_CR_CORE_BW_SOCM_MWR_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_SOCM_MWR_BW_SHIFT                  (0U)
+#define VHA_CR_CORE_BW_SOCM_MWR_BW_CLRMSK                 (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_DDR_RD
+*/
+#define VHA_CR_CORE_BW_DDR_RD                             (0x3118U)
+#define VHA_CR_CORE_BW_DDR_RD_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_DDR_RD_BW_SHIFT                    (0U)
+#define VHA_CR_CORE_BW_DDR_RD_BW_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_DDR_WR
+*/
+#define VHA_CR_CORE_BW_DDR_WR                             (0x3120U)
+#define VHA_CR_CORE_BW_DDR_WR_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_DDR_WR_BW_SHIFT                    (0U)
+#define VHA_CR_CORE_BW_DDR_WR_BW_CLRMSK                   (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_DDR_MWR
+*/
+#define VHA_CR_CORE_BW_DDR_MWR                            (0x3128U)
+#define VHA_CR_CORE_BW_DDR_MWR_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_DDR_MWR_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_DDR_MWR_BW_CLRMSK                  (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_SOCM_RD_WORD
+*/
+#define VHA_CR_CORE_BW_SOCM_RD_WORD                       (0x3130U)
+#define VHA_CR_CORE_BW_SOCM_RD_WORD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_SOCM_RD_WORD_BW_SHIFT              (0U)
+#define VHA_CR_CORE_BW_SOCM_RD_WORD_BW_CLRMSK             (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_SOCM_WR_WORD
+*/
+#define VHA_CR_CORE_BW_SOCM_WR_WORD                       (0x3138U)
+#define VHA_CR_CORE_BW_SOCM_WR_WORD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_SOCM_WR_WORD_BW_SHIFT              (0U)
+#define VHA_CR_CORE_BW_SOCM_WR_WORD_BW_CLRMSK             (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_DDR_RD_WORD
+*/
+#define VHA_CR_CORE_BW_DDR_RD_WORD                        (0x3140U)
+#define VHA_CR_CORE_BW_DDR_RD_WORD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_DDR_RD_WORD_BW_SHIFT               (0U)
+#define VHA_CR_CORE_BW_DDR_RD_WORD_BW_CLRMSK              (0x00000000U)
+
+
+/*
+    Register VHA_CR_CORE_BW_DDR_WR_WORD
+*/
+#define VHA_CR_CORE_BW_DDR_WR_WORD                        (0x3148U)
+#define VHA_CR_CORE_BW_DDR_WR_WORD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_DDR_WR_WORD_BW_SHIFT               (0U)
+#define VHA_CR_CORE_BW_DDR_WR_WORD_BW_CLRMSK              (0x00000000U)
+
+
+/*
+    Register VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO
+*/
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO             (0x3150U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_LSYNC_RTN_SHIFT (28U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_LSYNC_RTN_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_LSYNC_SHIFT (24U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_LSYNC_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SOCM_RTN_SHIFT (20U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SOCM_RTN_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SOCM_SHIFT (16U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SOCM_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SLC_RTN_SHIFT (12U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SLC_RTN_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SLC_SHIFT (8U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_SYS_NOC_SLC_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_CORE_SYS_NOC_RTN_SHIFT (4U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_CORE_SYS_NOC_RTN_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_CORE_SYS_NOC_SHIFT (0U)
+#define VHA_CR_NN_SYS2_MEMBUS_SYS_STALL_RATIO_CORE_SYS_NOC_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+SYS_MEM_WDT is Disabled */
+#define VHA_CR_SYS_MEM_WDT_CTRL_SYS_MEM_WDT_CTRL_NONE     (0x00000000U)
+/*
+SYS_MEM_WDT is cleared when a WM is running */
+#define VHA_CR_SYS_MEM_WDT_CTRL_SYS_MEM_WDT_CTRL_KICK_WL  (0x00000001U)
+
+
+/*
+    Register VHA_CR_SYS_MEM_WDT_CTRL
+*/
+#define VHA_CR_SYS_MEM_WDT_CTRL                           (0x5000U)
+#define VHA_CR_SYS_MEM_WDT_CTRL_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SYS_MEM_WDT_CTRL_MODE_SHIFT                (0U)
+#define VHA_CR_SYS_MEM_WDT_CTRL_MODE_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_SYS_MEM_WDT_CTRL_MODE_NONE                 (0x00000000U)
+#define VHA_CR_SYS_MEM_WDT_CTRL_MODE_KICK_WL              (0X00000001U)
+
+
+/*
+    Register VHA_CR_SYS_MEM_WDT_COMPAREMATCH
+*/
+#define VHA_CR_SYS_MEM_WDT_COMPAREMATCH                   (0x5008U)
+#define VHA_CR_SYS_MEM_WDT_COMPAREMATCH_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SYS_MEM_WDT_COMPAREMATCH_VALUE_SHIFT       (0U)
+#define VHA_CR_SYS_MEM_WDT_COMPAREMATCH_VALUE_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_SYS_MEM_WDT_TIMER
+*/
+#define VHA_CR_SYS_MEM_WDT_TIMER                          (0x5010U)
+#define VHA_CR_SYS_MEM_WDT_TIMER_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SYS_MEM_WDT_TIMER_VALUE_SHIFT              (0U)
+#define VHA_CR_SYS_MEM_WDT_TIMER_VALUE_CLRMSK             (0x00000000U)
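+
+/*
+    Usage sketch (illustrative only, inferred from the register names): the
+    running count in VHA_CR_SYS_MEM_WDT_TIMER is presumably compared against
+    VHA_CR_SYS_MEM_WDT_COMPAREMATCH, with KICK_WL mode clearing it while a WM
+    is running.
+
+        static inline void vha_arm_sys_mem_wdt(void __iomem *base, uint32_t cycles)
+        {
+            writeq(cycles, base + VHA_CR_SYS_MEM_WDT_COMPAREMATCH);
+            writeq(VHA_CR_SYS_MEM_WDT_CTRL_MODE_KICK_WL,
+                   base + VHA_CR_SYS_MEM_WDT_CTRL);
+        }
+*/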
+
+
+/*
+    Register VHA_CR_LOW_LEVEL_SYNC_STATUS
+*/
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS                      (0x5018U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE7_SHIFT          (28U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE7_CLRMSK         (0X0FFFFFFFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE6_SHIFT          (24U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE6_CLRMSK         (0XF0FFFFFFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE5_SHIFT          (20U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE5_CLRMSK         (0XFF0FFFFFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE4_SHIFT          (16U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE4_CLRMSK         (0XFFF0FFFFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE3_SHIFT          (12U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE3_CLRMSK         (0XFFFF0FFFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE2_SHIFT          (8U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE2_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE1_SHIFT          (4U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE1_CLRMSK         (0XFFFFFF0FU)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE0_SHIFT          (0U)
+#define VHA_CR_LOW_LEVEL_SYNC_STATUS_CORE0_CLRMSK         (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_LOW_LEVEL_SYNC_CLEAR
+*/
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR                       (0x5020U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_MASKFULL              (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE7_SHIFT           (7U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE7_CLRMSK          (0XFFFFFF7FU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE7_EN              (0X00000080U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE6_SHIFT           (6U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE6_CLRMSK          (0XFFFFFFBFU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE6_EN              (0X00000040U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE5_SHIFT           (5U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE5_CLRMSK          (0XFFFFFFDFU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE5_EN              (0X00000020U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE4_SHIFT           (4U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE4_CLRMSK          (0XFFFFFFEFU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE4_EN              (0X00000010U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE3_SHIFT           (3U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE3_CLRMSK          (0XFFFFFFF7U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE3_EN              (0X00000008U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE2_SHIFT           (2U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE2_CLRMSK          (0XFFFFFFFBU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE2_EN              (0X00000004U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE1_SHIFT           (1U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE1_CLRMSK          (0XFFFFFFFDU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE1_EN              (0X00000002U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE0_SHIFT           (0U)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE0_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_LOW_LEVEL_SYNC_CLEAR_CORE0_EN              (0X00000001U)
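+
+/*
+    Usage sketch (illustrative only): clearing the low-level sync state for a
+    subset of cores; bits 7:0 of the written value map onto CORE7..CORE0, and
+    the per-core state can be read back from VHA_CR_LOW_LEVEL_SYNC_STATUS.
+
+        static inline void vha_clear_low_level_sync(void __iomem *base,
+                                                    uint8_t core_mask)
+        {
+            writeq(core_mask & VHA_CR_LOW_LEVEL_SYNC_CLEAR_MASKFULL,
+                   base + VHA_CR_LOW_LEVEL_SYNC_CLEAR);
+        }
+*/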
+
+
+/*
+    Register VHA_CR_SOCM_SCRUB_CTRL
+*/
+#define VHA_CR_SOCM_SCRUB_CTRL                            (0x5028U)
+#define VHA_CR_SOCM_SCRUB_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000000000000011))
+#define VHA_CR_SOCM_SCRUB_CTRL_MODE_SHIFT                 (4U)
+#define VHA_CR_SOCM_SCRUB_CTRL_MODE_CLRMSK                (0XFFFFFFEFU)
+#define VHA_CR_SOCM_SCRUB_CTRL_MODE_EN                    (0X00000010U)
+#define VHA_CR_SOCM_SCRUB_CTRL_KICK_SHIFT                 (0U)
+#define VHA_CR_SOCM_SCRUB_CTRL_KICK_CLRMSK                (0XFFFFFFFEU)
+#define VHA_CR_SOCM_SCRUB_CTRL_KICK_EN                    (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ
+*/
+#define VHA_CR_PERF_SLC0_READ                             (0x7000U)
+#define VHA_CR_PERF_SLC0_READ_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_COUNT_SHIFT                 (0U)
+#define VHA_CR_PERF_SLC0_READ_COUNT_CLRMSK                (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE
+*/
+#define VHA_CR_PERF_SLC0_WRITE                            (0x7008U)
+#define VHA_CR_PERF_SLC0_WRITE_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_SHIFT                (0U)
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_CLRMSK               (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_DATA_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL                 (0x7010U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_SHIFT     (0U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_STALL                       (0x7018U)
+#define VHA_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_STALL                      (0x7020U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_ID_STALL                    (0x7028U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_SHIFT        (0U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_CLRMSK       (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL                   (0x7030U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1                   (0x7038U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1                   (0x7040U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2                   (0x7048U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2                   (0x7050U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3                   (0x7058U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3                   (0x7060U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4                   (0x7068U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4                   (0x7070U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_RESET_FULL
+*/
+#define VHA_CR_PERF_RESET_FULL                            (0x7078U)
+#define VHA_CR_PERF_RESET_FULL_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_RESET_FULL_RANGE_SHIFT                (0U)
+#define VHA_CR_PERF_RESET_FULL_RANGE_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_PERF_RESET_FULL_RANGE_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_ENABLE_FULL
+*/
+#define VHA_CR_PERF_ENABLE_FULL                           (0x7080U)
+#define VHA_CR_PERF_ENABLE_FULL_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_SHIFT               (0U)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_EN                  (0X00000001U)
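+
+/*
+    Usage sketch (illustrative only): a typical profiling sequence for the
+    full-range counters is reset, enable, run the workload, then read the
+    VHA_CR_PERF_SLC0_* counters above.
+
+        static inline void vha_perf_full_start(void __iomem *base)
+        {
+            writeq(VHA_CR_PERF_RESET_FULL_RANGE_EN,  base + VHA_CR_PERF_RESET_FULL);
+            writeq(VHA_CR_PERF_ENABLE_FULL_RANGE_EN, base + VHA_CR_PERF_ENABLE_FULL);
+        }
+*/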
+
+
+/*
+    Register VHA_CR_MMU_STATUS
+*/
+#define VHA_CR_MMU_STATUS                                 (0x7088U)
+#define VHA_CR_MMU_STATUS_MASKFULL                        (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_SHIFT               (40U)
+#define VHA_CR_MMU_STATUS_MMU_STALLED_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define VHA_CR_MMU_STATUS_PM_WRITES_SHIFT                 (38U)
+#define VHA_CR_MMU_STATUS_PM_WRITES_CLRMSK                (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PM_READS_SHIFT                  (36U)
+#define VHA_CR_MMU_STATUS_PM_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PC_READS_SHIFT                  (24U)
+#define VHA_CR_MMU_STATUS_PC_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_MMU_STATUS_PD_READS_SHIFT                  (12U)
+#define VHA_CR_MMU_STATUS_PD_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_MMU_STATUS_PT_READS_SHIFT                  (0U)
+#define VHA_CR_MMU_STATUS_PT_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS1
+*/
+#define VHA_CR_SLC_STATUS1                                (0x7090U)
+#define VHA_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_RESERVED_SHIFT                 (48U)
+#define VHA_CR_SLC_STATUS1_RESERVED_CLRMSK                (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS2
+*/
+#define VHA_CR_SLC_STATUS2                                (0x7098U)
+#define VHA_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_IDLE
+*/
+#define VHA_CR_SLC_IDLE                                   (0x70A0U)
+#define VHA_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT              (12U)
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK             (0XFFFF0FFFU)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (4U)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFF00FU)
+#define VHA_CR_SLC_IDLE_MMU_SHIFT                         (3U)
+#define VHA_CR_SLC_IDLE_MMU_CLRMSK                        (0XFFFFFFF7U)
+#define VHA_CR_SLC_IDLE_MMU_EN                            (0X00000008U)
+#define VHA_CR_SLC_IDLE_CCM_SHIFT                         (2U)
+#define VHA_CR_SLC_IDLE_CCM_CLRMSK                        (0XFFFFFFFBU)
+#define VHA_CR_SLC_IDLE_CCM_EN                            (0X00000004U)
+#define VHA_CR_SLC_IDLE_RDI_SHIFT                         (1U)
+#define VHA_CR_SLC_IDLE_RDI_CLRMSK                        (0XFFFFFFFDU)
+#define VHA_CR_SLC_IDLE_RDI_EN                            (0X00000002U)
+#define VHA_CR_SLC_IDLE_XBAR_SHIFT                        (0U)
+#define VHA_CR_SLC_IDLE_XBAR_CLRMSK                       (0XFFFFFFFEU)
+#define VHA_CR_SLC_IDLE_XBAR_EN                           (0X00000001U)
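+
+/*
+    Usage sketch (illustrative only, assuming a set bit indicates the
+    corresponding unit is idle): checking that the whole SLC is quiescent,
+    e.g. before a reset or power transition. Poll/timeout policy is the
+    caller's responsibility.
+
+        static inline bool vha_slc_is_idle(void __iomem *base)
+        {
+            uint64_t v = readq(base + VHA_CR_SLC_IDLE);
+
+            return (v & VHA_CR_SLC_IDLE_MASKFULL) == VHA_CR_SLC_IDLE_MASKFULL;
+        }
+*/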
+
+
+/*
+    Register VHA_CR_SLC_STATUS3
+*/
+#define VHA_CR_SLC_STATUS3                                (0x70A8U)
+#define VHA_CR_SLC_STATUS3_MASKFULL                       (IMG_UINT64_C(0x0FFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_SHIFT (50U)
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XF003FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_SHIFT (40U)
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFC00FFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_SHIFT (30U)
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_SHIFT (20U)
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC00FFFFF))
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_SHIFT (10U)
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF003FF))
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_SHIFT (0U)
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFC00))
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_STATUS
+*/
+#define VHA_CR_SLC_FAULT_STOP_STATUS                      (0x70B0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT            (0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK           (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS_DEBUG
+*/
+#define VHA_CR_SLC_STATUS_DEBUG                           (0x70B8U)
+#define VHA_CR_SLC_STATUS_DEBUG_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT         (16U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK        (0X0000FFFFU)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT      (0U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC
+*/
+#define VHA_CR_PERF_SLC                                   (0x70C0U)
+#define VHA_CR_PERF_SLC_MASKFULL                          (IMG_UINT64_C(0x000000000FEFFEFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_SHIFT (27U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000008000000))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_SHIFT    (26U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000004000000))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_SHIFT (25U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000002000000))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_SHIFT    (24U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_SHIFT (23U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (22U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000400000))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (21U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000200000))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_SHIFT (19U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000000080000))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_SHIFT (18U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000000040000))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_SHIFT (17U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000020000))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_SHIFT (16U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000010000))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_SHIFT (15U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_SHIFT (14U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_SHIFT (13U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_SHIFT (12U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_EN  (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_SHIFT    (11U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_SHIFT   (10U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_EN      (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_SHIFT   (9U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_EN      (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_SHIFT    (7U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_SHIFT    (6U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_SHIFT  (5U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_EN     (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_SHIFT (4U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_EN    (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_SHIFT (3U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_EN   (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_SHIFT (2U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_EN   (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_SHIFT (1U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_EN   (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN       (IMG_UINT64_C(0X0000000000000001))
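+
+/*
+    Usage sketch (illustrative only): every bit in this register resets one
+    SLC request counter, so writing MASKFULL clears them all before a new
+    measurement window (counting is gated by VHA_CR_PERF_SLC_REQ_COUNT below).
+
+        static inline void vha_perf_slc_reset_all(void __iomem *base)
+        {
+            writeq(VHA_CR_PERF_SLC_MASKFULL, base + VHA_CR_PERF_SLC);
+        }
+*/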
+
+
+/*
+    Register VHA_CR_PERF_SLC_REQ_COUNT
+*/
+#define VHA_CR_PERF_SLC_REQ_COUNT                         (0x70C8U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_MASKFULL                (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD                        (0x70D0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR                    (0x70D8U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR                    (0x70E0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR                    (0x70E8U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_CLRMSK     (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE                     (0x70F0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_SHIFT       (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_CLRMSK      (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD                      (0x70F8U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_CLRMSK       (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD                        (0x7100U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR                        (0x7108U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD                       (0x7110U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_CLRMSK        (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD                       (0x7118U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_CLRMSK        (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR                        (0x7120U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD                   (0x7128U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD               (0x7130U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD               (0x7138U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD               (0x7140U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD                (0x7148U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_SHIFT  (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_CLRMSK (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD                 (0x7150U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_CLRMSK  (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD                   (0x7158U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMM_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD                   (0x7160U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMM_REQ_WR_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD                  (0x7168U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_CLRMSK   (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD                  (0x7170U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_CLRMSK   (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD                   (0x7178U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD                        (0x7180U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD                   (0x7188U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD                        (0x7190U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_CLRMSK         (0x00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD                   (0x7198U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_CLRMSK    (0x00000000U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_ONE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE                    (0x7468U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U)
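+
+/*
+    Usage sketch (illustrative only, assuming the usual convention that an
+    address field holds the address right-shifted by ALIGNSHIFT): programming
+    range one. Both addresses must be ALIGNSIZE (2 MB) aligned; "page_size"
+    is the raw 3-bit PAGE_SIZE field encoding.
+
+        static inline void vha_set_page_size_range_one(void __iomem *base,
+                                                       uint64_t start,
+                                                       uint64_t end,
+                                                       uint64_t page_size)
+        {
+            uint64_t v =
+                (page_size << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT) |
+                ((end >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT)
+                       << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT) |
+                ((start >> VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT)
+                        << VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT);
+
+            writeq(v & VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL,
+                   base + VHA_CR_MMU_PAGE_SIZE_RANGE_ONE);
+        }
+*/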
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_TWO
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO                    (0x7470U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL           (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT    (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT     (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT    (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_THREE
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE                  (0x7478U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL         (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT  (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT   (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT  (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+    Register VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR
+*/
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR                   (0x7480U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL          (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT   (38U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK  (IMG_UINT64_C(0XFFFFFE3FFFFFFFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT    (19U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK   (IMG_UINT64_C(0XFFFFFFC00007FFFF))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT   (0U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFF80000))
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U)
+#define VHA_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_MASK               (0x00000003U)
+/*
+Reserved value */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_RESERVED           (0x00000000U)
+/*
+Addresses interleaved between Cache Bank using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING     (0x00000001U)
+/*
+Addresses are interleaved between Cache Banks on a Cacheline boundary */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_LINEAR             (0x00000002U)
+/*
+Addresses interleaved between Cache Banks using an XOR hash of the address bits below the 4KB page granularity */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_IN_PAGE_HASH       (0x00000003U)
+
+
+/*
+    Register VHA_CR_SLC_CTRL
+*/
+#define VHA_CR_SLC_CTRL                                   (0x7488U)
+#define VHA_CR_SLC_CTRL_MASKFULL                          (IMG_UINT64_C(0x000000000001FFF3))
+#define VHA_CR_SLC_CTRL_RESERVED_SHIFT                    (7U)
+#define VHA_CR_SLC_CTRL_RESERVED_CLRMSK                   (0XFFFE007FU)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_SHIFT                  (4U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_CLRMSK                 (0XFFFFFF8FU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_SHIFT                   (0U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK                  (0XFFFFFFFCU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_RESERVED                (0x00000000U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_PVR_V3_HASHING          (0X00000001U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_LINEAR                  (0X00000002U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_IN_PAGE_HASH            (0X00000003U)
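+
+/*
+    Usage sketch (illustrative only): selecting a hash mode with a
+    read-modify-write, since VHA_CR_SLC_CTRL also carries MAX_FENCES.
+
+        static inline void vha_set_slc_hash_mode(void __iomem *base, uint64_t mode)
+        {
+            uint64_t v = readq(base + VHA_CR_SLC_CTRL);
+
+            v &= VHA_CR_SLC_CTRL_MASKFULL;           // keep only defined bits
+            v &= VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK;   // clear the 2-bit mode field
+            v |= mode & VHA_CR_SLC_CTRL_ENUM_HASH_MODE_MASK;
+            writeq(v, base + VHA_CR_SLC_CTRL);
+        }
+
+    e.g. vha_set_slc_hash_mode(base, VHA_CR_SLC_CTRL_HASH_MODE_LINEAR);
+*/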
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_CTRL
+*/
+#define VHA_CR_SLC_FAULT_STOP_CTRL                        (0x7498U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_MASKFULL               (IMG_UINT64_C(0x000000000003FFFF))
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_SHIFT              (17U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_CLRMSK             (0XFFFDFFFFU)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_EN                 (0X00020000U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_SHIFT           (0U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING0
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0                     (0x7500U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_15_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_14_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_13_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_12_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_11_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_10_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_SHIFT        (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_9_CLRMSK       (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_SHIFT        (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_8_CLRMSK       (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_SHIFT        (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_7_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_SHIFT        (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_6_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_SHIFT        (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_5_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_SHIFT        (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_4_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_SHIFT        (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_3_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_SHIFT        (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_2_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_SHIFT        (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_1_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_SHIFT        (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING0_OSID_0_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+    Register VHA_CR_MMU_OSID_CTXT_MAPPING1
+*/
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1                     (0x7508U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_MASKFULL            (IMG_UINT64_C(0x7777777777777777))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_SHIFT       (60U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_31_CLRMSK      (IMG_UINT64_C(0X8FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_SHIFT       (56U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_30_CLRMSK      (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_SHIFT       (52U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_29_CLRMSK      (IMG_UINT64_C(0XFF8FFFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_SHIFT       (48U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_28_CLRMSK      (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_SHIFT       (44U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_27_CLRMSK      (IMG_UINT64_C(0XFFFF8FFFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_SHIFT       (40U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_26_CLRMSK      (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_SHIFT       (36U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_25_CLRMSK      (IMG_UINT64_C(0XFFFFFF8FFFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_SHIFT       (32U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_24_CLRMSK      (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_SHIFT       (28U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_23_CLRMSK      (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_SHIFT       (24U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_22_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_SHIFT       (20U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_21_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_SHIFT       (16U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_20_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_SHIFT       (12U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_19_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_SHIFT       (8U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_18_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_SHIFT       (4U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_17_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_SHIFT       (0U)
+#define VHA_CR_MMU_OSID_CTXT_MAPPING1_OSID_16_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
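+/*
+    Illustrative sketch, not part of the generated register map: the two
+    MAPPING registers above hold one 3-bit context id per OSID at a 4-bit
+    stride (OSIDs 0-15 in MAPPING0, 16-31 in MAPPING1). A generic helper is
+    shown instead of the per-field macros; vha_reg_read64()/vha_reg_write64()
+    are hypothetical MMIO accessors, substitute the driver's own helpers.
+*/
+static inline void vha_set_osid_ctxt(void *base, unsigned osid, uint64_t ctxt)
+{
+	uint32_t reg = (osid < 16) ? VHA_CR_MMU_OSID_CTXT_MAPPING0
+	                           : VHA_CR_MMU_OSID_CTXT_MAPPING1;
+	unsigned shift = (osid & 0xf) * 4;
+	uint64_t val = vha_reg_read64(base, reg);
+
+	val &= ~((uint64_t)0x7 << shift);   /* clear the 3-bit context field */
+	val |= (ctxt & 0x7) << shift;       /* insert the new context id */
+	vha_reg_write64(base, reg, val);
+}
+
+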
+/*
+    Register VHA_CR_ACE_QOS_CTRL
+*/
+#define VHA_CR_ACE_QOS_CTRL                               (0x7580U)
+#define VHA_CR_ACE_QOS_CTRL_MASKFULL                      (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_SHIFT                (12U)
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_CLRMSK               (0XFFFF0FFFU)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_SHIFT                    (8U)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_CLRMSK                   (0XFFFFF0FFU)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_SHIFT                  (4U)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_CLRMSK                 (0XFFFFFF0FU)
+#define VHA_CR_ACE_QOS_CTRL_LOW_SHIFT                     (0U)
+#define VHA_CR_ACE_QOS_CTRL_LOW_CLRMSK                    (0XFFFFFFF0U)
+
+
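+/*
+    Illustrative sketch, not part of the generated register map: composing the
+    four 4-bit AXI QOS levels for VHA_CR_ACE_QOS_CTRL from the SHIFT/CLRMSK
+    macros above (~CLRMSK is the in-place field mask). The qos_* arguments are
+    hypothetical example inputs.
+*/
+static inline uint32_t vha_ace_qos_value(uint32_t qos_critical, uint32_t qos_high,
+					 uint32_t qos_medium, uint32_t qos_low)
+{
+	return ((qos_critical << VHA_CR_ACE_QOS_CTRL_CRITICAL_SHIFT) & ~VHA_CR_ACE_QOS_CTRL_CRITICAL_CLRMSK) |
+	       ((qos_high     << VHA_CR_ACE_QOS_CTRL_HIGH_SHIFT)     & ~VHA_CR_ACE_QOS_CTRL_HIGH_CLRMSK)     |
+	       ((qos_medium   << VHA_CR_ACE_QOS_CTRL_MEDIUM_SHIFT)   & ~VHA_CR_ACE_QOS_CTRL_MEDIUM_CLRMSK)   |
+	       ((qos_low      << VHA_CR_ACE_QOS_CTRL_LOW_SHIFT)      & ~VHA_CR_ACE_QOS_CTRL_LOW_CLRMSK);
+}
+
+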
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MASK (0x00000003U)
+/*
+Low */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_LOW (0x00000000U)
+/*
+Medium */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_HIGH (0x00000002U)
+/*
+Critical */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
+/*
+    Register VHA_CR_ACE_PRIORITY_MAPPING_CTRL
+*/
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL                  (0x7588U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MASKFULL         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_SHIFT        (62U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_MEDIUM       (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH         (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CRITICAL     (IMG_UINT64_C(0xc000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_SHIFT   (32U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RESERVED_CLRMSK  (IMG_UINT64_C(0XC0000000FFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_SHIFT      (30U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_MEDIUM     (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_HIGH       (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_B_CRITICAL   (IMG_UINT64_C(0x00000000c0000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_SHIFT      (28U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_MEDIUM     (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_HIGH       (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TRS_A_CRITICAL   (IMG_UINT64_C(0x0000000030000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_SHIFT       (26U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_LOW         (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_MEDIUM      (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_HIGH        (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_POOL_CRITICAL    (IMG_UINT64_C(0x000000000c000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_SHIFT     (24U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_LOW       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_MEDIUM    (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_HIGH      (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_CRITICAL  (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_SHIFT     (22U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_LOW       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_MEDIUM    (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_HIGH      (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_CRITICAL  (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_SHIFT    (20U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_LOW      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_MEDIUM   (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_HIGH     (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_PSF_CRITICAL (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_SHIFT        (18U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_MEDIUM       (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_HIGH         (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_EWO_CRITICAL     (IMG_UINT64_C(0x00000000000c0000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_SHIFT    (16U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_LOW      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_MEDIUM   (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_HIGH     (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_CRITICAL (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_SHIFT       (14U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_LOW         (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_MEDIUM      (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_HIGH        (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ABUF_CRITICAL    (IMG_UINT64_C(0x000000000000c000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_SHIFT       (12U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_LOW         (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_MEDIUM      (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_HIGH        (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CBUF_CRITICAL    (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_SHIFT     (10U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_LOW       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_MEDIUM    (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_HIGH      (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IBUF_0_CRITICAL  (IMG_UINT64_C(0x0000000000000c00))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_SHIFT      (8U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_MEDIUM     (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_HIGH       (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_4_CRITICAL   (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_SHIFT      (6U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_MEDIUM     (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_HIGH       (IMG_UINT64_C(0x0000000000000080))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_3_CRITICAL   (IMG_UINT64_C(0x00000000000000c0))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_SHIFT      (4U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_MEDIUM     (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_HIGH       (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_2_CRITICAL   (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_SHIFT      (2U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_MEDIUM     (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_HIGH       (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_1_CRITICAL   (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_SHIFT      (0U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_LOW        (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_MEDIUM     (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_HIGH       (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_CRITICAL   (IMG_UINT64_C(0x0000000000000003))  
+
+
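+/*
+    Illustrative sketch, not part of the generated register map: a priority
+    mapping is built by OR-ing exactly one _LOW/_MEDIUM/_HIGH/_CRITICAL value
+    per requestor field of VHA_CR_ACE_PRIORITY_MAPPING_CTRL. The settings
+    below are an arbitrary example; the real policy is platform specific.
+*/
+static inline uint64_t vha_ace_priority_example(void)
+{
+	return VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CRITICAL   |
+	       VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_WR_HIGH    |
+	       VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMM_RD_HIGH    |
+	       VHA_CR_ACE_PRIORITY_MAPPING_CTRL_OUTPACK_MEDIUM |
+	       VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CMD_0_LOW; /* _LOW encodes as 0 */
+}
+
+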
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x00000006U)
+/*
+Write-through Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_WRITE_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x00000007U)
+/*
+Write-back Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_WRITE_ALLOCATE (0x0000000fU)
+
+
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x0000000aU)
+/*
+Write-through Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_READ_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x0000000bU)
+/*
+Write-back Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_READ_ALLOCATE (0x0000000fU)
+
+
+/*
+Non-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_NON_SHAREABLE    (0x00000000U)
+/*
+System */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_SYSTEM           (0x00000001U)
+
+
+/*
+Inner-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_INNER_SHAREABLE   (0x00000000U)
+/*
+Outer-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_OUTER_SHAREABLE   (0x00000001U)
+
+
+/*
+    Register VHA_CR_ACE_CTRL
+*/
+#define VHA_CR_ACE_CTRL                                   (0x7590U)
+#define VHA_CR_ACE_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000007FCFFF))
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_SHIFT                   (19U)
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_CLRMSK                  (0XFF87FFFFU)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT              (15U)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK             (0XFFF87FFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN    (0X00004000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_SHIFT                 (8U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK                (0XFFFFF0FFU)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (00000000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE     (0X00000100U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000200U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE  (0X00000300U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0X00000600U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0X00000E00U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0X00000700U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0X00000F00U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_SHIFT                 (4U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK                (0XFFFFFF0FU)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (00000000U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE     (0X00000010U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000020U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE  (0X00000030U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0X000000A0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0X000000E0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0X000000B0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0X000000F0U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_SHIFT                  (2U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK                 (0XFFFFFFF3U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_SHIFT                  (1U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_CLRMSK                 (0XFFFFFFFDU)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE        (00000000U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE        (0X00000002U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT              (0U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE      (00000000U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM             (0X00000001U)
+
+
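+/*
+    Illustrative sketch, not part of the generated register map: composing
+    VHA_CR_ACE_CTRL from the enumerated field values above, here write-back
+    allocating MMU transactions in the outer-shareable coherent domain with
+    system-shareable non-coherent traffic. Example configuration only.
+*/
+static inline uint32_t vha_ace_ctrl_example(void)
+{
+	return VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE |
+	       VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE  |
+	       VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE            |
+	       VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM;
+}
+
+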
+/*
+    Register VHA_CR_ACE_STATUS
+*/
+#define VHA_CR_ACE_STATUS                                 (0x7598U)
+#define VHA_CR_ACE_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_SHIFT             (28U)
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_CLRMSK            (0X0FFFFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_SHIFT             (24U)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_CLRMSK            (0XF0FFFFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_SHIFT             (20U)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_CLRMSK            (0XFF0FFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_SHIFT             (16U)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_CLRMSK            (0XFFF0FFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_SHIFT             (12U)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_CLRMSK            (0XFFFF0FFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_SHIFT             (8U)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_CLRMSK            (0XFFFFF0FFU)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_SHIFT             (4U)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_CLRMSK            (0XFFFFFF0FU)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_SHIFT             (0U)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_CLRMSK            (0XFFFFFFF0U)
+
+
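+/*
+    Illustrative sketch, not part of the generated register map: each bus in
+    VHA_CR_ACE_STATUS carries a 4-bit read and a 4-bit write error field at an
+    8-bit stride; a non-zero nibble flags an error on that bus.
+    vha_reg_read64() is a hypothetical MMIO accessor.
+*/
+static inline int vha_ace_bus_has_error(void *base, unsigned bus)
+{
+	uint32_t status = (uint32_t)vha_reg_read64(base, VHA_CR_ACE_STATUS);
+	uint32_t rd = (status >> (bus * 8)) & 0xf;     /* RD_BUSn at bit 8*n   */
+	uint32_t wr = (status >> (bus * 8 + 4)) & 0xf; /* WR_BUSn at bit 8*n+4 */
+
+	return rd != 0 || wr != 0;
+}
+
+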
+/*
+    Register VHA_CR_PWR_MAN_HYSTERESIS
+*/
+#define VHA_CR_PWR_MAN_HYSTERESIS                         (0x7608U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_SHIFT             (0U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_CLRMSK            (0XFFFFFFE0U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL                         (0x76A8U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
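+/*
+    Illustrative sketch, not part of the generated register map: requesting a
+    full OS0 MMU cache invalidation (all contexts, all table levels).
+    Completion can be polled via VHA_CR_OS0_MMU_CTRL_INVAL_STATUS below.
+    vha_reg_write64() is a hypothetical MMIO accessor.
+*/
+static inline void vha_os0_mmu_invalidate_all(void *base)
+{
+	vha_reg_write64(base, VHA_CR_OS0_MMU_CTRL_INVAL,
+			VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+			VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN |
+			VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN |
+			VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN);
+}
+
+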
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL_STATUS
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS                  (0x76B0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_MASKFULL         (IMG_UINT64_C(0x0000000080000001))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_SHIFT     (31U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_CLRMSK    (0X7FFFFFFFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PARITY_EN        (0X80000000U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_SHIFT    (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_STATUS_PENDING_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT              (0x76B8U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING                      (0x76C0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
+
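+/*
+    Illustrative sketch, not part of the generated register map: the page
+    catalogue base is programmed as a 4 KB-aligned address shifted down by
+    BASE_ADDR_ALIGNSHIFT, after selecting the context through the
+    MAPPING_CONTEXT register above. vha_reg_write64() is a hypothetical MMIO
+    accessor.
+*/
+static inline void vha_os0_set_cbase(void *base, uint32_t ctx, uint64_t phys)
+{
+	vha_reg_write64(base, VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT, ctx);
+	vha_reg_write64(base, VHA_CR_OS0_MMU_CBASE_MAPPING,
+			(phys >> VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) &
+			~VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+}
+
+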
+/*
+    Register VHA_CR_OS0_MMU_CTRL_LEGACY
+*/
+#define VHA_CR_OS0_MMU_CTRL_LEGACY                        (0x76E8U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_LEGACY_RESERVED_EN            (0X00000001U)
+
+
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK               (0x00000007U)
+/*
+Unprivileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_DATA (0x00000000U)
+/*
+Privileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_DATA (0x00000001U)
+/*
+Unprivileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_DATA (0x00000002U)
+/*
+Privileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_DATA (0x00000003U)
+/*
+Unprivileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_INSTRUCTION (0x00000004U)
+/*
+Privileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_INSTRUCTION (0x00000005U)
+/*
+Unprivileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_INSTRUCTION (0x00000006U)
+/*
+Privileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_INSTRUCTION (0x00000007U)
+
+
+/*
+    Register VHA_CR_ACE_PROT_CTRL
+*/
+#define VHA_CR_ACE_PROT_CTRL                              (0x76F8U)
+#define VHA_CR_ACE_PROT_CTRL_MASKFULL                     (IMG_UINT64_C(0x0707070707070707))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_SHIFT                  (56U)
+#define VHA_CR_ACE_PROT_CTRL_OSID7_CLRMSK                 (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0300000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0400000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0500000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0600000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0700000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_SHIFT                  (48U)
+#define VHA_CR_ACE_PROT_CTRL_OSID6_CLRMSK                 (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0003000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0005000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0006000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0007000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_SHIFT                  (40U)
+#define VHA_CR_ACE_PROT_CTRL_OSID5_CLRMSK                 (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000030000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000050000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000060000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000070000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_SHIFT                  (32U)
+#define VHA_CR_ACE_PROT_CTRL_OSID4_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000300000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000500000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000600000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000700000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_SHIFT                  (24U)
+#define VHA_CR_ACE_PROT_CTRL_OSID3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000005000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000006000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000007000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_SHIFT                  (16U)
+#define VHA_CR_ACE_PROT_CTRL_OSID2_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_SHIFT                  (8U)
+#define VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_SHIFT                  (0U)
+#define VHA_CR_ACE_PROT_CTRL_OSID0_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000007))  
+
+
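+/*
+    Illustrative sketch, not part of the generated register map: setting the
+    AxPROT encoding for one OSID using the 8-bit-per-OSID layout above (3 bits
+    used per field, per VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK).
+    vha_reg_read64()/vha_reg_write64() are hypothetical MMIO accessors.
+*/
+static inline void vha_set_osid_prot(void *base, unsigned osid, uint64_t prot)
+{
+	unsigned shift = osid * 8;	/* OSID0 at bit 0, ..., OSID7 at bit 56 */
+	uint64_t val = vha_reg_read64(base, VHA_CR_ACE_PROT_CTRL);
+
+	val &= ~((uint64_t)VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK << shift);
+	val |= (prot & VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK) << shift;
+	vha_reg_write64(base, VHA_CR_ACE_PROT_CTRL, val);
+}
+
+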
+/*
+    Register VHA_CR_OS0_MMU_CTRL
+*/
+#define VHA_CR_OS0_MMU_CTRL                               (0x7708U)
+#define VHA_CR_OS0_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS1_MMU_CTRL
+*/
+#define VHA_CR_OS1_MMU_CTRL                               (0x7710U)
+#define VHA_CR_OS1_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS1_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS2_MMU_CTRL
+*/
+#define VHA_CR_OS2_MMU_CTRL                               (0x7718U)
+#define VHA_CR_OS2_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS2_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE0_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1                    (0x7B00U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE0_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2                    (0x7B08U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE0_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
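+/*
+    Illustrative sketch, not part of the generated register map: decoding the
+    CORE0 MMU fault syndrome with the SHIFT/CLRMSK pattern. The same pair of
+    STATUS registers repeats for cores 1-7 at a 0x10 byte stride.
+    vha_reg_read64() is a hypothetical MMIO accessor.
+*/
+static inline uint64_t vha_core0_fault_address(void *base)
+{
+	uint64_t s1 = vha_reg_read64(base, VHA_CR_CORE0_MMU_FAULT_STATUS1);
+
+	if (!(s1 & VHA_CR_CORE0_MMU_FAULT_STATUS1_FAULT_EN))
+		return 0;	/* no fault latched */
+	/* keep the ADDRESS field in place (register bits 47:4) */
+	return s1 & ~VHA_CR_CORE0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK;
+}
+
+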
+/*
+    Register VHA_CR_CORE1_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1                    (0x7B10U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE1_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2                    (0x7B18U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE1_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE2_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1                    (0x7B20U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE2_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2                    (0x7B28U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE2_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE3_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1                    (0x7B30U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE3_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2                    (0x7B38U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE3_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE4_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1                    (0x7B40U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE4_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2                    (0x7B48U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE4_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE5_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1                    (0x7B50U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE5_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2                    (0x7B58U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE5_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE6_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1                    (0x7B60U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE6_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2                    (0x7B68U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE6_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_CORE7_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1                    (0x7B70U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_MASKFULL           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_LEVEL_SHIFT        (62U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_LEVEL_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_REQ_ID_SHIFT       (56U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_REQ_ID_CLRMSK      (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_CONTEXT_SHIFT      (48U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_CONTEXT_CLRMSK     (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_ADDRESS_SHIFT      (4U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_ADDRESS_CLRMSK     (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_RNW_SHIFT          (3U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_RNW_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_RNW_EN             (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_TYPE_SHIFT         (1U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_TYPE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_FAULT_SHIFT        (0U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_FAULT_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS1_FAULT_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE7_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2                    (0x7B78U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_MASKFULL           (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_WRITEBACK_SHIFT    (29U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK   (0XDFFFFFFFU)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_WRITEBACK_EN       (0X20000000U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT  (28U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_CLEANUNIQUE_EN     (0X10000000U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_BANK_SHIFT         (24U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_BANK_CLRMSK        (0XF0FFFFFFU)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT    (16U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK   (0XFF00FFFFU)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT    (10U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_FBM_FAULT_EN       (0X00000400U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_BIF_ID_SHIFT       (0U)
+#define VHA_CR_CORE7_MMU_FAULT_STATUS2_BIF_ID_CLRMSK      (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_WM_WL_CONTROL
+*/
+#define VHA_CR_WM_WL_CONTROL                              (0x10000U)
+#define VHA_CR_WM_WL_CONTROL_MASKFULL                     (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_WM_WL_CONTROL_WL_START_SHIFT               (0U)
+#define VHA_CR_WM_WL_CONTROL_WL_START_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_WM_WL_CONTROL_WL_START_EN                  (0X00000001U)
+
+
+#define VHA_CR_WM_STATUS_STATE_MASK                       (0x00000007U)
+/*
+The WM is ready to be programmed */
+#define VHA_CR_WM_STATUS_STATE_IDLE                       (0x00000000U)
+/*
+The WM is initiating the execution of a Workload*/
+#define VHA_CR_WM_STATUS_STATE_PENDING                    (0x00000001U)
+/*
+A Workload is being run */
+#define VHA_CR_WM_STATUS_STATE_RUN                        (0x00000002U)
+/*
+The Workload had an error and execution is halted. A WM reset is required */
+#define VHA_CR_WM_STATUS_STATE_HALTED                     (0x00000003U)
+/*
+The Workload Manager's Response FIFO is full. Workloads cannot be kicked in the WM unless a RESPONSE_FIFO_READ is issued*/
+#define VHA_CR_WM_STATUS_STATE_RESPONSE_FIFO_FULL         (0x00000004U)
+
+
+/*
+    Register VHA_CR_WM_STATUS
+*/
+#define VHA_CR_WM_STATUS                                  (0x10008U)
+#define VHA_CR_WM_STATUS_MASKFULL                         (IMG_UINT64_C(0x0000000001000007))
+#define VHA_CR_WM_STATUS_PARITY_SHIFT                     (24U)
+#define VHA_CR_WM_STATUS_PARITY_CLRMSK                    (0XFEFFFFFFU)
+#define VHA_CR_WM_STATUS_PARITY_EN                        (0X01000000U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_SHIFT              (0U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_CLRMSK             (0XFFFFFFF8U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_IDLE               (0X00000000U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_PENDING            (0X00000001U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_RUN                (0X00000002U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_HALTED             (0X00000003U)
+#define VHA_CR_WM_STATUS_CURRENT_STATE_RESPONSE_FIFO_FULL (0X00000004U)
+
+
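+/*
+    Example (illustrative sketch, not part of the generated header): reading
+    the WM state machine described above. CURRENT_STATE is a 3-bit field
+    holding one of the VHA_CR_WM_STATUS_STATE_* values. vha_readl() is a
+    hypothetical 32-bit MMIO read helper and wm_reset() a hypothetical
+    recovery path.
+
+        uint32_t s = vha_readl(regs + VHA_CR_WM_STATUS);
+        uint32_t state = (s & ~VHA_CR_WM_STATUS_CURRENT_STATE_CLRMSK)
+                         >> VHA_CR_WM_STATUS_CURRENT_STATE_SHIFT;
+        if (state == VHA_CR_WM_STATUS_STATE_HALTED)
+            wm_reset();    // per the note above, HALTED requires a WM reset
+*/
+
+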
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_WDT_SHIFT              (4U)
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_WDT_CLRMSK             (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_WDT_EN                 (0X00000010U)
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_IDLE_WDT_SHIFT         (3U)
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_IDLE_WDT_CLRMSK        (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_TYPE_WM_WL_IDLE_WDT_EN            (0X00000008U)
+#define VHA_CR_WM_EVENT_TYPE_WM_SOCIF_WDT_SHIFT           (2U)
+#define VHA_CR_WM_EVENT_TYPE_WM_SOCIF_WDT_CLRMSK          (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_TYPE_WM_SOCIF_WDT_EN              (0X00000004U)
+#define VHA_CR_WM_EVENT_TYPE_LOGIC_FAULT_SHIFT            (1U)
+#define VHA_CR_WM_EVENT_TYPE_LOGIC_FAULT_CLRMSK           (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_TYPE_LOGIC_FAULT_EN               (0X00000002U)
+#define VHA_CR_WM_EVENT_TYPE_RESPONSE_FIFO_READY_SHIFT    (0U)
+#define VHA_CR_WM_EVENT_TYPE_RESPONSE_FIFO_READY_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_TYPE_RESPONSE_FIFO_READY_EN       (0X00000001U)
+
+
+#define VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_SHIFT          (31U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_CLRMSK         (0X7FFFFFFFU)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_EN             (0X80000000U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_WDT_SHIFT       (4U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_WDT_CLRMSK      (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_WDT_EN          (0X00000010U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_IDLE_WDT_SHIFT  (3U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_IDLE_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_WL_IDLE_WDT_EN     (0X00000008U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_SOCIF_WDT_SHIFT    (2U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_SOCIF_WDT_CLRMSK   (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_WM_SOCIF_WDT_EN       (0X00000004U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_LOGIC_FAULT_SHIFT     (1U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_LOGIC_FAULT_CLRMSK    (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_LOGIC_FAULT_EN        (0X00000002U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_SHIFT (0U)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_EVENT_ENABLE
+*/
+#define VHA_CR_WM_EVENT_ENABLE                            (0x10010U)
+#define VHA_CR_WM_EVENT_ENABLE_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_WDT_SHIFT            (4U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_WDT_CLRMSK           (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_WDT_EN               (0X00000010U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_IDLE_WDT_SHIFT       (3U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_IDLE_WDT_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_WL_IDLE_WDT_EN          (0X00000008U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_SOCIF_WDT_SHIFT         (2U)
+#define VHA_CR_WM_EVENT_ENABLE_WM_SOCIF_WDT_CLRMSK        (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_ENABLE_WM_SOCIF_WDT_EN            (0X00000004U)
+#define VHA_CR_WM_EVENT_ENABLE_LOGIC_FAULT_SHIFT          (1U)
+#define VHA_CR_WM_EVENT_ENABLE_LOGIC_FAULT_CLRMSK         (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_ENABLE_LOGIC_FAULT_EN             (0X00000002U)
+#define VHA_CR_WM_EVENT_ENABLE_RESPONSE_FIFO_READY_SHIFT  (0U)
+#define VHA_CR_WM_EVENT_ENABLE_RESPONSE_FIFO_READY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_ENABLE_RESPONSE_FIFO_READY_EN     (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_EVENT_STATUS
+*/
+#define VHA_CR_WM_EVENT_STATUS                            (0x10018U)
+#define VHA_CR_WM_EVENT_STATUS_MASKFULL                   (IMG_UINT64_C(0x000000008000001F))
+#define VHA_CR_WM_EVENT_STATUS_PARITY_SHIFT               (31U)
+#define VHA_CR_WM_EVENT_STATUS_PARITY_CLRMSK              (0X7FFFFFFFU)
+#define VHA_CR_WM_EVENT_STATUS_PARITY_EN                  (0X80000000U)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_WDT_SHIFT            (4U)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_WDT_CLRMSK           (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_WDT_EN               (0X00000010U)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_IDLE_WDT_SHIFT       (3U)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_IDLE_WDT_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_STATUS_WM_WL_IDLE_WDT_EN          (0X00000008U)
+#define VHA_CR_WM_EVENT_STATUS_WM_SOCIF_WDT_SHIFT         (2U)
+#define VHA_CR_WM_EVENT_STATUS_WM_SOCIF_WDT_CLRMSK        (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_STATUS_WM_SOCIF_WDT_EN            (0X00000004U)
+#define VHA_CR_WM_EVENT_STATUS_LOGIC_FAULT_SHIFT          (1U)
+#define VHA_CR_WM_EVENT_STATUS_LOGIC_FAULT_CLRMSK         (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_STATUS_LOGIC_FAULT_EN             (0X00000002U)
+#define VHA_CR_WM_EVENT_STATUS_RESPONSE_FIFO_READY_SHIFT  (0U)
+#define VHA_CR_WM_EVENT_STATUS_RESPONSE_FIFO_READY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_STATUS_RESPONSE_FIFO_READY_EN     (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_EVENT_STATUS_DISABLE
+*/
+#define VHA_CR_WM_EVENT_STATUS_DISABLE                    (0x10028U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_MASKFULL           (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_WDT_SHIFT    (4U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_WDT_CLRMSK   (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_WDT_EN       (0X00000010U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_IDLE_WDT_SHIFT (3U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_IDLE_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_WL_IDLE_WDT_EN  (0X00000008U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_SOCIF_WDT_SHIFT (2U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_SOCIF_WDT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_WM_SOCIF_WDT_EN    (0X00000004U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_LOGIC_FAULT_SHIFT  (1U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_LOGIC_FAULT_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_LOGIC_FAULT_EN     (0X00000002U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_RESPONSE_FIFO_READY_SHIFT (0U)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_RESPONSE_FIFO_READY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_STATUS_DISABLE_RESPONSE_FIFO_READY_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_EVENT_CLEAR
+*/
+#define VHA_CR_WM_EVENT_CLEAR                             (0x10020U)
+#define VHA_CR_WM_EVENT_CLEAR_MASKFULL                    (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_WDT_SHIFT             (4U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_WDT_CLRMSK            (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_WDT_EN                (0X00000010U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_IDLE_WDT_SHIFT        (3U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_IDLE_WDT_CLRMSK       (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_WL_IDLE_WDT_EN           (0X00000008U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_SOCIF_WDT_SHIFT          (2U)
+#define VHA_CR_WM_EVENT_CLEAR_WM_SOCIF_WDT_CLRMSK         (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_CLEAR_WM_SOCIF_WDT_EN             (0X00000004U)
+#define VHA_CR_WM_EVENT_CLEAR_LOGIC_FAULT_SHIFT           (1U)
+#define VHA_CR_WM_EVENT_CLEAR_LOGIC_FAULT_CLRMSK          (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_CLEAR_LOGIC_FAULT_EN              (0X00000002U)
+#define VHA_CR_WM_EVENT_CLEAR_RESPONSE_FIFO_READY_SHIFT   (0U)
+#define VHA_CR_WM_EVENT_CLEAR_RESPONSE_FIFO_READY_CLRMSK  (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_CLEAR_RESPONSE_FIFO_READY_EN      (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_EVENT_INJECT
+*/
+#define VHA_CR_WM_EVENT_INJECT                            (0x10030U)
+#define VHA_CR_WM_EVENT_INJECT_MASKFULL                   (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_WDT_SHIFT            (4U)
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_WDT_CLRMSK           (0XFFFFFFEFU)
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_WDT_EN               (0X00000010U)
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_IDLE_WDT_SHIFT       (3U)
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_IDLE_WDT_CLRMSK      (0XFFFFFFF7U)
+#define VHA_CR_WM_EVENT_INJECT_WM_WL_IDLE_WDT_EN          (0X00000008U)
+#define VHA_CR_WM_EVENT_INJECT_WM_SOCIF_WDT_SHIFT         (2U)
+#define VHA_CR_WM_EVENT_INJECT_WM_SOCIF_WDT_CLRMSK        (0XFFFFFFFBU)
+#define VHA_CR_WM_EVENT_INJECT_WM_SOCIF_WDT_EN            (0X00000004U)
+#define VHA_CR_WM_EVENT_INJECT_LOGIC_FAULT_SHIFT          (1U)
+#define VHA_CR_WM_EVENT_INJECT_LOGIC_FAULT_CLRMSK         (0XFFFFFFFDU)
+#define VHA_CR_WM_EVENT_INJECT_LOGIC_FAULT_EN             (0X00000002U)
+#define VHA_CR_WM_EVENT_INJECT_RESPONSE_FIFO_READY_SHIFT  (0U)
+#define VHA_CR_WM_EVENT_INJECT_RESPONSE_FIFO_READY_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_WM_EVENT_INJECT_RESPONSE_FIFO_READY_EN     (0X00000001U)
+
+
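+/*
+    Example (illustrative sketch, not part of the generated header): the
+    ENABLE/STATUS/CLEAR trio above follows the usual event-register pattern.
+    Unmask the events of interest, then in the interrupt path read the status
+    and write the handled bits to the clear register (write-one-to-clear is
+    this sketch's assumption). WM_EVENT_INJECT, as the name suggests, raises
+    event bits from software, e.g. for interrupt-path testing.
+    vha_readl()/vha_writel() are hypothetical MMIO helpers.
+
+        vha_writel(VHA_CR_WM_EVENT_ENABLE_RESPONSE_FIFO_READY_EN |
+                   VHA_CR_WM_EVENT_ENABLE_LOGIC_FAULT_EN,
+                   regs + VHA_CR_WM_EVENT_ENABLE);
+
+        // interrupt handler:
+        uint32_t ev = vha_readl(regs + VHA_CR_WM_EVENT_STATUS);
+        vha_writel(ev & (uint32_t)VHA_CR_WM_EVENT_CLEAR_MASKFULL,
+                   regs + VHA_CR_WM_EVENT_CLEAR);
+*/
+
+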
+/*
+    Register VHA_CR_WM_WL_ID
+*/
+#define VHA_CR_WM_WL_ID                                   (0x10040U)
+#define VHA_CR_WM_WL_ID_MASKFULL                          (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_WM_WL_ID_WL_ID_SHIFT                       (0U)
+#define VHA_CR_WM_WL_ID_WL_ID_CLRMSK                      (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_WM_DEBUG_CONTROL
+*/
+#define VHA_CR_WM_DEBUG_CONTROL                           (0x10048U)
+#define VHA_CR_WM_DEBUG_CONTROL_MASKFULL                  (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_WM_DEBUG_CONTROL_BW_ENABLE_SHIFT           (1U)
+#define VHA_CR_WM_DEBUG_CONTROL_BW_ENABLE_CLRMSK          (0XFFFFFFFDU)
+#define VHA_CR_WM_DEBUG_CONTROL_BW_ENABLE_EN              (0X00000002U)
+#define VHA_CR_WM_DEBUG_CONTROL_PERF_ENABLE_SHIFT         (0U)
+#define VHA_CR_WM_DEBUG_CONTROL_PERF_ENABLE_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_WM_DEBUG_CONTROL_PERF_ENABLE_EN            (0X00000001U)
+
+
+/*
+WM_WL_WDT is Disabled */
+#define VHA_CR_WM_WL_WDT_CTRL_WL_WDT_CTRL_NONE            (0x00000000U)
+/*
+WM_WL_WDT is Cleared when the WM kicks a Workload */
+#define VHA_CR_WM_WL_WDT_CTRL_WL_WDT_CTRL_KICK_WL         (0x00000001U)
+
+
+/*
+    Register VHA_CR_WM_WL_WDT_CTRL
+*/
+#define VHA_CR_WM_WL_WDT_CTRL                             (0x10100U)
+#define VHA_CR_WM_WL_WDT_CTRL_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_WM_WL_WDT_CTRL_MODE_SHIFT                  (0U)
+#define VHA_CR_WM_WL_WDT_CTRL_MODE_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_WM_WL_WDT_CTRL_MODE_NONE                   (0X00000000U)
+#define VHA_CR_WM_WL_WDT_CTRL_MODE_KICK_WL                (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_WL_WDT_COMPAREMATCH
+*/
+#define VHA_CR_WM_WL_WDT_COMPAREMATCH                     (0x10108U)
+#define VHA_CR_WM_WL_WDT_COMPAREMATCH_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_WL_WDT_COMPAREMATCH_VALUE_SHIFT         (0U)
+#define VHA_CR_WM_WL_WDT_COMPAREMATCH_VALUE_CLRMSK        (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_WL_WDT_TIMER
+*/
+#define VHA_CR_WM_WL_WDT_TIMER                            (0x10110U)
+#define VHA_CR_WM_WL_WDT_TIMER_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_WL_WDT_TIMER_VALUE_SHIFT                (0U)
+#define VHA_CR_WM_WL_WDT_TIMER_VALUE_CLRMSK               (0X00000000U)
+
+
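+/*
+    Example (illustrative sketch, not part of the generated header): arming
+    the Workload watchdog. COMPAREMATCH holds the expiry threshold, TIMER is
+    the free-running count (the tick source is hardware-defined), and CTRL
+    selects when the counter is cleared. The WL_IDLE and SOCIF watchdogs
+    below follow the same CTRL/COMPAREMATCH/TIMER layout. vha_writel() is a
+    hypothetical MMIO write helper and timeout_ticks a caller-chosen value.
+
+        vha_writel(timeout_ticks, regs + VHA_CR_WM_WL_WDT_COMPAREMATCH);
+        vha_writel(VHA_CR_WM_WL_WDT_CTRL_MODE_KICK_WL,   // cleared on each WL kick
+                   regs + VHA_CR_WM_WL_WDT_CTRL);
+*/
+
+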
+/*
+WL_IDLE_WDT is Disabled */
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_WL_IDLE_WDT_CTRL_NONE  (0x00000000U)
+/*
+WL_IDLE_WDT is Enabled  */
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_WL_IDLE_WDT_CTRL_ENABLED (0x00000001U)
+
+
+/*
+    Register VHA_CR_WM_WL_IDLE_WDT_CTRL
+*/
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL                        (0x10118U)
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_MODE_SHIFT             (0U)
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_MODE_CLRMSK            (0XFFFFFFFEU)
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_MODE_NONE              (0X00000000U)
+#define VHA_CR_WM_WL_IDLE_WDT_CTRL_MODE_ENABLED           (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_WL_IDLE_WDT_COMPAREMATCH
+*/
+#define VHA_CR_WM_WL_IDLE_WDT_COMPAREMATCH                (0x10120U)
+#define VHA_CR_WM_WL_IDLE_WDT_COMPAREMATCH_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_WL_IDLE_WDT_COMPAREMATCH_VALUE_SHIFT    (0U)
+#define VHA_CR_WM_WL_IDLE_WDT_COMPAREMATCH_VALUE_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_WL_IDLE_WDT_TIMER
+*/
+#define VHA_CR_WM_WL_IDLE_WDT_TIMER                       (0x10128U)
+#define VHA_CR_WM_WL_IDLE_WDT_TIMER_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_WL_IDLE_WDT_TIMER_VALUE_SHIFT           (0U)
+#define VHA_CR_WM_WL_IDLE_WDT_TIMER_VALUE_CLRMSK          (0X00000000U)
+
+
+/*
+WM_SOCIF_WDT is Disabled */
+#define VHA_CR_WM_SOCIF_WDT_CTRL_WM_SOCIF_WDT_CTRL_NONE   (0x00000000U)
+/*
+WM_SOCIF_WDT is Enabled  */
+#define VHA_CR_WM_SOCIF_WDT_CTRL_WM_SOCIF_WDT_CTRL_ENABLED (0x00000001U)
+
+
+/*
+    Register VHA_CR_WM_SOCIF_WDT_CTRL
+*/
+#define VHA_CR_WM_SOCIF_WDT_CTRL                          (0x10130U)
+#define VHA_CR_WM_SOCIF_WDT_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_WM_SOCIF_WDT_CTRL_MODE_SHIFT               (0U)
+#define VHA_CR_WM_SOCIF_WDT_CTRL_MODE_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_WM_SOCIF_WDT_CTRL_MODE_NONE                (0X00000000U)
+#define VHA_CR_WM_SOCIF_WDT_CTRL_MODE_ENABLED             (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_SOCIF_WDT_COMPAREMATCH
+*/
+#define VHA_CR_WM_SOCIF_WDT_COMPAREMATCH                  (0x10138U)
+#define VHA_CR_WM_SOCIF_WDT_COMPAREMATCH_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_SOCIF_WDT_COMPAREMATCH_VALUE_SHIFT      (0U)
+#define VHA_CR_WM_SOCIF_WDT_COMPAREMATCH_VALUE_CLRMSK     (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_SOCIF_WDT_TIMER
+*/
+#define VHA_CR_WM_SOCIF_WDT_TIMER                         (0x10140U)
+#define VHA_CR_WM_SOCIF_WDT_TIMER_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_SOCIF_WDT_TIMER_VALUE_SHIFT             (0U)
+#define VHA_CR_WM_SOCIF_WDT_TIMER_VALUE_CLRMSK            (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_READ
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_READ                      (0x101F8U)
+#define VHA_CR_WM_RESPONSE_FIFO_READ_MASKFULL             (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_SHIFT      (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_CLRMSK     (0XFFFFFFFEU)
+#define VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_EN         (0X00000001U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_ID
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_ID                     (0x10200U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_ID_MASKFULL            (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_ID_WL_ID_SHIFT         (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_ID_WL_ID_CLRMSK        (0XFFFF0000U)
+
+
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_MASK (0x0000000FU)
+/*
+Workload was not kicked because an interrupt was raised from a core before start; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CORE_IRQ_BEFORE_KICK (0x00000000U)
+/*
+Error while reading back CORE_MASK for a multi-cast kick*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_INDIRECT_MASK_SET_ERROR (0x00000001U)
+/*
+Error while reading back CORE_MASK for single-core checks; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_KICK_CORE_ACCESS_ERROR (0x00000002U)
+/*
+Error while checking VHA_CR_OS0_CNN_CONTROL.START; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CNN_CONTROL_START_HIGH (0x00000003U)
+/*
+Error while checking VHA_CR_OS0_CNN_STATUS; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CNN_STATUS_ERROR (0x00000004U)
+/*
+Error while reading back CORE_MASK for single-core checks; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_INT_CORE_ACCESS_ERROR (0x00000005U)
+/*
+Error while checking VHA_CR_CORE_EVENT_WM_STATUS before clear; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CORE_EVENT_ERROR (0x00000006U)
+/*
+Error while checking VHA_CR_CORE_EVENT_WM_STATUS after clear; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CORE_EVENT_NOT_CLEARED (0x00000007U)
+/*
+Error while checking the IRQ signal after clear; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_CORE_EVENT_IRQ_HIGH (0x00000008U)
+/*
+Detected an error in the interconnect parity or on lockstep cores; FAILED_CORE_IDX indicates the core associated with the error.*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_INTERCONNECT_ERROR (0x00000009U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_STATUS
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS                 (0x10208U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL        (IMG_UINT64_C(0x00000000800007F3))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_SHIFT    (31U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_CLRMSK   (0X7FFFFFFFU)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN       (0X80000000U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_FAILED_CORE_IDX_SHIFT (8U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_FAILED_CORE_IDX_CLRMSK (0XFFFFF8FFU)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_SHIFT (4U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CORE_IRQ_BEFORE_KICK (0X00000000U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_INDIRECT_MASK_SET_ERROR (0X00000010U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_KICK_CORE_ACCESS_ERROR (0X00000020U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CNN_CONTROL_START_HIGH (0X00000030U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CNN_STATUS_ERROR (0X00000040U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_INT_CORE_ACCESS_ERROR (0X00000050U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CORE_EVENT_ERROR (0X00000060U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CORE_EVENT_NOT_CLEARED (0X00000070U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CORE_EVENT_IRQ_HIGH (0X00000080U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_INTERCONNECT_ERROR (0X00000090U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_SHIFT (1U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_EN   (0X00000002U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_SHIFT   (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_CLRMSK  (0XFFFFFFFEU)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN      (0X00000001U)
+
+
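+/*
+    Example (illustrative sketch, not part of the generated header): draining
+    one entry from the WM response FIFO. Read the workload id and status,
+    then write FIFO_READ to advance the FIFO so further workloads can be
+    kicked (read-before-advance ordering is this sketch's assumption). The
+    extracted ERROR_CODE matches the WL_ERROR_CODE_* values defined above.
+
+        uint32_t wl_id  = vha_readl(regs + VHA_CR_WM_RESPONSE_FIFO_WL_ID);
+        uint32_t status = vha_readl(regs + VHA_CR_WM_RESPONSE_FIFO_WL_STATUS);
+        if (status & VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_EN) {
+            uint32_t err  = (status & ~VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_CLRMSK)
+                            >> VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_ERROR_CODE_SHIFT;
+            uint32_t core = (status & ~VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_FAILED_CORE_IDX_CLRMSK)
+                            >> VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_FAILED_CORE_IDX_SHIFT;
+        }
+        vha_writel(VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_EN,
+                   regs + VHA_CR_WM_RESPONSE_FIFO_READ);
+*/
+
+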
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_PERF
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_PERF                   (0x10210U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_PERF_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_PERF_CYCLES_SHIFT      (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_PERF_CYCLES_CLRMSK     (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD             (0x10218U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_BW_SHIFT    (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_BW_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR             (0x10220U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_BW_SHIFT    (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_BW_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_MWR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_MWR            (0x10228U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_MWR_MASKFULL   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_MWR_BW_SHIFT   (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_MWR_BW_CLRMSK  (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD             (0x10230U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_BW_SHIFT    (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_BW_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR             (0x10238U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_BW_SHIFT    (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_BW_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_MWR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_MWR            (0x10240U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_MWR_MASKFULL   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_MWR_BW_SHIFT   (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_MWR_BW_CLRMSK  (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD              (0x10248U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_BW_SHIFT     (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_BW_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR              (0x10250U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_MASKFULL     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_BW_SHIFT     (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_BW_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_MWR
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_MWR             (0x10258U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_MWR_MASKFULL    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_MWR_BW_SHIFT    (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_MWR_BW_CLRMSK   (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_WORD        (0x10260U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_RD_WORD_BW_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_WORD        (0x10268U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_LOCM_WR_WORD_BW_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_WORD        (0x10270U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_RD_WORD_BW_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_WORD        (0x10278U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_SOCM_WR_WORD_BW_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_WORD         (0x10280U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_RD_WORD_BW_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_WORD
+*/
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_WORD         (0x10288U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_WORD_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_WORD_BW_SHIFT (0U)
+#define VHA_CR_WM_RESPONSE_FIFO_WL_BW_DDR_WR_WORD_BW_CLRMSK (0X00000000U)
+
+
+
+
+
+
+
+
+#define VHA_CR_MH_CONTROL_MAX_BURST_LENGTH_MASK           (0x00000003U)
+
+
+/*
+    Register VHA_CR_CORE_MASK
+*/
+#define VHA_CR_CORE_MASK                                  (0x20000U)
+#define VHA_CR_CORE_MASK_MASKFULL                         (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_CORE_MASK_CORE_MASK_SHIFT                  (0U)
+#define VHA_CR_CORE_MASK_CORE_MASK_CLRMSK                 (0XFFFFFF00U)
+
+
+#define VHA_CR_CLK_CTRL0_MODE_MASK                        (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_CLK_CTRL0_MODE_OFF                         (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_CLK_CTRL0_MODE_ON                          (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_CLK_CTRL0_MODE_AUTO                        (0x00000002U)
+
+
+/*
+    Register VHA_CR_CLK_CTRL0
+*/
+#define VHA_CR_CLK_CTRL0                                  (0x20100U)
+#define VHA_CR_CLK_CTRL0_MASKFULL                         (IMG_UINT64_C(0xF3FFFFFF3F00FF33))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_SHIFT              (62U)
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_CLRMSK             (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_OFF                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_ON                 (IMG_UINT64_C(0x4000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CORE_XBAR_AUTO               (IMG_UINT64_C(0x8000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_MMM_SHIFT                    (60U)
+#define VHA_CR_CLK_CTRL0_CNN_MMM_CLRMSK                   (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_MMM_ON                       (IMG_UINT64_C(0x1000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_MMM_AUTO                     (IMG_UINT64_C(0x2000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_SHIFT                    (56U)
+#define VHA_CR_CLK_CTRL0_CNN_EWO_CLRMSK                   (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_ON                       (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_AUTO                     (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_SHIFT                   (54U)
+#define VHA_CR_CLK_CTRL0_CNN_PACK_CLRMSK                  (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_ON                      (IMG_UINT64_C(0x0040000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_AUTO                    (IMG_UINT64_C(0x0080000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_SHIFT                    (52U)
+#define VHA_CR_CLK_CTRL0_CNN_OIN_CLRMSK                   (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_ON                       (IMG_UINT64_C(0x0010000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_AUTO                     (IMG_UINT64_C(0x0020000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_SHIFT                   (50U)
+#define VHA_CR_CLK_CTRL0_CNN_POOL_CLRMSK                  (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_ON                      (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_AUTO                    (IMG_UINT64_C(0x0008000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_SHIFT                     (48U)
+#define VHA_CR_CLK_CTRL0_CNN_SB_CLRMSK                    (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_SB_OFF                       (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_SB_ON                        (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_AUTO                      (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_SHIFT                   (46U)
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_CLRMSK                  (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_ON                      (IMG_UINT64_C(0x0000400000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_AUTO                    (IMG_UINT64_C(0x0000800000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_SHIFT                   (44U)
+#define VHA_CR_CLK_CTRL0_CNN_NORM_CLRMSK                  (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_ON                      (IMG_UINT64_C(0x0000100000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_AUTO                    (IMG_UINT64_C(0x0000200000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_SHIFT                    (42U)
+#define VHA_CR_CLK_CTRL0_CNN_ACT_CLRMSK                   (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_ON                       (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_AUTO                     (IMG_UINT64_C(0x0000080000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_SHIFT                  (40U)
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_CLRMSK                 (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_ON                     (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_AUTO                   (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_SHIFT                    (38U)
+#define VHA_CR_CLK_CTRL0_CNN_CNV_CLRMSK                   (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_ON                       (IMG_UINT64_C(0x0000004000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_AUTO                     (IMG_UINT64_C(0x0000008000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_SHIFT                   (36U)
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_ON                      (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_AUTO                    (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_SHIFT                   (34U)
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_ON                      (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_AUTO                    (IMG_UINT64_C(0x0000000800000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_SHIFT                    (32U)
+#define VHA_CR_CLK_CTRL0_CNN_CMD_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_ON                       (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_AUTO                     (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SHIFT                        (28U)
+#define VHA_CR_CLK_CTRL0_CNN_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_ON                           (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_CTRL0_CNN_AUTO                         (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_SHIFT                  (26U)
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_ON                     (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_A_AUTO                   (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_SHIFT                  (24U)
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_OFF                    (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_ON                     (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_CLK_CTRL0_CNN_TRS_B_AUTO                   (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_CLK_CTRL0_MEMBUS_RESET_SHIFT               (14U)
+#define VHA_CR_CLK_CTRL0_MEMBUS_RESET_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define VHA_CR_CLK_CTRL0_MEMBUS_RESET_OFF                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_MEMBUS_RESET_ON                  (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_CLK_CTRL0_MEMBUS_RESET_AUTO                (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_CLK_CTRL0_BWM_SHIFT                        (12U)
+#define VHA_CR_CLK_CTRL0_BWM_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define VHA_CR_CLK_CTRL0_BWM_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_BWM_ON                           (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_CLK_CTRL0_BWM_AUTO                         (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_CLK_CTRL0_LOCM_SHIFT                       (10U)
+#define VHA_CR_CLK_CTRL0_LOCM_CLRMSK                      (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_CLK_CTRL0_LOCM_OFF                         (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_LOCM_ON                          (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_CLK_CTRL0_LOCM_AUTO                        (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_CLK_CTRL0_NOC_SHIFT                        (8U)
+#define VHA_CR_CLK_CTRL0_NOC_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_CLK_CTRL0_NOC_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_NOC_ON                           (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_CTRL0_NOC_AUTO                         (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_CLK_CTRL0_ARB_SHIFT                        (4U)
+#define VHA_CR_CLK_CTRL0_ARB_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_CLK_CTRL0_ARB_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_ARB_ON                           (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_CLK_CTRL0_ARB_AUTO                         (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_CLK_CTRL0_BIF_SHIFT                        (0U)
+#define VHA_CR_CLK_CTRL0_BIF_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_CLK_CTRL0_BIF_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_CTRL0_BIF_ON                           (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_CLK_CTRL0_BIF_AUTO                         (IMG_UINT64_C(0x0000000000000002))  
+
+
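+/*
+    Example (illustrative sketch, not part of the generated header): every
+    CLK_CTRL0 field is two bits wide with per-field OFF/ON/AUTO encodings, so
+    one clock domain can be changed with a read-modify-write that clears the
+    field via its CLRMSK before ORing in the new mode. vha_readq()/
+    vha_writeq() are hypothetical 64-bit MMIO helpers.
+
+        uint64_t v = vha_readq(regs + VHA_CR_CLK_CTRL0);
+        v &= VHA_CR_CLK_CTRL0_CNN_CLRMSK;    // clear the CNN clock field
+        v |= VHA_CR_CLK_CTRL0_CNN_AUTO;      // gate automatically when idle
+        vha_writeq(v, regs + VHA_CR_CLK_CTRL0);
+*/
+
+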
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_CLK_STATUS0_MODE_GATED                     (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_CLK_STATUS0_MODE_RUNNING                   (0x00000001U)
+
+
+/*
+    Register VHA_CR_CLK_STATUS0
+*/
+#define VHA_CR_CLK_STATUS0                                (0x20108U)
+#define VHA_CR_CLK_STATUS0_MASKFULL                       (IMG_UINT64_C(0x00000037FFDC1F04))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_SHIFT            (37U)
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_CLRMSK           (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_GATED            (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CORE_XBAR_RUNNING          (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_MMM_SHIFT                  (36U)
+#define VHA_CR_CLK_STATUS0_CNN_MMM_CLRMSK                 (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_MMM_RUNNING                (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_EWO_SHIFT                  (34U)
+#define VHA_CR_CLK_STATUS0_CNN_EWO_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_RUNNING                (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_SHIFT                 (33U)
+#define VHA_CR_CLK_STATUS0_CNN_PACK_CLRMSK                (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_RUNNING               (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_SHIFT                  (32U)
+#define VHA_CR_CLK_STATUS0_CNN_OIN_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_RUNNING                (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_SHIFT                 (31U)
+#define VHA_CR_CLK_STATUS0_CNN_POOL_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_RUNNING               (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_SHIFT                   (30U)
+#define VHA_CR_CLK_STATUS0_CNN_SB_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_SB_GATED                   (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_SB_RUNNING                 (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_SHIFT                 (29U)
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_RUNNING               (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_STATUS0_CNN_NORM_SHIFT                 (28U)
+#define VHA_CR_CLK_STATUS0_CNN_NORM_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_RUNNING               (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACT_SHIFT                  (27U)
+#define VHA_CR_CLK_STATUS0_CNN_ACT_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_RUNNING                (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_SHIFT                (26U)
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_RUNNING              (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CNV_SHIFT                  (25U)
+#define VHA_CR_CLK_STATUS0_CNN_CNV_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_RUNNING                (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_SHIFT                 (24U)
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_RUNNING               (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_SHIFT                 (23U)
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_RUNNING               (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_CLK_STATUS0_CNN_CMD_SHIFT                  (22U)
+#define VHA_CR_CLK_STATUS0_CNN_CMD_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_RUNNING                (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_CLK_STATUS0_CNN_SHIFT                      (20U)
+#define VHA_CR_CLK_STATUS0_CNN_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_RUNNING                    (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_SHIFT                (19U)
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_A_RUNNING              (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_SHIFT                (18U)
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_GATED                (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_TRS_B_RUNNING              (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_CLK_STATUS0_MEMBUS_RESET_SHIFT             (12U)
+#define VHA_CR_CLK_STATUS0_MEMBUS_RESET_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_CLK_STATUS0_MEMBUS_RESET_GATED             (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_MEMBUS_RESET_RUNNING           (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_CLK_STATUS0_BWM_SHIFT                      (11U)
+#define VHA_CR_CLK_STATUS0_BWM_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_CLK_STATUS0_BWM_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_BWM_RUNNING                    (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_CLK_STATUS0_LOCM_SHIFT                     (10U)
+#define VHA_CR_CLK_STATUS0_LOCM_CLRMSK                    (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_CLK_STATUS0_LOCM_GATED                     (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_LOCM_RUNNING                   (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_CLK_STATUS0_NOC_SHIFT                      (9U)
+#define VHA_CR_CLK_STATUS0_NOC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_CLK_STATUS0_NOC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_NOC_RUNNING                    (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_CLK_STATUS0_ARB_SHIFT                      (8U)
+#define VHA_CR_CLK_STATUS0_ARB_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CLK_STATUS0_ARB_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_ARB_RUNNING                    (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_STATUS0_BIF_SHIFT                      (2U)
+#define VHA_CR_CLK_STATUS0_BIF_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CLK_STATUS0_BIF_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_BIF_RUNNING                    (IMG_UINT64_C(0x0000000000000004))  
+
+
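+/*
+    Example (illustrative sketch, not part of the generated header): checking
+    that a domain clock has actually gated, e.g. after selecting AUTO mode in
+    CLK_CTRL0 and waiting for the unit to go idle.
+
+        uint64_t s = vha_readq(regs + VHA_CR_CLK_STATUS0);
+        int cnn_gated = (s & ~VHA_CR_CLK_STATUS0_CNN_CLRMSK)
+                        == VHA_CR_CLK_STATUS0_CNN_GATED;
+*/
+
+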
+/*
+    Register VHA_CR_CORE_SOFT_RESET
+*/
+#define VHA_CR_CORE_SOFT_RESET                            (0x20110U)
+#define VHA_CR_CORE_SOFT_RESET_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_CORE_SOFT_RESET_CORE_RESET_SHIFT           (0U)
+#define VHA_CR_CORE_SOFT_RESET_CORE_RESET_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_CORE_SOFT_RESET_CORE_RESET_EN              (0X00000001U)
+
+
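+/*
+    Example (illustrative sketch, not part of the generated header):
+    requesting a core soft reset. Whether the bit self-clears or must be
+    written back to zero, and how long to wait before touching the core
+    again, are hardware-specific and not captured by these macros.
+
+        vha_writel(VHA_CR_CORE_SOFT_RESET_CORE_RESET_EN,
+                   regs + VHA_CR_CORE_SOFT_RESET);
+*/
+
+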
+/*
+    Register VHA_CR_MEMBUS_RESET_CTRL
+*/
+#define VHA_CR_MEMBUS_RESET_CTRL                          (0x20118U)
+#define VHA_CR_MEMBUS_RESET_CTRL_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_MEMBUS_RESET_CTRL_SOFT_RESET_CYCLES_SHIFT  (0U)
+#define VHA_CR_MEMBUS_RESET_CTRL_SOFT_RESET_CYCLES_CLRMSK (0X00000000U)
+
+
+#define VHA_CR_CORE_EVENT_TYPE_LOGIC_ERROR_SHIFT          (30U)
+#define VHA_CR_CORE_EVENT_TYPE_LOGIC_ERROR_CLRMSK         (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_TYPE_LOGIC_ERROR_EN             (0X40000000U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_CORRECTION_SHIFT       (29U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_CORRECTION_CLRMSK      (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_CORRECTION_EN          (0X20000000U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_DETECTION_SHIFT        (28U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_DETECTION_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_DETECTION_EN           (0X10000000U)
+#define VHA_CR_CORE_EVENT_TYPE_LOCM_SCRUB_DONE_SHIFT      (16U)
+#define VHA_CR_CORE_EVENT_TYPE_LOCM_SCRUB_DONE_CLRMSK     (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_TYPE_LOCM_SCRUB_DONE_EN         (0X00010000U)
+#define VHA_CR_CORE_EVENT_TYPE_DMR_COMPLETE_SHIFT         (11U)
+#define VHA_CR_CORE_EVENT_TYPE_DMR_COMPLETE_CLRMSK        (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_TYPE_DMR_COMPLETE_EN            (0X00000800U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_SYNC_ERROR_SHIFT      (10U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_SYNC_ERROR_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_SYNC_ERROR_EN         (0X00000400U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_WDT_SHIFT             (9U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_WDT_CLRMSK            (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_WDT_EN                (0X00000200U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_MEM_WDT_SHIFT         (8U)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_MEM_WDT_CLRMSK        (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_TYPE_CORE_MEM_WDT_EN            (0X00000100U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_INIT_DONE_SHIFT        (3U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_INIT_DONE_CLRMSK       (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_TYPE_RAM_INIT_DONE_EN           (0X00000008U)
+#define VHA_CR_CORE_EVENT_TYPE_MEMBUS_RESET_DONE_SHIFT    (2U)
+#define VHA_CR_CORE_EVENT_TYPE_MEMBUS_RESET_DONE_CLRMSK   (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_TYPE_MEMBUS_RESET_DONE_EN       (0X00000004U)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_ERROR_SHIFT            (1U)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_ERROR_CLRMSK           (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_ERROR_EN               (0X00000002U)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_COMPLETE_SHIFT         (0U)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_COMPLETE_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_TYPE_CNN_COMPLETE_EN            (0X00000001U)
+
+
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_PARITY_SHIFT        (31U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_PARITY_CLRMSK       (0X7FFFFFFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_PARITY_EN           (0X80000000U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOGIC_ERROR_SHIFT   (30U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOGIC_ERROR_CLRMSK  (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOGIC_ERROR_EN      (0X40000000U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_CORRECTION_EN   (0X20000000U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_DETECTION_EN    (0X10000000U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_LOCM_SCRUB_DONE_EN  (0X00010000U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_DMR_COMPLETE_SHIFT  (11U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_DMR_COMPLETE_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_DMR_COMPLETE_EN     (0X00000800U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_SYNC_ERROR_EN  (0X00000400U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_WDT_SHIFT      (9U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_WDT_CLRMSK     (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_WDT_EN         (0X00000200U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_MEM_WDT_SHIFT  (8U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_MEM_WDT_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CORE_MEM_WDT_EN     (0X00000100U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_RAM_INIT_DONE_EN    (0X00000008U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_ERROR_SHIFT     (1U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_ERROR_CLRMSK    (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_ERROR_EN        (0X00000002U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_COMPLETE_SHIFT  (0U)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_STATUS_TYPE_CNN_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_ENABLE
+*/
+#define VHA_CR_CORE_EVENT_HOST_ENABLE                     (0x20120U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_MASKFULL            (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOGIC_ERROR_SHIFT   (30U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOGIC_ERROR_CLRMSK  (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOGIC_ERROR_EN      (0X40000000U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_CORRECTION_EN   (0X20000000U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_DETECTION_EN    (0X10000000U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_LOCM_SCRUB_DONE_EN  (0X00010000U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_DMR_COMPLETE_SHIFT  (11U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_DMR_COMPLETE_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_DMR_COMPLETE_EN     (0X00000800U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_SYNC_ERROR_EN  (0X00000400U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_WDT_SHIFT      (9U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_WDT_CLRMSK     (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_WDT_EN         (0X00000200U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_MEM_WDT_SHIFT  (8U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_MEM_WDT_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CORE_MEM_WDT_EN     (0X00000100U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_RAM_INIT_DONE_EN    (0X00000008U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_ERROR_SHIFT     (1U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_ERROR_CLRMSK    (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_ERROR_EN        (0X00000002U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_COMPLETE_SHIFT  (0U)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_HOST_ENABLE_CNN_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_STATUS
+*/
+#define VHA_CR_CORE_EVENT_HOST_STATUS                     (0x20128U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_MASKFULL            (IMG_UINT64_C(0x00000000F0010F0F))
+#define VHA_CR_CORE_EVENT_HOST_STATUS_PARITY_SHIFT        (31U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_PARITY_CLRMSK       (0X7FFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_PARITY_EN           (0X80000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOGIC_ERROR_SHIFT   (30U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOGIC_ERROR_CLRMSK  (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOGIC_ERROR_EN      (0X40000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_CORRECTION_EN   (0X20000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_DETECTION_EN    (0X10000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_LOCM_SCRUB_DONE_EN  (0X00010000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DMR_COMPLETE_SHIFT  (11U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DMR_COMPLETE_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DMR_COMPLETE_EN     (0X00000800U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_SYNC_ERROR_EN  (0X00000400U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_WDT_SHIFT      (9U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_WDT_CLRMSK     (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_WDT_EN         (0X00000200U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_MEM_WDT_SHIFT  (8U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_MEM_WDT_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CORE_MEM_WDT_EN     (0X00000100U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_RAM_INIT_DONE_EN    (0X00000008U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_ERROR_SHIFT     (1U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_ERROR_CLRMSK    (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_ERROR_EN        (0X00000002U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_COMPLETE_SHIFT  (0U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_CNN_COMPLETE_EN     (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE
+*/
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE             (0x20178U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_MASKFULL    (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_CORRECTION_EN (0X20000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_DETECTION_EN (0X10000000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_LOCM_SCRUB_DONE_EN (0X00010000U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_DMR_COMPLETE_SHIFT (11U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_DMR_COMPLETE_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_DMR_COMPLETE_EN (0X00000800U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_SYNC_ERROR_EN (0X00000400U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_WDT_SHIFT (9U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_WDT_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_WDT_EN (0X00000200U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_MEM_WDT_SHIFT (8U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_MEM_WDT_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CORE_MEM_WDT_EN (0X00000100U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_RAM_INIT_DONE_EN (0X00000008U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_ERROR_SHIFT (1U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_ERROR_EN (0X00000002U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_COMPLETE_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_HOST_STATUS_DISABLE_CNN_COMPLETE_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_CLEAR
+*/
+#define VHA_CR_CORE_EVENT_HOST_CLEAR                      (0x20130U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_MASKFULL             (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOGIC_ERROR_SHIFT    (30U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOGIC_ERROR_CLRMSK   (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOGIC_ERROR_EN       (0X40000000U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_CORRECTION_EN    (0X20000000U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_DETECTION_SHIFT  (28U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_DETECTION_EN     (0X10000000U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_LOCM_SCRUB_DONE_EN   (0X00010000U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_DMR_COMPLETE_SHIFT   (11U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_DMR_COMPLETE_CLRMSK  (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_DMR_COMPLETE_EN      (0X00000800U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_SYNC_ERROR_EN   (0X00000400U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_WDT_SHIFT       (9U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_WDT_CLRMSK      (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_WDT_EN          (0X00000200U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_MEM_WDT_SHIFT   (8U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_MEM_WDT_CLRMSK  (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CORE_MEM_WDT_EN      (0X00000100U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_INIT_DONE_SHIFT  (3U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_RAM_INIT_DONE_EN     (0X00000008U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_ERROR_SHIFT      (1U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_ERROR_CLRMSK     (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_ERROR_EN         (0X00000002U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_COMPLETE_SHIFT   (0U)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_COMPLETE_CLRMSK  (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_HOST_CLEAR_CNN_COMPLETE_EN      (0X00000001U)
+
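+/*
+    Illustrative sketch (an assumption, not generated output): a typical host
+    interrupt flow reads EVENT_HOST_STATUS, masks it against the enabled
+    events, and acks what it handled through EVENT_HOST_CLEAR. Write-one-to-
+    clear semantics are assumed here; consult the hardware TRM to confirm.
+*/
+#if 0 /* illustrative sketch only */
+static inline u32 vha_ack_host_events(void __iomem *base)
+{
+	u32 status  = readl(base + VHA_CR_CORE_EVENT_HOST_STATUS);
+	u32 enabled = readl(base + VHA_CR_CORE_EVENT_HOST_ENABLE);
+	u32 pending = status & enabled; /* drops e.g. the PARITY status bit */
+
+	if (pending)
+		writel(pending, base + VHA_CR_CORE_EVENT_HOST_CLEAR);
+	return pending;
+}
+#endif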
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_ENABLE
+*/
+#define VHA_CR_CORE_EVENT_WM_ENABLE                       (0x20140U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_MASKFULL              (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOGIC_ERROR_SHIFT     (30U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOGIC_ERROR_CLRMSK    (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOGIC_ERROR_EN        (0X40000000U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_CORRECTION_SHIFT  (29U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_CORRECTION_EN     (0X20000000U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_DETECTION_SHIFT   (28U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_DETECTION_CLRMSK  (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_DETECTION_EN      (0X10000000U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_LOCM_SCRUB_DONE_EN    (0X00010000U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_DMR_COMPLETE_SHIFT    (11U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_DMR_COMPLETE_CLRMSK   (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_DMR_COMPLETE_EN       (0X00000800U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_SYNC_ERROR_EN    (0X00000400U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_WDT_SHIFT        (9U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_WDT_CLRMSK       (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_WDT_EN           (0X00000200U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_MEM_WDT_SHIFT    (8U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_MEM_WDT_CLRMSK   (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CORE_MEM_WDT_EN       (0X00000100U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_INIT_DONE_SHIFT   (3U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_INIT_DONE_CLRMSK  (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_RAM_INIT_DONE_EN      (0X00000008U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_MEMBUS_RESET_DONE_EN  (0X00000004U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_ERROR_SHIFT       (1U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_ERROR_CLRMSK      (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_ERROR_EN          (0X00000002U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_COMPLETE_SHIFT    (0U)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_WM_ENABLE_CNN_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_STATUS
+*/
+#define VHA_CR_CORE_EVENT_WM_STATUS                       (0x20148U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_MASKFULL              (IMG_UINT64_C(0x00000000F0010F0F))
+#define VHA_CR_CORE_EVENT_WM_STATUS_PARITY_SHIFT          (31U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_PARITY_CLRMSK         (0X7FFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_PARITY_EN             (0X80000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOGIC_ERROR_SHIFT     (30U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOGIC_ERROR_CLRMSK    (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOGIC_ERROR_EN        (0X40000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_CORRECTION_SHIFT  (29U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_CORRECTION_EN     (0X20000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_DETECTION_SHIFT   (28U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_DETECTION_CLRMSK  (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_DETECTION_EN      (0X10000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_LOCM_SCRUB_DONE_EN    (0X00010000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DMR_COMPLETE_SHIFT    (11U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DMR_COMPLETE_CLRMSK   (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DMR_COMPLETE_EN       (0X00000800U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_SYNC_ERROR_EN    (0X00000400U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_WDT_SHIFT        (9U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_WDT_CLRMSK       (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_WDT_EN           (0X00000200U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_MEM_WDT_SHIFT    (8U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_MEM_WDT_CLRMSK   (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CORE_MEM_WDT_EN       (0X00000100U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_INIT_DONE_SHIFT   (3U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_INIT_DONE_CLRMSK  (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_RAM_INIT_DONE_EN      (0X00000008U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_MEMBUS_RESET_DONE_EN  (0X00000004U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_ERROR_SHIFT       (1U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_ERROR_CLRMSK      (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_ERROR_EN          (0X00000002U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_COMPLETE_SHIFT    (0U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_COMPLETE_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_CNN_COMPLETE_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_STATUS_DISABLE
+*/
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE               (0x20180U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_MASKFULL      (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_SHIFT (30U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_CLRMSK (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOGIC_ERROR_EN (0X40000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_CORRECTION_SHIFT (29U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_CORRECTION_CLRMSK (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_CORRECTION_EN (0X20000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_DETECTION_SHIFT (28U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_DETECTION_CLRMSK (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_DETECTION_EN (0X10000000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOCM_SCRUB_DONE_SHIFT (16U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_LOCM_SCRUB_DONE_EN (0X00010000U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_DMR_COMPLETE_SHIFT (11U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_DMR_COMPLETE_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_DMR_COMPLETE_EN (0X00000800U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_SYNC_ERROR_SHIFT (10U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_SYNC_ERROR_EN (0X00000400U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_WDT_SHIFT (9U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_WDT_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_WDT_EN   (0X00000200U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_MEM_WDT_SHIFT (8U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_MEM_WDT_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CORE_MEM_WDT_EN (0X00000100U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_INIT_DONE_SHIFT (3U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_INIT_DONE_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_RAM_INIT_DONE_EN (0X00000008U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_MEMBUS_RESET_DONE_EN (0X00000004U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_ERROR_SHIFT (1U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_ERROR_EN  (0X00000002U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_COMPLETE_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_WM_STATUS_DISABLE_CNN_COMPLETE_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_CLEAR
+*/
+#define VHA_CR_CORE_EVENT_WM_CLEAR                        (0x20150U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_MASKFULL               (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOGIC_ERROR_SHIFT      (30U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOGIC_ERROR_CLRMSK     (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOGIC_ERROR_EN         (0X40000000U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_CORRECTION_SHIFT   (29U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_CORRECTION_CLRMSK  (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_CORRECTION_EN      (0X20000000U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_DETECTION_SHIFT    (28U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_DETECTION_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_DETECTION_EN       (0X10000000U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOCM_SCRUB_DONE_SHIFT  (16U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOCM_SCRUB_DONE_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_LOCM_SCRUB_DONE_EN     (0X00010000U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_DMR_COMPLETE_SHIFT     (11U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_DMR_COMPLETE_CLRMSK    (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_DMR_COMPLETE_EN        (0X00000800U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_SYNC_ERROR_SHIFT  (10U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_SYNC_ERROR_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_SYNC_ERROR_EN     (0X00000400U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_WDT_SHIFT         (9U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_WDT_CLRMSK        (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_WDT_EN            (0X00000200U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_MEM_WDT_SHIFT     (8U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_MEM_WDT_CLRMSK    (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CORE_MEM_WDT_EN        (0X00000100U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_INIT_DONE_SHIFT    (3U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_INIT_DONE_CLRMSK   (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_RAM_INIT_DONE_EN       (0X00000008U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_MEMBUS_RESET_DONE_SHIFT (2U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_MEMBUS_RESET_DONE_EN   (0X00000004U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_ERROR_SHIFT        (1U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_ERROR_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_ERROR_EN           (0X00000002U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_COMPLETE_SHIFT     (0U)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_COMPLETE_CLRMSK    (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_WM_CLEAR_CNN_COMPLETE_EN        (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_THRESHOLD
+*/
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD                  (0x20158U)
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_MASKFULL         (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_RAM_CORRECTION_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_RAM_CORRECTION_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_HOST_THRESHOLD_VAL
+*/
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_VAL              (0x20160U)
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_VAL_MASKFULL     (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_VAL_RAM_CORRECTION_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_HOST_THRESHOLD_VAL_RAM_CORRECTION_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_THRESHOLD
+*/
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD                    (0x20168U)
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_MASKFULL           (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_RAM_CORRECTION_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_RAM_CORRECTION_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_CORE_EVENT_WM_THRESHOLD_VAL
+*/
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_VAL                (0x20170U)
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_VAL_MASKFULL       (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_VAL_RAM_CORRECTION_SHIFT (0U)
+#define VHA_CR_CORE_EVENT_WM_THRESHOLD_VAL_RAM_CORRECTION_CLRMSK (0XFFFF0000U)
+
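+/*
+    Illustrative sketch (an assumption, not generated output): each of the
+    four *_THRESHOLD registers holds one 16-bit RAM_CORRECTION count at bit 0,
+    so programming it is a plain masked write; the matching *_THRESHOLD_VAL
+    register exposes the current count for readback.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_set_host_ram_corr_threshold(void __iomem *base, u16 count)
+{
+	u32 v = readl(base + VHA_CR_CORE_EVENT_HOST_THRESHOLD);
+
+	v &= VHA_CR_CORE_EVENT_HOST_THRESHOLD_RAM_CORRECTION_CLRMSK;
+	v |= (u32)count << VHA_CR_CORE_EVENT_HOST_THRESHOLD_RAM_CORRECTION_SHIFT;
+	writel(v, base + VHA_CR_CORE_EVENT_HOST_THRESHOLD);
+}
+#endif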
+
+/*
+    Register VHA_CR_CORE_EVENT_INJECT
+*/
+#define VHA_CR_CORE_EVENT_INJECT                          (0x20188U)
+#define VHA_CR_CORE_EVENT_INJECT_MASKFULL                 (IMG_UINT64_C(0x0000000070010F0F))
+#define VHA_CR_CORE_EVENT_INJECT_LOGIC_ERROR_SHIFT        (30U)
+#define VHA_CR_CORE_EVENT_INJECT_LOGIC_ERROR_CLRMSK       (0XBFFFFFFFU)
+#define VHA_CR_CORE_EVENT_INJECT_LOGIC_ERROR_EN           (0X40000000U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_CORRECTION_SHIFT     (29U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_CORRECTION_CLRMSK    (0XDFFFFFFFU)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_CORRECTION_EN        (0X20000000U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_DETECTION_SHIFT      (28U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_DETECTION_CLRMSK     (0XEFFFFFFFU)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_DETECTION_EN         (0X10000000U)
+#define VHA_CR_CORE_EVENT_INJECT_LOCM_SCRUB_DONE_SHIFT    (16U)
+#define VHA_CR_CORE_EVENT_INJECT_LOCM_SCRUB_DONE_CLRMSK   (0XFFFEFFFFU)
+#define VHA_CR_CORE_EVENT_INJECT_LOCM_SCRUB_DONE_EN       (0X00010000U)
+#define VHA_CR_CORE_EVENT_INJECT_DMR_COMPLETE_SHIFT       (11U)
+#define VHA_CR_CORE_EVENT_INJECT_DMR_COMPLETE_CLRMSK      (0XFFFFF7FFU)
+#define VHA_CR_CORE_EVENT_INJECT_DMR_COMPLETE_EN          (0X00000800U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_SYNC_ERROR_SHIFT    (10U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_SYNC_ERROR_CLRMSK   (0XFFFFFBFFU)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_SYNC_ERROR_EN       (0X00000400U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_WDT_SHIFT           (9U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_WDT_CLRMSK          (0XFFFFFDFFU)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_WDT_EN              (0X00000200U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_MEM_WDT_SHIFT       (8U)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_MEM_WDT_CLRMSK      (0XFFFFFEFFU)
+#define VHA_CR_CORE_EVENT_INJECT_CORE_MEM_WDT_EN          (0X00000100U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_INIT_DONE_SHIFT      (3U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_INIT_DONE_CLRMSK     (0XFFFFFFF7U)
+#define VHA_CR_CORE_EVENT_INJECT_RAM_INIT_DONE_EN         (0X00000008U)
+#define VHA_CR_CORE_EVENT_INJECT_MEMBUS_RESET_DONE_SHIFT  (2U)
+#define VHA_CR_CORE_EVENT_INJECT_MEMBUS_RESET_DONE_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CORE_EVENT_INJECT_MEMBUS_RESET_DONE_EN     (0X00000004U)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_ERROR_SHIFT          (1U)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_ERROR_CLRMSK         (0XFFFFFFFDU)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_ERROR_EN             (0X00000002U)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_COMPLETE_SHIFT       (0U)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_COMPLETE_CLRMSK      (0XFFFFFFFEU)
+#define VHA_CR_CORE_EVENT_INJECT_CNN_COMPLETE_EN          (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL
+*/
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL                 (0x20280U)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL_MASKFULL        (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_SHIFT (0U)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_EN (0X00000001U)
+
+
+/*
+    Register VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS
+*/
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS               (0x20288U)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS_MASKFULL      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_SHIFT (0U)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_EN (0X00000001U)
+
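+/*
+    Illustrative sketch (an assumption, not generated output): flop error
+    injection is kicked via ERR_INJ_CTRL and its outcome observed in
+    ERR_INJ_STATUS. The bounded polling loop below is an assumption; the
+    real completion signalling is not described by this header.
+*/
+#if 0 /* illustrative sketch only */
+static inline bool vha_flop_err_inject(void __iomem *base)
+{
+	int retries = 1000;
+
+	writel(VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL_ERR_INJ_CTRL_EN,
+	       base + VHA_CR_CORE_VHA_FLOP_ERR_INJ_CTRL);
+	while (retries--)
+		if (readl(base + VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS) &
+		    VHA_CR_CORE_VHA_FLOP_ERR_INJ_STATUS_ERR_INJ_STATUS_EN)
+			return true;
+	return false;
+}
+#endif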
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_TIMER
+*/
+#define VHA_CR_CNN_MEM_WDT_TIMER                          (0x20030U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_SHIFT              (0U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_CLRMSK             (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_TIMER
+*/
+#define VHA_CR_CNN_WDT_TIMER                              (0x20038U)
+#define VHA_CR_CNN_WDT_TIMER_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_WDT_TIMER_VALUE_SHIFT                  (0U)
+#define VHA_CR_CNN_WDT_TIMER_VALUE_CLRMSK                 (0X00000000U)
+
+
+/*
+    Register VHA_CR_RTM_CTRL
+*/
+#define VHA_CR_RTM_CTRL                                   (0x20040U)
+#define VHA_CR_RTM_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000C0FFFFF8))
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_SHIFT                  (31U)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_CLRMSK                 (0X7FFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_EN                     (0X80000000U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_SHIFT                   (30U)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_CLRMSK                  (0XBFFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_CHECK_EN                      (0X40000000U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT                (3U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK               (0XFF000007U)
+
+
+/*
+    Register VHA_CR_RTM_DATA
+*/
+#define VHA_CR_RTM_DATA                                   (0x20048U)
+#define VHA_CR_RTM_DATA_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_RTM_DATA_RTM_DATA_SHIFT                    (0U)
+#define VHA_CR_RTM_DATA_RTM_DATA_CLRMSK                   (0X00000000U)
+
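+/*
+    Illustrative sketch (an assumption, not generated output): sampling one
+    RTM probe means selecting it through RTM_CTRL (SELECTOR plus RTM_ENABLE)
+    and then reading RTM_DATA. Whether a settling delay is required between
+    the two accesses is not specified by this header.
+*/
+#if 0 /* illustrative sketch only */
+static inline u32 vha_rtm_read(void __iomem *base, u32 selector)
+{
+	u32 v = VHA_CR_RTM_CTRL_RTM_ENABLE_EN |
+		((selector << VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT) &
+		 ~VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK);
+
+	writel(v, base + VHA_CR_RTM_CTRL);
+	return readl(base + VHA_CR_RTM_DATA);
+}
+#endif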
+
+/*
+    Register VHA_CR_BIF_OUTSTANDING_READ
+*/
+#define VHA_CR_BIF_OUTSTANDING_READ                       (0x20078U)
+#define VHA_CR_BIF_OUTSTANDING_READ_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT         (0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_PAGE_FAULT_STALL
+*/
+#define VHA_CR_BIF_PAGE_FAULT_STALL                       (0x20080U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_SHIFT          (0U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_CLRMSK         (0XFFFFFFFEU)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_EN             (0X00000001U)
+
+
+/*
+    Register VHA_CR_BIF_RTN_FIFO_WORD_COUNT
+*/
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT                    (0x20088U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_MASKFULL           (IMG_UINT64_C(0x00000000000001FF))
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_SHIFT      (0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_CLRMSK     (0XFFFFFE00U)
+
+
+/*
+    Register VHA_CR_CNN_CMD_MH_CONTROL
+*/
+#define VHA_CR_CNN_CMD_MH_CONTROL                         (0x201A0U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x0000000000000034))
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (4U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFCFU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_SHIFT  (2U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_EN     (0X00000004U)
+
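+/*
+    Illustrative sketch (an assumption, not generated output): every
+    *_MH_CONTROL register in this group carries a MAX_BURST_LENGTH field at a
+    register-specific position, so one helper taking the offset, clear mask
+    and shift covers them all. The encoding of the burst value itself is not
+    defined by this header.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_set_max_burst(void __iomem *base, u32 reg_off,
+				     u32 clrmsk, u32 shift, u32 burst)
+{
+	u32 v = readl(base + reg_off);
+
+	v = (v & clrmsk) | (burst << shift);
+	writel(v, base + reg_off);
+}
+/*
+   e.g. vha_set_max_burst(base, VHA_CR_CNN_CMD_MH_CONTROL,
+            VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK,
+            VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_SHIFT, burst);
+*/
+#endif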
+
+/*
+    Register VHA_CR_CNN_IBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_IBUF_MH_CONTROL                        (0x201A8U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_CBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_CBUF_MH_CONTROL                        (0x201B0U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_ABUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_ABUF_MH_CONTROL                        (0x201B8U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_OUTPACK_MH_CONTROL
+*/
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL                     (0x201C0U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MASKFULL            (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_ELEMENTOPS_MH_CONTROL
+*/
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL                  (0x201C8U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MASKFULL         (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_MMM_MH_CONTROL
+*/
+#define VHA_CR_CNN_MMM_MH_CONTROL                         (0x201D0U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (3U)
+#define VHA_CR_CNN_MMM_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_TRS_A_MH_CONTROL
+*/
+#define VHA_CR_CNN_TRS_A_MH_CONTROL                       (0x201D8U)
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MASKFULL              (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_TRS_A_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_TRS_B_MH_CONTROL
+*/
+#define VHA_CR_CNN_TRS_B_MH_CONTROL                       (0x201E0U)
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MASKFULL              (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_TRS_B_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_CNN_DWPE_MH_CONTROL
+*/
+#define VHA_CR_CNN_DWPE_MH_CONTROL                        (0x201E8U)
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000018))
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_DWPE_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+
+
+/*
+    Register VHA_CR_FUSA_CONTROL
+*/
+#define VHA_CR_FUSA_CONTROL                               (0x201F0U)
+#define VHA_CR_FUSA_CONTROL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_SHIFT           (0U)
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_FUSA_CONTROL_ECC_INIT_KICK_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH                   (0x201F8U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_SHIFT         (0U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_CLRMSK        (0X00000000U)
+
+
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_MASK     (0x00000003U)
+/*
+WDT is disabled */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_NONE     (0x00000000U)
+/*
+WDT is cleared when the CMD parser starts a pass or the CMD parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS (0x00000001U)
+/*
+WDT is cleared when the CMD parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK     (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_CTRL
+*/
+#define VHA_CR_CNN_MEM_WDT_CTRL                           (0x20200U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MASKFULL                  (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_SHIFT                (0U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_NONE                 (0X00000000U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK_PASS            (0X00000001U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK                 (0X00000002U)
+
+
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_MASK             (0x00000003U)
+/*
+WDT is cleared when the CMD parser starts a pass or the CMD parser is kicked */
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS        (0x00000001U)
+/*
+WDT is cleared when the CMD parser starts a layer group or the CMD parser is kicked */
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_LAYER       (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_CTRL
+*/
+#define VHA_CR_CNN_WDT_CTRL                               (0x20208U)
+#define VHA_CR_CNN_WDT_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_WDT_CTRL_MODE_SHIFT                    (0U)
+#define VHA_CR_CNN_WDT_CTRL_MODE_CLRMSK                   (0XFFFFFFFCU)
+#define VHA_CR_CNN_WDT_CTRL_MODE_KICK_PASS                (0X00000001U)
+#define VHA_CR_CNN_WDT_CTRL_MODE_KICK_LAYER               (0X00000002U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_WDT_COMPAREMATCH                       (0x20210U)
+#define VHA_CR_CNN_WDT_COMPAREMATCH_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_WDT_COMPAREMATCH_REG_SHIFT             (0U)
+#define VHA_CR_CNN_WDT_COMPAREMATCH_REG_CLRMSK            (0X00000000U)
+
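+/*
+    Illustrative sketch (an assumption, not generated output): arming the CNN
+    watchdog pairs a compare-match count with one of the clear modes defined
+    above. Writing COMPAREMATCH before CTRL is an assumed ordering, not a
+    documented requirement.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_cnn_wdt_arm(void __iomem *base, u32 cycles)
+{
+	writel(cycles, base + VHA_CR_CNN_WDT_COMPAREMATCH);
+	/* Clear the WDT whenever the CMD parser starts a pass or is kicked. */
+	writel(VHA_CR_CNN_WDT_CTRL_MODE_KICK_PASS, base + VHA_CR_CNN_WDT_CTRL);
+}
+#endif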
+
+/*
+    Register VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL
+*/
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL               (0x20230U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_MASKFULL      (IMG_UINT64_C(0x0000000000000004))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_SHIFT (2U)
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_VHA_CNN_CMD_SECURITY_CONTROL_FORCE_DISABLE_CRC_EN (IMG_UINT64_C(0X0000000000000004))
+
+
+/*
+    Register VHA_CR_CNN_ARB_STALL_RATIO
+*/
+#define VHA_CR_CNN_ARB_STALL_RATIO                        (0x20240U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_MASKFULL               (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_SHIFT           (32U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_CLRMSK          (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_SHIFT      (28U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_8_CLRMSK     (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_SHIFT      (24U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_7_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_SHIFT      (20U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_SHIFT      (16U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_SHIFT      (12U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_SHIFT      (8U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_SHIFT      (4U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_SHIFT      (0U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_0
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0                 (0x20248U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_MASKFULL        (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_ACT_SHIFT (60U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_ACT_CLRMSK (IMG_UINT64_C(0X0FFFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ACT_CORE_XBAR_SHIFT (56U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ACT_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XF0FFFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_EWO_CORE_XBAR_SHIFT (52U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_EWO_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFF0FFFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_TENSORB_CORE_XBAR_SHIFT (48U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_TENSORB_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFF0FFFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_EWO_SHIFT (44U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_EWO_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_TENSORB_SHIFT (40U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_TENSORB_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_NORM_SHIFT (36U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_NORM_CLRMSK (IMG_UINT64_C(0XFFFFFF0FFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_NORM_POOL_SHIFT (32U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_NORM_POOL_CLRMSK (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CNV_ABUF_SHIFT  (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CNV_ABUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CNV_SHIFT  (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CNV_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_CORE_XBAR_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_IBUF_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CORE_XBAR_IBUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CORE_XBAR_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_IBUF_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_CNV_SHIFT  (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_CNV_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_OUTPACK_SHIFT (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_ABUF_OUTPACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_ABUF_SHIFT (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_0_CBUF_ABUF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_1
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1                 (0x20250U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_MASKFULL        (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_NORM_POOL_BYPASS_SHIFT (36U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_NORM_POOL_BYPASS_CLRMSK (IMG_UINT64_C(0XFFFFFF0FFFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_B_XBAR_SHIFT (32U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_B_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFF0FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_B_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_B_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_A_XBAR_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_TRS_A_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF0FFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_A_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_XBAR_TRS_A_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF0FFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_OPK_SHIFT   (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_OPK_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_CORE_XBAR_OIN_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_CORE_XBAR_OIN_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_CORE_XBAR_SHIFT (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_CORE_XBAR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF0FF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_SB_SHIFT   (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_POOL_SB_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_SB_SHIFT    (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_1_OIN_SB_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO
+*/
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO            (0x20258U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_MASKFULL   (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_MEMBUS_RESET_RTN_SHIFT (20U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_MEMBUS_RESET_RTN_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_MEMBUS_RESET_SHIFT (16U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_MEMBUS_RESET_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_LOCM_RTN_SHIFT (12U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_LOCM_RTN_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_LOCM_SHIFT (8U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_CORE_NOC_LOCM_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_BIF_CORE_NOC_RTN_SHIFT (4U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_BIF_CORE_NOC_RTN_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_BIF_CORE_NOC_SHIFT (0U)
+#define VHA_CR_NN_SYS2_MEMBUS_CORE_STALL_RATIO_BIF_CORE_NOC_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_CNN_ARB_CTRL
+*/
+#define VHA_CR_CNN_ARB_CTRL                               (0x20260U)
+#define VHA_CR_CNN_ARB_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000307))
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_SHIFT    (9U)
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_CLRMSK   (0XFFFFFDFFU)
+#define VHA_CR_CNN_ARB_CTRL_ENABLE_PASS_PRIORITY_EN       (0X00000200U)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_SHIFT        (8U)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_CLRMSK       (0XFFFFFEFFU)
+#define VHA_CR_CNN_ARB_CTRL_MMM_PRIORITY_SEL_EN           (0X00000100U)
+#define VHA_CR_CNN_ARB_CTRL_MAX_PAGE_COUNT_MIN1_SHIFT     (0U)
+#define VHA_CR_CNN_ARB_CTRL_MAX_PAGE_COUNT_MIN1_CLRMSK    (0XFFFFFFF8U)
+
+
+/*
+    Register VHA_CR_REQ_CTXT_OVERRIDE
+*/
+#define VHA_CR_REQ_CTXT_OVERRIDE                          (0x203A8U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_SHIFT       (2U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS2_EN          (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_SHIFT       (1U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS1_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_SHIFT       (0U)
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_REQ_CTXT_OVERRIDE_OVERRIDE_OS0_EN          (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_CMD_PRIORITY_LIMITS
+*/
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS                    (0x203B0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_MASKFULL           (IMG_UINT64_C(0x000000000000019B))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_SHIFT    (7U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS2_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFE7F))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_SHIFT    (3U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS1_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_SHIFT    (0U)
+#define VHA_CR_CNN_CMD_PRIORITY_LIMITS_OS0_LIMIT_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+    Register VHA_CR_LOCM_SCRUB_CTRL
+*/
+#define VHA_CR_LOCM_SCRUB_CTRL                            (0x203B8U)
+#define VHA_CR_LOCM_SCRUB_CTRL_MASKFULL                   (IMG_UINT64_C(0x0000000000000011))
+#define VHA_CR_LOCM_SCRUB_CTRL_MODE_SHIFT                 (4U)
+#define VHA_CR_LOCM_SCRUB_CTRL_MODE_CLRMSK                (0XFFFFFFEFU)
+#define VHA_CR_LOCM_SCRUB_CTRL_MODE_EN                    (0X00000010U)
+#define VHA_CR_LOCM_SCRUB_CTRL_KICK_SHIFT                 (0U)
+#define VHA_CR_LOCM_SCRUB_CTRL_KICK_CLRMSK                (0XFFFFFFFEU)
+#define VHA_CR_LOCM_SCRUB_CTRL_KICK_EN                    (0X00000001U)
+
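+/*
+    Illustrative sketch (an assumption, not generated output): a local OCM
+    scrub is started by setting KICK (with MODE chosen as required), and its
+    completion is reported through the LOCM_SCRUB_DONE event defined earlier
+    in this file. Busy-polling the event status, as below, is one option; an
+    interrupt-driven wait is the other.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_locm_scrub_kick(void __iomem *base)
+{
+	writel(VHA_CR_LOCM_SCRUB_CTRL_KICK_EN, base + VHA_CR_LOCM_SCRUB_CTRL);
+	while (!(readl(base + VHA_CR_CORE_EVENT_HOST_STATUS) &
+		 VHA_CR_CORE_EVENT_HOST_STATUS_LOCM_SCRUB_DONE_EN))
+		; /* spin until the scrub-done event is raised */
+}
+#endif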
+
+#define VHA_CR_CNN_MASK_CTRL_MASK                         (0x00000003U)
+/*
+No Masks Applied */
+#define VHA_CR_CNN_MASK_CTRL_DEBUG_INTERNAL               (0x00000000U)
+/*
+Mask port with mask_level < 1 */
+#define VHA_CR_CNN_MASK_CTRL_DEBUG_SILICON                (0x00000001U)
+/*
+Mask port with mask_level < 2 */
+#define VHA_CR_CNN_MASK_CTRL_SAFETY                       (0x00000002U)
+/*
+Mask port with mask_level < 3 */
+#define VHA_CR_CNN_MASK_CTRL_RESERVED                     (0x00000003U)
+
+
+#define VHA_CR_CNN_DEBUG_CTRL_MASK                        (0x00000003U)
+/*
+Debug is switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_DISABLE                     (0x00000000U)
+/*
+Debug is output at the end of each stream */
+#define VHA_CR_CNN_DEBUG_CTRL_STREAM                      (0x00000001U)
+/*
+Debug is output at the end of each layer */
+#define VHA_CR_CNN_DEBUG_CTRL_LAYER                       (0x00000002U)
+/*
+Debug is output at the end of each pass */
+#define VHA_CR_CNN_DEBUG_CTRL_PASS                        (0x00000003U)
+
+
+#define VHA_CR_CNN_DEBUG_CTRL_BAND_MASK                   (0x00000003U)
+/*
+Debug is switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_BAND_DISABLE                (0x00000000U)
+/*
+Reserved encoding; debug remains switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_BAND_RESERVED               (0x00000001U)
+/*
+Debug is output at the end of each layer */
+#define VHA_CR_CNN_DEBUG_CTRL_BAND_LAYER                  (0x00000002U)
+/*
+Debug is output at the end of each pass */
+#define VHA_CR_CNN_DEBUG_CTRL_BAND_PASS                   (0x00000003U)
+
+
+#define VHA_CR_CNN_PRELOAD_CTRL_MASK                      (0x00000007U)
+/*
+Preloads are switched off */
+#define VHA_CR_CNN_PRELOAD_CTRL_DISABLE                   (0x00000000U)
+/*
+Preloads are triggered 64 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_64                      (0x00000001U)
+/*
+Preloads are triggered 128 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_128                     (0x00000002U)
+/*
+Preloads are triggered 192 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_192                     (0x00000003U)
+/*
+Preloads are triggered 256 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_256                     (0x00000004U)
+/*
+Preloads are triggered 320 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_320                     (0x00000005U)
+/*
+Preloads are triggered 384 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_384                     (0x00000006U)
+/*
+Preloads are triggered 448 requests after the previous 32k boundary */
+#define VHA_CR_CNN_PRELOAD_CTRL_N_448                     (0x00000007U)
+
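+/*
+    Note (illustrative addition): the nonzero PRELOAD_CTRL encodings above
+    follow value = N / 64, i.e. field value n triggers preloads 64*n requests
+    after the previous 32k boundary. The convenience macro below is an
+    assumption, not part of the generated output.
+*/
+#if 0 /* illustrative sketch only */
+#define VHA_CNN_PRELOAD_CTRL_FROM_REQS(n) ((n) / 64) /* 64..448 -> 1..7 */
+#endif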
+
+/*
+Memory buffer will be used for MODEL only (CBUF, CMD, DEBUG, PERF) */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_MODEL_ONLY               (0x00000000U)
+/*
+Memory buffer will be used for IO only (OUTPACK, IBUF, ABUF, EWO) or for both MODEL and IO */
+#define VHA_CR_ALT_ADDR_BUF_TYPE_IO_OR_SHARED             (0x00000001U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CONTROL                            (0x30000U)
+#define VHA_CR_OS0_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x00000000013F7F7E))
+#define VHA_CR_OS0_CNN_CONTROL_START_SHIFT                (24U)
+#define VHA_CR_OS0_CNN_CONTROL_START_CLRMSK               (0XFEFFFFFFU)
+#define VHA_CR_OS0_CNN_CONTROL_START_EN                   (0X01000000U)
+#define VHA_CR_OS0_CNN_CONTROL_ENABLE_SKIP_LAYERS_SHIFT   (21U)
+#define VHA_CR_OS0_CNN_CONTROL_ENABLE_SKIP_LAYERS_CLRMSK  (0XFFDFFFFFU)
+#define VHA_CR_OS0_CNN_CONTROL_ENABLE_SKIP_LAYERS_EN      (0X00200000U)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_SHIFT           (16U)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_CLRMSK          (0XFFE0FFFFU)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_IO_SHIFT        (10U)
+#define VHA_CR_OS0_CNN_CONTROL_CTXT_PASID_IO_CLRMSK       (0XFFFF83FFU)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT             (8U)
+#define VHA_CR_OS0_CNN_CONTROL_PRIORITY_CLRMSK            (0XFFFFFCFFU)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+
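+/*
+    Illustrative sketch (an assumption, not generated output): kicking a
+    command stream on the OS0 interface programs the command size (stored
+    minus one) and priority, then sets START. Combining all fields in a
+    single write is assumed to be legal; confirm against the TRM.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_os0_cnn_start(void __iomem *base, u32 cmd_size, u32 prio)
+{
+	u32 v = 0;
+
+	v |= ((cmd_size - 1) << VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT) &
+	     ~VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK;
+	v |= (prio << VHA_CR_OS0_CNN_CONTROL_PRIORITY_SHIFT) &
+	     ~VHA_CR_OS0_CNN_CONTROL_PRIORITY_CLRMSK;
+	v |= VHA_CR_OS0_CNN_CONTROL_START_EN;
+	writel(v, base + VHA_CR_OS0_CNN_CONTROL);
+}
+#endif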
+
+#define VHA_CR_OS0_CNN_STATUS_STATE_MASK                  (0x00000003U)
+/*
+No requests are pending for this host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_IDLE                  (0x00000000U)
+/*
+A command stream from this host is being processed */
+#define VHA_CR_OS0_CNN_STATUS_STATE_RUN                   (0x00000001U)
+/*
+The command stream from this host has been suspended due to a higher-priority request from another host */
+#define VHA_CR_OS0_CNN_STATUS_STATE_SUSPEND               (0x00000002U)
+/*
+A command stream from this host is pending but has not been started */
+#define VHA_CR_OS0_CNN_STATUS_STATE_PENDING               (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_STATUS
+*/
+#define VHA_CR_OS0_CNN_STATUS                             (0x30008U)
+#define VHA_CR_OS0_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x00000000C1FFFFFF))
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SHIFT         (30U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK        (0X3FFFFFFFU)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_IDLE          (0X00000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_RUN           (0X40000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SUSPEND       (0X80000000U)
+#define VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_PENDING       (0XC0000000U)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_SHIFT                (24U)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_CLRMSK               (0XFEFFFFFFU)
+#define VHA_CR_OS0_CNN_STATUS_PARITY_EN                   (0X01000000U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFF0000FFU)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
+
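+/*
+    Illustrative sketch (an assumption, not generated output): CURRENT_STATE
+    occupies bits [31:30] of OS0_CNN_STATUS and decodes to the
+    IDLE/RUN/SUSPEND/PENDING enumeration above.
+*/
+#if 0 /* illustrative sketch only */
+static inline u32 vha_os0_cnn_state(void __iomem *base)
+{
+	u32 v = readl(base + VHA_CR_OS0_CNN_STATUS);
+
+	return (v & ~VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_CLRMSK) >>
+	       VHA_CR_OS0_CNN_STATUS_CURRENT_STATE_SHIFT;
+}
+#endif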
+
+/*
+    Register VHA_CR_OS0_CNN_STATUS2
+*/
+#define VHA_CR_OS0_CNN_STATUS2                            (0x30010U)
+#define VHA_CR_OS0_CNN_STATUS2_MASKFULL                   (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_OS0_CNN_STATUS2_PASS_COUNT_SHIFT           (0U)
+#define VHA_CR_OS0_CNN_STATUS2_PASS_COUNT_CLRMSK          (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS                   (0x30018U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (7U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF000000007F))
+
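+/*
+    Illustrative sketch (an assumption, not generated output): BASE_ADDR
+    starts at bit 7, so the command buffer device address must be 128-byte
+    aligned and the low bits fall away under the field mask. The 64-bit
+    accessor writeq() is assumed to be available on this platform.
+*/
+#if 0 /* illustrative sketch only */
+static inline void vha_os0_set_cmd_base(void __iomem *base, u64 dev_addr)
+{
+	/* Keep only the field bits [39:7]; dev_addr must be 128-byte aligned. */
+	writeq(dev_addr & ~VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK,
+	       base + VHA_CR_OS0_CNN_CMD_BASE_ADDRESS);
+}
+#endif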
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED                   (0x30020U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_SHIFT (31U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000080000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_SHIFT (30U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000040000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_SHIFT (29U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000020000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_SHIFT (28U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000010000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_SHIFT (27U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000008000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_SHIFT (26U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000004000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_SHIFT (25U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000002000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_SHIFT (24U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000001000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_SHIFT (23U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000800000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_SHIFT (22U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000400000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_SHIFT (21U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000200000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_SHIFT (20U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000100000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_SHIFT (19U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000080000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_SHIFT (18U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000040000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_SHIFT (17U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000020000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT (16U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_MODEL_ONLY (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED (IMG_UINT64_C(0x0000000000010000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_SHIFT (15U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR15_USED_EN (IMG_UINT64_C(0X0000000000008000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_SHIFT (14U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR14_USED_EN (IMG_UINT64_C(0X0000000000004000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_SHIFT (13U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR13_USED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_SHIFT (12U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR12_USED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_SHIFT (11U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR11_USED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_SHIFT (10U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR10_USED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_SHIFT (9U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR9_USED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_SHIFT (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR8_USED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
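
Taken together, the *_USED bits (15:0) and *_BUF_TYPE bits (31:16) above describe one alternate-address slot each. A minimal decode sketch for slot 0, assuming this header is in scope and `reg` was already read from VHA_CR_OS0_CNN_ALT_ADDRESS_USED (how it is read is left out here):

    #include <stdbool.h>
    #include <stdint.h>

    /* Decode slot 0 of a VHA_CR_OS0_CNN_ALT_ADDRESS_USED readout. */
    static bool alt_addr0_is_io_or_shared(uint64_t reg)
    {
        /* The slot is only meaningful when its USED bit is set. */
        if (!(reg & VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN))
            return false;
        /* BUF_TYPE distinguishes MODEL_ONLY (0) from IO_OR_SHARED (1). */
        return (reg & VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_IO_OR_SHARED) != 0;
    }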
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0                       (0x30028U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1                       (0x30030U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2                       (0x30038U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3                       (0x30040U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4                       (0x30048U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5                       (0x30050U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6                       (0x30058U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7                       (0x30060U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL                  (0x30068U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CRC_CONTROL                        (0x30088U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000013))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_COMBINED_CNN_CRC_ENABLE_SHIFT (4U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_COMBINED_CNN_CRC_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_COMBINED_CNN_CRC_ENABLE_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
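
The CLRMSK/enum pairs are meant to be composed by clearing the field and ORing in one of its encodings. A sketch building a control word that enables per-layer CRCs together with the combined CRC (starting from a previously read value `old`):

    #include <stdint.h>

    /* Build a CRC control word: per-layer CRCs plus the combined CRC. */
    static uint64_t crc_ctrl_layer_combined(uint64_t old)
    {
        uint64_t v = old;
        v &= VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK;      /* clear bits 1:0 */
        v |= VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER;       /* CRC per layer  */
        v |= VHA_CR_OS0_CNN_CRC_CONTROL_COMBINED_CNN_CRC_ENABLE_EN; /* combined CRC   */
        return v;
    }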
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CRC_ADDRESS                        (0x30090U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (7U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (128U)
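
ALIGNSHIFT/ALIGNSIZE say the address field only holds bits above a 128-byte boundary. A sketch of validating and packing a device-virtual address into the field (the caller-side names are assumptions, not part of this header):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t crc_address_field(uint64_t dev_addr)
    {
        /* The register cannot represent the low 7 bits of the address. */
        assert((dev_addr & (VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE - 1)) == 0);
        /* Keep only the bits the field can hold (39:7). */
        return dev_addr & ~VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK;
    }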
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS                      (0x30098U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (7U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (128U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY
+*/
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY                  (0x300A0U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY_MASKFULL         (IMG_UINT64_C(0x0000000000FFFFE0))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY_CNN_DEBUG_SIZE_SHIFT (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY_CNN_DEBUG_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00001F))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY_CNN_DEBUG_SIZE_ALIGNSHIFT (5U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_LEGACY_CNN_DEBUG_SIZE_ALIGNSIZE (32U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL                      (0x300A8U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_RESERVED (0X00000001U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_STATUS                       (0x300B0U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000007FFFF))
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFF80000U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_PRELOAD_CONTROL
+*/
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL                    (0x300B8U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MASKFULL           (IMG_UINT64_C(0x0000000001FF7777))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_SHIFT (22U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFE3FFFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_64 (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_128 (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_192 (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_256 (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_320 (IMG_UINT64_C(0x0000000001400000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_384 (IMG_UINT64_C(0x0000000001800000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_WR_N_REQS_N_448 (IMG_UINT64_C(0x0000000001c00000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_SHIFT (19U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_64 (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_128 (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_192 (IMG_UINT64_C(0x0000000000180000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_256 (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_320 (IMG_UINT64_C(0x0000000000280000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_384 (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_MMM_RD_N_REQS_N_448 (IMG_UINT64_C(0x0000000000380000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_SHIFT (16U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_64 (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_128 (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_192 (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_256 (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_320 (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_384 (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_OUTPACK_N_REQS_N_448 (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_SHIFT   (12U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_64    (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_128   (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_192   (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_256   (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_320   (IMG_UINT64_C(0x0000000000005000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_384   (IMG_UINT64_C(0x0000000000006000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_EWO_N_REQS_N_448   (IMG_UINT64_C(0x0000000000007000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_SHIFT  (8U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_ABUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_SHIFT  (4U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000050))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000060))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000070))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_SHIFT  (0U)
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_DISABLE (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_64   (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128  (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_192  (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_256  (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_320  (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_384  (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_448  (IMG_UINT64_C(0x0000000000000007))  
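
Each requestor (IBUF, CBUF, ABUF, EWO, OUTPACK, MMM RD/WR) gets its own 3-bit N_REQS field, and the DISABLE encodings are all zero. So a preload word can be built by ORing enums; a sketch that preloads 128 requests on IBUF and CBUF and leaves everything else disabled:

    #include <stdint.h>

    static uint64_t preload_ibuf_cbuf_128(void)
    {
        /* DISABLE encodings are zero; unlisted requestors stay disabled. */
        return VHA_CR_OS0_CNN_PRELOAD_CONTROL_IBUF_N_REQS_N_128 |
               VHA_CR_OS0_CNN_PRELOAD_CONTROL_CBUF_N_REQS_N_128;
    }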
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS8
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8                       (0x300C0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS8_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS9
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9                       (0x300C8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_SHIFT        (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS9_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS10
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10                      (0x300D0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS10_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS11
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11                      (0x300D8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS11_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS12
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12                      (0x300E0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS12_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS13
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13                      (0x300E8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS13_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS14
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14                      (0x300F0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS14_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS15
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15                      (0x300F8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_SHIFT       (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS15_ALT_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_CNN_PERFORMANCE
+*/
+#define VHA_CR_OS0_CNN_PERFORMANCE                        (0x30100U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_SHIFT            (0U)
+#define VHA_CR_OS0_CNN_PERFORMANCE_VALUE_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_OS0_LOCM_BASE_ADDR
+*/
+#define VHA_CR_OS0_LOCM_BASE_ADDR                         (0x30108U)
+#define VHA_CR_OS0_LOCM_BASE_ADDR_MASKFULL                (IMG_UINT64_C(0x000000FFFFFFFFE0))
+#define VHA_CR_OS0_LOCM_BASE_ADDR_BASE_ADDR_SHIFT         (5U)
+#define VHA_CR_OS0_LOCM_BASE_ADDR_BASE_ADDR_CLRMSK        (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+    Register VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR
+*/
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR          (0x30110U)
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_SHIFT (0U)
+#define VHA_CR_OS0_SAVE_RESTORE_BUFFER_BASE_ADDR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000000))
+
+
+/*
+    Register VHA_CR_OS0_SAVE_RESTORE_CTRL
+*/
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL                      (0x30118U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_SHIFT         (1U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_DISABLE       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_64          (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_128         (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_192         (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_256         (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_320         (IMG_UINT64_C(0x000000000000000a))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_384         (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_N_REQS_N_448         (IMG_UINT64_C(0x000000000000000e))  
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_SHIFT (0U)
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_SAVE_RESTORE_CTRL_WRITE_CACHE_POLICY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_MASK_CTRL
+*/
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL                      (0x30120U)
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_MASKFULL             (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_DEBUG_INTERNAL (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_DEBUG_SILICON  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_SAFETY         (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_MASK_CTRL_LEVEL_RESERVED       (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS0_CNN_VCORE_MAPPING
+*/
+#define VHA_CR_OS0_CNN_VCORE_MAPPING                      (0x30128U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_MASKFULL             (IMG_UINT64_C(0x0000000077777777))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE7_SHIFT         (28U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE7_CLRMSK        (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE6_SHIFT         (24U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE6_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE5_SHIFT         (20U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE5_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFF8FFFFF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE4_SHIFT         (16U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE4_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE3_SHIFT         (12U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE3_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFF8FFF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE2_SHIFT         (8U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE2_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE1_SHIFT         (4U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE1_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFF8F))
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE0_SHIFT         (0U)
+#define VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE0_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
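
Each VCOREn field is 3 bits wide at a 4-bit stride. A packing sketch, assuming each field selects the physical core backing virtual core n (the register name suggests this, but the header itself does not spell the semantics out):

    #include <stdint.h>

    /* Pack a virtual-to-physical core map into a VCORE_MAPPING value. */
    static uint64_t vcore_mapping(const uint8_t map[8])
    {
        uint64_t v = 0;
        for (unsigned i = 0; i < 8; i++)
            v |= (uint64_t)(map[i] & 0x7) << (i * 4); /* 3-bit field every 4 bits */
        return v & VHA_CR_OS0_CNN_VCORE_MAPPING_MASKFULL;
    }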
+
+
+/*
+    Register VHA_CR_CORE0_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID                     (0x30130U)
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID_CORE0_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID_CORE0_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID_CORE0_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE0_LAST_NNA_SYNC_ID_CORE0_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE1_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID                     (0x30138U)
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID_CORE1_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID_CORE1_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID_CORE1_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE1_LAST_NNA_SYNC_ID_CORE1_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE2_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID                     (0x30140U)
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID_CORE2_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID_CORE2_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID_CORE2_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE2_LAST_NNA_SYNC_ID_CORE2_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE3_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID                     (0x30148U)
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID_CORE3_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID_CORE3_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID_CORE3_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE3_LAST_NNA_SYNC_ID_CORE3_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE4_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID                     (0x30150U)
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID_CORE4_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID_CORE4_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID_CORE4_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE4_LAST_NNA_SYNC_ID_CORE4_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE5_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID                     (0x30158U)
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID_CORE5_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID_CORE5_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID_CORE5_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE5_LAST_NNA_SYNC_ID_CORE5_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE6_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID                     (0x30160U)
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID_CORE6_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID_CORE6_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID_CORE6_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE6_LAST_NNA_SYNC_ID_CORE6_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE7_LAST_NNA_SYNC_ID
+*/
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID                     (0x30168U)
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID_CORE7_LAST_NNA_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID_CORE7_LAST_NNA_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID_CORE7_LAST_NNA_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE7_LAST_NNA_SYNC_ID_CORE7_LAST_NNA_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE0_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID                     (0x30230U)
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID_CORE0_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID_CORE0_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID_CORE0_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE0_LAST_MMM_SYNC_ID_CORE0_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE1_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID                     (0x30238U)
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID_CORE1_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID_CORE1_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID_CORE1_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE1_LAST_MMM_SYNC_ID_CORE1_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE2_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID                     (0x30240U)
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID_CORE2_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID_CORE2_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID_CORE2_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE2_LAST_MMM_SYNC_ID_CORE2_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE3_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID                     (0x30248U)
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID_CORE3_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID_CORE3_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID_CORE3_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE3_LAST_MMM_SYNC_ID_CORE3_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE4_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID                     (0x30250U)
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID_CORE4_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID_CORE4_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID_CORE4_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE4_LAST_MMM_SYNC_ID_CORE4_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE5_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID                     (0x30258U)
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID_CORE5_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID_CORE5_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID_CORE5_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE5_LAST_MMM_SYNC_ID_CORE5_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE6_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID                     (0x30260U)
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID_CORE6_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID_CORE6_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID_CORE6_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE6_LAST_MMM_SYNC_ID_CORE6_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CORE7_LAST_MMM_SYNC_ID
+*/
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID                     (0x30268U)
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID_MASKFULL            (IMG_UINT64_C(0x0000000070FFFFFF))
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID_CORE7_LAST_MMM_SYNC_ID_PARITY_SHIFT (28U)
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID_CORE7_LAST_MMM_SYNC_ID_PARITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFF8FFFFFFF))
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID_CORE7_LAST_MMM_SYNC_ID_SHIFT (0U)
+#define VHA_CR_CORE7_LAST_MMM_SYNC_ID_CORE7_LAST_MMM_SYNC_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF000000))
+
+
+/*
+    Register VHA_CR_CNN_CORE_SYNC_WDT_CTRL
+*/
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL                     (0x30270U)
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_MASKFULL            (IMG_UINT64_C(0x0000000000FFFF01))
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_VALUE_SHIFT         (8U)
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_VALUE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFF0000FF))
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_ENABLE_SHIFT        (0U)
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_ENABLE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CNN_CORE_SYNC_WDT_CTRL_ENABLE_EN           (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CORE_BW_LOCM_RD
+*/
+#define VHA_CR_CORE_BW_LOCM_RD                            (0x30278U)
+#define VHA_CR_CORE_BW_LOCM_RD_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_LOCM_RD_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_LOCM_RD_BW_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_LOCM_WR
+*/
+#define VHA_CR_CORE_BW_LOCM_WR                            (0x30280U)
+#define VHA_CR_CORE_BW_LOCM_WR_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_LOCM_WR_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_LOCM_WR_BW_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_LOCM_MWR
+*/
+#define VHA_CR_CORE_BW_LOCM_MWR                           (0x30288U)
+#define VHA_CR_CORE_BW_LOCM_MWR_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_LOCM_MWR_BW_SHIFT                  (0U)
+#define VHA_CR_CORE_BW_LOCM_MWR_BW_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_LOCM_RD_WORD
+*/
+#define VHA_CR_CORE_BW_LOCM_RD_WORD                       (0x30290U)
+#define VHA_CR_CORE_BW_LOCM_RD_WORD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_LOCM_RD_WORD_BW_SHIFT              (0U)
+#define VHA_CR_CORE_BW_LOCM_RD_WORD_BW_CLRMSK             (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_LOCM_WR_WORD
+*/
+#define VHA_CR_CORE_BW_LOCM_WR_WORD                       (0x30298U)
+#define VHA_CR_CORE_BW_LOCM_WR_WORD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_LOCM_WR_WORD_BW_SHIFT              (0U)
+#define VHA_CR_CORE_BW_LOCM_WR_WORD_BW_CLRMSK             (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_TLC_RD
+*/
+#define VHA_CR_CORE_BW_TLC_RD                             (0x30300U)
+#define VHA_CR_CORE_BW_TLC_RD_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_TLC_RD_BW_SHIFT                    (0U)
+#define VHA_CR_CORE_BW_TLC_RD_BW_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_TLC_WR
+*/
+#define VHA_CR_CORE_BW_TLC_WR                             (0x30308U)
+#define VHA_CR_CORE_BW_TLC_WR_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_TLC_WR_BW_SHIFT                    (0U)
+#define VHA_CR_CORE_BW_TLC_WR_BW_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_TLC_MWR
+*/
+#define VHA_CR_CORE_BW_TLC_MWR                            (0x30310U)
+#define VHA_CR_CORE_BW_TLC_MWR_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_TLC_MWR_BW_SHIFT                   (0U)
+#define VHA_CR_CORE_BW_TLC_MWR_BW_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_TLC_RD_WORD
+*/
+#define VHA_CR_CORE_BW_TLC_RD_WORD                        (0x30318U)
+#define VHA_CR_CORE_BW_TLC_RD_WORD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_TLC_RD_WORD_BW_SHIFT               (0U)
+#define VHA_CR_CORE_BW_TLC_RD_WORD_BW_CLRMSK              (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_BW_TLC_WR_WORD
+*/
+#define VHA_CR_CORE_BW_TLC_WR_WORD                        (0x30320U)
+#define VHA_CR_CORE_BW_TLC_WR_WORD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_BW_TLC_WR_WORD_BW_SHIFT               (0U)
+#define VHA_CR_CORE_BW_TLC_WR_WORD_BW_CLRMSK              (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS               (0x30328U)
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS_MASKFULL      (IMG_UINT64_C(0x000000FFFFFFFF80))
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT (7U)
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000007F))
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (7U)
+#define VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (128U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_HITS
+*/
+#define VHA_CR_PERF_SLC_SNOOP_HITS                        (0x7728U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_CLRMSK           (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_MISSES
+*/
+#define VHA_CR_PERF_SLC_SNOOP_MISSES                      (0x7730U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_CLRMSK         (00000000U)
+
+
+/*
+    Register VHA_CR_SLC_CCM_CTRL
+*/
+#define VHA_CR_SLC_CCM_CTRL                               (0x78F8U)
+#define VHA_CR_SLC_CCM_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000FF00FF))
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_SHIFT            (16U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_SHIFT         (0U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_CLRMSK        (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_CCM_STATUS
+*/
+#define VHA_CR_SLC_CCM_STATUS                             (0x7900U)
+#define VHA_CR_SLC_CCM_STATUS_MASKFULL                    (IMG_UINT64_C(0x0FFFFFFFF10FF0FF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_SHIFT          (52U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_CLRMSK         (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_SHIFT          (44U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_CLRMSK         (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_SHIFT          (36U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_CLRMSK         (IMG_UINT64_C(0XFFFFF00FFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_SHIFT          (28U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_CLRMSK         (IMG_UINT64_C(0XFFFFFFF00FFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_SHIFT              (24U)
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_EN                 (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_SHIFT         (12U)
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_SHIFT        (0U)
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
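
Reading a multi-field status register back is the usual mask-and-shift on the CLRMSK complement; a sketch extracting one of the snoop counters from a raw readout:

    #include <stdint.h>

    static uint32_t ccm_snoop_count0(uint64_t status)
    {
        /* The field occupies bits 35:28; CLRMSK has exactly those bits clear. */
        return (uint32_t)((status & ~VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_CLRMSK)
                          >> VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_SHIFT);
    }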
+
+
+#endif /* _VHA_CR_MAGNA_H_ */
+
+/*****************************************************************************
+ End of file (vha_cr_magna.h)
+*****************************************************************************/
+

+ 3171 - 0
driver/include/hwdefs/vha_cr_mirage.h

@@ -0,0 +1,3171 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file vha_cr_mirage.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _VHA_CR_MIRAGE_H_
+#define _VHA_CR_MIRAGE_H_
+
+#define VHA_CR_MIRAGE_REVISION 1
+
+#define VHA_CR_MH_CONTROL_PERSISTENCE_TYPE_MASK           (0x00000003U)
+
+
+
+
+
+
+#define VHA_CR_MH_CONTROL_MAX_BURST_LENGTH_MASK           (0x00000003U)
+
+
+#define VHA_CR_CLK_CTRL0_MODE_MASK                        (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_CLK_CTRL0_MODE_OFF                         (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_CLK_CTRL0_MODE_ON                          (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_CLK_CTRL0_MODE_AUTO                        (0x00000002U)
+
+
+/*
+    Register VHA_CR_CLK_CTRL0
+*/
+#define VHA_CR_CLK_CTRL0                                  (0x0000U)
+#define VHA_CR_CLK_CTRL0_MASKFULL                         (IMG_UINT64_C(0x03FFFFFF30000330))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_SHIFT                    (56U)
+#define VHA_CR_CLK_CTRL0_CNN_EWO_CLRMSK                   (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_EWO_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_ON                       (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_EWO_AUTO                     (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_SHIFT                   (54U)
+#define VHA_CR_CLK_CTRL0_CNN_PACK_CLRMSK                  (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_PACK_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_ON                      (IMG_UINT64_C(0x0040000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_PACK_AUTO                    (IMG_UINT64_C(0x0080000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_SHIFT                    (52U)
+#define VHA_CR_CLK_CTRL0_CNN_OIN_CLRMSK                   (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OIN_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_ON                       (IMG_UINT64_C(0x0010000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_OIN_AUTO                     (IMG_UINT64_C(0x0020000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_SHIFT                   (50U)
+#define VHA_CR_CLK_CTRL0_CNN_POOL_CLRMSK                  (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_POOL_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_ON                      (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_POOL_AUTO                    (IMG_UINT64_C(0x0008000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_SHIFT                     (48U)
+#define VHA_CR_CLK_CTRL0_CNN_SB_CLRMSK                    (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_SB_OFF                       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_ON                        (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SB_AUTO                      (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_SHIFT                   (46U)
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_CLRMSK                  (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_ON                      (IMG_UINT64_C(0x0000400000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_XBAR_AUTO                    (IMG_UINT64_C(0x0000800000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_SHIFT                   (44U)
+#define VHA_CR_CLK_CTRL0_CNN_NORM_CLRMSK                  (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_NORM_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_ON                      (IMG_UINT64_C(0x0000100000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_NORM_AUTO                    (IMG_UINT64_C(0x0000200000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_SHIFT                    (42U)
+#define VHA_CR_CLK_CTRL0_CNN_ACT_CLRMSK                   (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACT_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_ON                       (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACT_AUTO                     (IMG_UINT64_C(0x0000080000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_SHIFT                  (40U)
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_CLRMSK                 (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_OFF                    (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_ON                     (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ACCUM_AUTO                   (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_SHIFT                    (38U)
+#define VHA_CR_CLK_CTRL0_CNN_CNV_CLRMSK                   (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CNV_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_ON                       (IMG_UINT64_C(0x0000004000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CNV_AUTO                     (IMG_UINT64_C(0x0000008000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_SHIFT                   (36U)
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_ON                      (IMG_UINT64_C(0x0000001000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CBUF_AUTO                    (IMG_UINT64_C(0x0000002000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_SHIFT                   (34U)
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_CLRMSK                  (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_OFF                     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_ON                      (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_CTRL0_CNN_IBUF_AUTO                    (IMG_UINT64_C(0x0000000800000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_SHIFT                    (32U)
+#define VHA_CR_CLK_CTRL0_CNN_CMD_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_CMD_OFF                      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_ON                       (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_CTRL0_CNN_CMD_AUTO                     (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_CTRL0_CNN_SHIFT                        (28U)
+#define VHA_CR_CLK_CTRL0_CNN_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_CLK_CTRL0_CNN_OFF                          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_CNN_ON                           (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_CTRL0_CNN_AUTO                         (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_CTRL0_SLC_SHIFT                        (8U)
+#define VHA_CR_CLK_CTRL0_SLC_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_CLK_CTRL0_SLC_OFF                          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_SLC_ON                           (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_CTRL0_SLC_AUTO                         (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_CLK_CTRL0_BIF_SHIFT                        (4U)
+#define VHA_CR_CLK_CTRL0_BIF_CLRMSK                       (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_CLK_CTRL0_BIF_OFF                          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_CTRL0_BIF_ON                           (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_CLK_CTRL0_BIF_AUTO                         (IMG_UINT64_C(0x0000000000000020))  
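
With the two-bit OFF/ON/AUTO encoding described above, and OFF encoded as zero, a full control value is just the OR of per-domain enums. A sketch putting a few domains into AUTO (only three shown; the same pattern extends to every domain in the register):

    #include <stdint.h>

    static uint64_t clk_ctrl0_mostly_auto(void)
    {
        /* OFF encodings are zero, so unlisted domains stay forced off. */
        return VHA_CR_CLK_CTRL0_CNN_AUTO |
               VHA_CR_CLK_CTRL0_SLC_AUTO |
               VHA_CR_CLK_CTRL0_BIF_AUTO;
    }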
+
+
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_CLK_STATUS0_MODE_GATED                     (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_CLK_STATUS0_MODE_RUNNING                   (0x00000001U)
+
+
+/*
+    Register VHA_CR_CLK_STATUS0
+*/
+#define VHA_CR_CLK_STATUS0                                (0x0008U)
+#define VHA_CR_CLK_STATUS0_MASKFULL                       (IMG_UINT64_C(0x00000007FFD00104))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_SHIFT                  (34U)
+#define VHA_CR_CLK_STATUS0_CNN_EWO_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_EWO_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_EWO_RUNNING                (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_SHIFT                 (33U)
+#define VHA_CR_CLK_STATUS0_CNN_PACK_CLRMSK                (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_PACK_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_PACK_RUNNING               (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_SHIFT                  (32U)
+#define VHA_CR_CLK_STATUS0_CNN_OIN_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_OIN_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_OIN_RUNNING                (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_SHIFT                 (31U)
+#define VHA_CR_CLK_STATUS0_CNN_POOL_CLRMSK                (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_POOL_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_POOL_RUNNING               (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_SHIFT                   (30U)
+#define VHA_CR_CLK_STATUS0_CNN_SB_CLRMSK                  (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_SB_GATED                   (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_SB_RUNNING                 (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_SHIFT                 (29U)
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_XBAR_RUNNING               (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_CLK_STATUS0_CNN_NORM_SHIFT                 (28U)
+#define VHA_CR_CLK_STATUS0_CNN_NORM_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_NORM_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_NORM_RUNNING               (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACT_SHIFT                  (27U)
+#define VHA_CR_CLK_STATUS0_CNN_ACT_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACT_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACT_RUNNING                (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_SHIFT                (26U)
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_CLRMSK               (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_GATED                (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_ACCUM_RUNNING              (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CNV_SHIFT                  (25U)
+#define VHA_CR_CLK_STATUS0_CNN_CNV_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CNV_GATED                  (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CNV_RUNNING                (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_SHIFT                 (24U)
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_GATED                 (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_CLK_STATUS0_CNN_CBUF_RUNNING               (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_SHIFT                 (23U)
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_CLRMSK                (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_GATED                 (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_IBUF_RUNNING               (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_CLK_STATUS0_CNN_CMD_SHIFT                  (22U)
+#define VHA_CR_CLK_STATUS0_CNN_CMD_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_CMD_RUNNING                (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_CLK_STATUS0_CNN_SHIFT                      (20U)
+#define VHA_CR_CLK_STATUS0_CNN_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_CR_CLK_STATUS0_CNN_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_CNN_RUNNING                    (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_CLK_STATUS0_SLC_SHIFT                      (8U)
+#define VHA_CR_CLK_STATUS0_SLC_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CLK_STATUS0_SLC_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_SLC_RUNNING                    (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_CLK_STATUS0_BIF_SHIFT                      (2U)
+#define VHA_CR_CLK_STATUS0_BIF_CLRMSK                     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CLK_STATUS0_BIF_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_CLK_STATUS0_BIF_RUNNING                    (IMG_UINT64_C(0x0000000000000004))  
+
+
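+/* Illustrative sketch, not part of the generated register list: the
+ * *_CLRMSK constants clear a field while leaving the rest of the register
+ * intact, and *_GATED/*_RUNNING are full-width field values, so a status
+ * field can be tested by masking with the inverted CLRMSK. Assumes a
+ * mapped register base, the Linux readq() accessor from <linux/io.h>,
+ * and the VHA_CR_CLK_STATUS0 offset defined earlier in this file; the
+ * example_* helper name is hypothetical. */
+#if 0
+#include <linux/io.h>      /* readq(), writeq() */
+#include <linux/types.h>   /* u32, u64, bool */
+
+/* Return true when the top-level CNN clock reports RUNNING. */
+static inline bool example_cnn_clock_running(void __iomem *regs)
+{
+	u64 status = readq(regs + VHA_CR_CLK_STATUS0);
+
+	return (status & ~VHA_CR_CLK_STATUS0_CNN_CLRMSK) ==
+			VHA_CR_CLK_STATUS0_CNN_RUNNING;
+}
+#endif
+
+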
+/*
+    Register VHA_CR_PRODUCT_ID
+*/
+#define VHA_CR_PRODUCT_ID                                 (0x0018U)
+#define VHA_CR_PRODUCT_ID_MASKFULL                        (IMG_UINT64_C(0x00000000FFFF0000))
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_SHIFT            (16U)
+#define VHA_CR_PRODUCT_ID_IMG_PRODUCT_ID_CLRMSK           (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+
+
+/*
+    Register VHA_CR_CORE_ID
+*/
+#define VHA_CR_CORE_ID                                    (0x0020U)
+#define VHA_CR_CORE_ID_MASKFULL                           (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_BRANCH_ID_SHIFT                    (48U)
+#define VHA_CR_CORE_ID_BRANCH_ID_CLRMSK                   (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_CORE_ID_VERSION_ID_SHIFT                   (32U)
+#define VHA_CR_CORE_ID_VERSION_ID_CLRMSK                  (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT     (16U)
+#define VHA_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK    (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_CORE_ID_CONFIG_ID_SHIFT                    (0U)
+#define VHA_CR_CORE_ID_CONFIG_ID_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
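+/* Illustrative sketch, same assumptions as the example above: every
+ * multi-bit field in this file follows one pattern - mask with the
+ * inverted CLRMSK, then shift down by the matching SHIFT. The helper
+ * name is hypothetical. */
+#if 0
+static inline unsigned int example_core_branch_id(void __iomem *regs)
+{
+	u64 core_id = readq(regs + VHA_CR_CORE_ID);
+
+	return (unsigned int)((core_id & ~VHA_CR_CORE_ID_BRANCH_ID_CLRMSK) >>
+			      VHA_CR_CORE_ID_BRANCH_ID_SHIFT);
+}
+#endif
+
+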
+/*
+    Register VHA_CR_CORE_IP_INTEGRATOR_ID
+*/
+#define VHA_CR_CORE_IP_INTEGRATOR_ID                      (0x0028U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_SHIFT          (0U)
+#define VHA_CR_CORE_IP_INTEGRATOR_ID_VALUE_CLRMSK         (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CHANGELIST
+*/
+#define VHA_CR_CORE_IP_CHANGELIST                         (0x0030U)
+#define VHA_CR_CORE_IP_CHANGELIST_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_SHIFT             (0U)
+#define VHA_CR_CORE_IP_CHANGELIST_VALUE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+    Register VHA_CR_CORE_IP_CONFIG
+*/
+#define VHA_CR_CORE_IP_CONFIG                             (0x0038U)
+#define VHA_CR_CORE_IP_CONFIG_MASKFULL                    (IMG_UINT64_C(0x0000000000000F03))
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_SHIFT (11U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_CLRMSK (0XFFFFF7FFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_SUBSET_EN (0X00000800U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_SHIFT (10U)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_CLRMSK (0XFFFFFBFFU)
+#define VHA_CR_CORE_IP_CONFIG_SIGNATURES_SUPPORTED_ALL_EN (0X00000400U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_CLRMSK (0XFFFFFDFFU)
+#define VHA_CR_CORE_IP_CONFIG_RANDOM_STALLERS_SUPPORTED_EN (0X00000200U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_SHIFT         (8U)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_CLRMSK        (0XFFFFFEFFU)
+#define VHA_CR_CORE_IP_CONFIG_RTM_SUPPORTED_EN            (0X00000100U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_SHIFT         (1U)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_CLRMSK        (0XFFFFFFFDU)
+#define VHA_CR_CORE_IP_CONFIG_SCL_SUPPORTED_EN            (0X00000002U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_SHIFT         (0U)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_CORE_IP_CONFIG_CNN_SUPPORTED_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_VHA_AXI_RESET_CTRL
+*/
+#define VHA_CR_VHA_AXI_RESET_CTRL                         (0x0078U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_MASKFULL                (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_SHIFT (0U)
+#define VHA_CR_VHA_AXI_RESET_CTRL_SOFT_RESET_CYCLES_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_RESET_CTRL
+*/
+#define VHA_CR_RESET_CTRL                                 (0x0080U)
+#define VHA_CR_RESET_CTRL_MASKFULL                        (IMG_UINT64_C(0x00000000C0000107))
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_SHIFT        (31U)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_CLRMSK       (0X7FFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_SYS_SOFT_RESET_EN           (0X80000000U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_SHIFT        (30U)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_CLRMSK       (0XBFFFFFFFU)
+#define VHA_CR_RESET_CTRL_VHA_AXI_SOFT_RESET_EN           (0X40000000U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_SHIFT       (8U)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_CLRMSK      (0XFFFFFEFFU)
+#define VHA_CR_RESET_CTRL_VHA_CNN0_SOFT_RESET_EN          (0X00000100U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_SHIFT        (2U)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_CLRMSK       (0XFFFFFFFBU)
+#define VHA_CR_RESET_CTRL_VHA_SLC_SOFT_RESET_EN           (0X00000004U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_SHIFT        (1U)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_RESET_CTRL_VHA_BIF_SOFT_RESET_EN           (0X00000002U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_SHIFT            (0U)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_RESET_CTRL_VHA_SOFT_RESET_EN               (0X00000001U)
+
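+
+/* Illustrative sketch, same assumptions as the earlier examples: a full
+ * soft reset presumably requires the bit to be pulsed (set, then
+ * cleared); any settle-time or ordering requirements between the two
+ * writes are hardware specific and not shown. */
+#if 0
+static inline void example_vha_soft_reset(void __iomem *regs)
+{
+	writeq(VHA_CR_RESET_CTRL_VHA_SOFT_RESET_EN, regs + VHA_CR_RESET_CTRL);
+	writeq(0, regs + VHA_CR_RESET_CTRL);
+}
+#endif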
+
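+
+/*
+    Event type field layout, shared by the VHA_CR_OS0_VHA_EVENT_ENABLE,
+    VHA_CR_OS0_VHA_EVENT_STATUS and VHA_CR_OS0_VHA_EVENT_CLEAR registers
+    defined below
+*/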
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_SHIFT         (18U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_CLRMSK        (0XFFFBFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_AXI_ERROR_EN            (0X00040000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_SHIFT    (16U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_CLRMSK   (0XFFFEFFFFU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_MMU_PAGE_FAULT_EN       (0X00010000U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_SHIFT      (3U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_CLRMSK     (0XFFFFFFF7U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_MEM_WDT_EN         (0X00000008U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_WDT_SHIFT          (2U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_WDT_CLRMSK         (0XFFFFFFFBU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_WDT_EN             (0X00000004U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_SHIFT        (1U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_CLRMSK       (0XFFFFFFFDU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_ERROR_EN           (0X00000002U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_SHIFT     (0U)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_CLRMSK    (0XFFFFFFFEU)
+#define VHA_CR_VHA_EVENT_TYPE_VHA_CNN0_COMPLETE_EN        (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_ENABLE
+*/
+#define VHA_CR_OS0_VHA_EVENT_ENABLE                       (0x0088U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_MASKFULL              (IMG_UINT64_C(0x000000000005000F))
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_WDT_SHIFT    (2U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_WDT_CLRMSK   (0XFFFFFFFBU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_WDT_EN       (0X00000004U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_ENABLE_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_STATUS
+*/
+#define VHA_CR_OS0_VHA_EVENT_STATUS                       (0x0090U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000005000F))
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_SHIFT   (18U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_CLRMSK  (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_AXI_ERROR_EN      (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_MMU_PAGE_FAULT_EN (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_MEM_WDT_EN   (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_WDT_SHIFT    (2U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_WDT_CLRMSK   (0XFFFFFFFBU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_WDT_EN       (0X00000004U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_SHIFT  (1U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_ERROR_EN     (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN  (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_VHA_EVENT_CLEAR
+*/
+#define VHA_CR_OS0_VHA_EVENT_CLEAR                        (0x0098U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_MASKFULL               (IMG_UINT64_C(0x000000000005000F))
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_SHIFT    (18U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_CLRMSK   (0XFFFBFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_AXI_ERROR_EN       (0X00040000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_SHIFT (16U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_MMU_PAGE_FAULT_EN  (0X00010000U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_SHIFT (3U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_MEM_WDT_EN    (0X00000008U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_WDT_SHIFT     (2U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_WDT_CLRMSK    (0XFFFFFFFBU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_WDT_EN        (0X00000004U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_SHIFT   (1U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_CLRMSK  (0XFFFFFFFDU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_ERROR_EN      (0X00000002U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_SHIFT (0U)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define VHA_CR_OS0_VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN   (0X00000001U)
+
+
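+/* Illustrative sketch, same assumptions as the earlier examples: the
+ * ENABLE/STATUS/CLEAR trio above shares one field layout, so a typical
+ * handler reads STATUS, masks with the enabled events, and acknowledges
+ * by writing the same bits back to CLEAR. */
+#if 0
+static inline u64 example_vha_ack_events(void __iomem *regs)
+{
+	u64 events = readq(regs + VHA_CR_OS0_VHA_EVENT_STATUS) &
+		     readq(regs + VHA_CR_OS0_VHA_EVENT_ENABLE);
+
+	writeq(events, regs + VHA_CR_OS0_VHA_EVENT_CLEAR);
+	return events;
+}
+#endif
+
+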
+#define VHA_CR_SYS_CLK_CTRL0_MODE_MASK                    (0x00000003U)
+/*
+The domain clock is forced off */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_OFF                     (0x00000000U)
+/*
+The domain clock is forced on */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_ON                      (0x00000001U)
+/*
+Automatic clock gating is active; the domain clock is only on whilst data is being processed */
+#define VHA_CR_SYS_CLK_CTRL0_MODE_AUTO                    (0x00000002U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_CTRL0
+*/
+#define VHA_CR_SYS_CLK_CTRL0                              (0x2000U)
+#define VHA_CR_SYS_CLK_CTRL0_MASKFULL                     (IMG_UINT64_C(0x0000000000000030))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_SHIFT                    (4U)
+#define VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK                   (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_OFF                      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_CTRL0_SLC_ON                       (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_SYS_CLK_CTRL0_SLC_AUTO                     (IMG_UINT64_C(0x0000000000000020))  
+
+
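+/* Illustrative sketch, same assumptions as the earlier examples:
+ * selecting automatic clock gating for the SLC domain with a
+ * read-modify-write of the CLRMSK/value pair above. */
+#if 0
+static inline void example_slc_clk_auto(void __iomem *regs)
+{
+	u64 ctrl = readq(regs + VHA_CR_SYS_CLK_CTRL0);
+
+	ctrl = (ctrl & VHA_CR_SYS_CLK_CTRL0_SLC_CLRMSK) |
+	       VHA_CR_SYS_CLK_CTRL0_SLC_AUTO;
+	writeq(ctrl, regs + VHA_CR_SYS_CLK_CTRL0);
+}
+#endif
+
+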
+/*
+Clock is gated and the module is inactive */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_GATED                 (0x00000000U)
+/*
+Clock is running */
+#define VHA_CR_SYS_CLK_STATUS0_MODE_RUNNING               (0x00000001U)
+
+
+/*
+    Register VHA_CR_SYS_CLK_STATUS0
+*/
+#define VHA_CR_SYS_CLK_STATUS0                            (0x2008U)
+#define VHA_CR_SYS_CLK_STATUS0_MASKFULL                   (IMG_UINT64_C(0x0000000000000004))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_SHIFT                  (2U)
+#define VHA_CR_SYS_CLK_STATUS0_SLC_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_GATED                  (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_SYS_CLK_STATUS0_SLC_RUNNING                (IMG_UINT64_C(0x0000000000000004))  
+
+
+/*
+    Register VHA_CR_AXI_EXACCESS
+*/
+#define VHA_CR_AXI_EXACCESS                               (0x2168U)
+#define VHA_CR_AXI_EXACCESS_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_SHIFT            (0U)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_AXI_EXACCESS_SOCIF_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_HOST_IRQ_ENABLE
+*/
+#define VHA_CR_MMU_HOST_IRQ_ENABLE                        (0xE1A0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_SHIFT         (0U)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_MMU_HOST_IRQ_ENABLE_FAULT_PM_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CONTROL                            (0x0100U)
+#define VHA_CR_OS0_CNN_CONTROL_MASKFULL                   (IMG_UINT64_C(0x000000000000007F))
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT        (1U)
+#define VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK       (0XFFFFFF81U)
+#define VHA_CR_OS0_CNN_CONTROL_START_SHIFT                (0U)
+#define VHA_CR_OS0_CNN_CONTROL_START_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_CNN_CONTROL_START_EN                   (0X00000001U)
+
+
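+/* Illustrative sketch, same assumptions as the earlier examples: kicking
+ * the CNN command parser. The _MIN1 suffix used throughout this file
+ * suggests a minus-one encoding, so a command stream of cmd_size units is
+ * written as cmd_size - 1; the units themselves are not specified here. */
+#if 0
+static inline void example_cnn_kick(void __iomem *regs, u32 cmd_size)
+{
+	u32 ctrl = (((cmd_size - 1) <<
+		     VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_SHIFT) &
+		    ~VHA_CR_OS0_CNN_CONTROL_CMD_SIZE_MIN1_CLRMSK) |
+		   VHA_CR_OS0_CNN_CONTROL_START_EN;
+
+	writeq(ctrl, regs + VHA_CR_OS0_CNN_CONTROL);
+}
+#endif
+
+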
+/*
+    Register VHA_CR_OS0_CNN_STATUS
+*/
+#define VHA_CR_OS0_CNN_STATUS                             (0x0108U)
+#define VHA_CR_OS0_CNN_STATUS_MASKFULL                    (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_OS0_CNN_STATUS_PASS_COUNT_SHIFT            (16U)
+#define VHA_CR_OS0_CNN_STATUS_PASS_COUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_SHIFT           (8U)
+#define VHA_CR_OS0_CNN_STATUS_LAYER_COUNT_CLRMSK          (0XFFFF00FFU)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_SHIFT          (0U)
+#define VHA_CR_OS0_CNN_STATUS_STREAM_COUNT_CLRMSK         (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CMD_BASE_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS                   (0x0120U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_MASKFULL          (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_SHIFT   (8U)
+#define VHA_CR_OS0_CNN_CMD_BASE_ADDRESS_BASE_ADDR_CLRMSK  (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS_USED
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED                   (0x0138U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_MASKFULL          (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_SHIFT (7U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR7_USED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_SHIFT (6U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR6_USED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_SHIFT (5U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR5_USED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_SHIFT (4U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR4_USED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_SHIFT (3U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR3_USED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_SHIFT (2U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR2_USED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_SHIFT (1U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR1_USED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_SHIFT (0U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_USED_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS0
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0                       (0x0140U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS1
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1                       (0x0148U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS1_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS2
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2                       (0x0150U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS2_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS3
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3                       (0x0158U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS3_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS4
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4                       (0x0160U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS4_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS5
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5                       (0x0168U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS5_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS6
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6                       (0x0170U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS6_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+    Register VHA_CR_OS0_CNN_ALT_ADDRESS7
+*/
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7                       (0x0178U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_MASKFULL              (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_SHIFT        (8U)
+#define VHA_CR_OS0_CNN_ALT_ADDRESS7_ALT_ADDR_CLRMSK       (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
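+/* Illustrative sketch, same assumptions as the earlier examples:
+ * programming alternate address slot 0. The _ALIGNSHIFT/_ALIGNSIZE pairs
+ * on the CRC/DEBUG address registers later in this file suggest these
+ * address registers take a 256-byte-aligned device address written
+ * as-is, with the CLRMSK making bits [39:8] the valid range. */
+#if 0
+static inline void example_set_alt_address0(void __iomem *regs, u64 dev_addr)
+{
+	writeq(dev_addr & ~VHA_CR_OS0_CNN_ALT_ADDRESS0_ALT_ADDR_CLRMSK,
+	       regs + VHA_CR_OS0_CNN_ALT_ADDRESS0);
+}
+#endif
+
+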
+/*
+    Register VHA_CR_OS0_CNN_WRITEBACK_CONTROL
+*/
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL                  (0x0180U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_WRITEBACK_CONTROL_WRITE_BACK_VALUE_CLRMSK (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_CMD_MH_CONTROL
+*/
+#define VHA_CR_CNN_CMD_MH_CONTROL                         (0x0200U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MASKFULL                (IMG_UINT64_C(0x0000000000001F3F))
+#define VHA_CR_CNN_CMD_MH_CONTROL_CTXT_PASID_SHIFT        (8U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_CTXT_PASID_CLRMSK       (0XFFFFE0FFU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_SHIFT  (4U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFCFU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (3U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_GPU_PIPE_COHERENT_EN    (0X00000008U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_SHIFT  (2U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CMD_MH_CONTROL_SLC_CACHE_POLICY_EN     (0X00000004U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_PERSISTENCE_SHIFT       (0U)
+#define VHA_CR_CNN_CMD_MH_CONTROL_PERSISTENCE_CLRMSK      (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_IBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_IBUF_MH_CONTROL                        (0x0208U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_IBUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_CBUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_CBUF_MH_CONTROL                        (0x0210U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_CBUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_ABUF_MH_CONTROL
+*/
+#define VHA_CR_CNN_ABUF_MH_CONTROL                        (0x0218U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MASKFULL               (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_GPU_PIPE_COHERENT_EN   (0X00000004U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_PERSISTENCE_SHIFT      (0U)
+#define VHA_CR_CNN_ABUF_MH_CONTROL_PERSISTENCE_CLRMSK     (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_OUTPACK_MH_CONTROL
+*/
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL                     (0x0220U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MASKFULL            (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_GPU_PIPE_COHERENT_EN (0X00000004U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_PERSISTENCE_SHIFT   (0U)
+#define VHA_CR_CNN_OUTPACK_MH_CONTROL_PERSISTENCE_CLRMSK  (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_CNN_ELEMENTOPS_MH_CONTROL
+*/
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL                  (0x0228U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MASKFULL         (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_SHIFT (3U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_MAX_BURST_LENGTH_CLRMSK (0XFFFFFFE7U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_SHIFT (2U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_GPU_PIPE_COHERENT_EN (0X00000004U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_PERSISTENCE_SHIFT (0U)
+#define VHA_CR_CNN_ELEMENTOPS_MH_CONTROL_PERSISTENCE_CLRMSK (0XFFFFFFFCU)
+
+
+#define VHA_CR_CNN_DEBUG_CTRL_MASK                        (0x00000003U)
+/*
+Debug is switched off */
+#define VHA_CR_CNN_DEBUG_CTRL_DISABLE                     (0x00000000U)
+/*
+Debug is output at the end of each stream */
+#define VHA_CR_CNN_DEBUG_CTRL_STREAM                      (0x00000001U)
+/*
+Debug is output at the end of each layer */
+#define VHA_CR_CNN_DEBUG_CTRL_LAYER                       (0x00000002U)
+/*
+Debug is output at the end of each pass */
+#define VHA_CR_CNN_DEBUG_CTRL_PASS                        (0x00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_CONTROL
+*/
+#define VHA_CR_OS0_CNN_CRC_CONTROL                        (0x0300U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_SHIFT   (0U)
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_DISABLE (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM  (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_LAYER   (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_PASS    (IMG_UINT64_C(0x0000000000000003))  
+
+
+/*
+    Register VHA_CR_OS0_CNN_CRC_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_CRC_ADDRESS                        (0x0308U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_MASKFULL               (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_SHIFT     (8U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK    (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_ALIGNSIZE (256U)
+
+
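+/* Illustrative sketch, same assumptions as the earlier examples: enabling
+ * per-stream CRC capture by pointing the unit at a 256-byte-aligned
+ * buffer (per the ALIGNSIZE above) before selecting STREAM granularity. */
+#if 0
+static inline void example_enable_crc(void __iomem *regs, u64 crc_dev_addr)
+{
+	writeq(crc_dev_addr & ~VHA_CR_OS0_CNN_CRC_ADDRESS_CNN_CRC_ADDR_CLRMSK,
+	       regs + VHA_CR_OS0_CNN_CRC_ADDRESS);
+	writeq(VHA_CR_OS0_CNN_CRC_CONTROL_CNN_CRC_ENABLE_STREAM,
+	       regs + VHA_CR_OS0_CNN_CRC_CONTROL);
+}
+#endif
+
+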
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_ADDRESS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS                      (0x0310U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_MASKFULL             (IMG_UINT64_C(0x000000FFFFFFFF00))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_SHIFT (8U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSHIFT (8U)
+#define VHA_CR_OS0_CNN_DEBUG_ADDRESS_CNN_DEBUG_ADDR_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_SIZE
+*/
+#define VHA_CR_OS0_CNN_DEBUG_SIZE                         (0x0318U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_MASKFULL                (IMG_UINT64_C(0x0000000000FFFF00))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_SHIFT    (8U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_CLRMSK   (IMG_UINT64_C(0XFFFFFFFFFF0000FF))
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSHIFT (8U)
+#define VHA_CR_OS0_CNN_DEBUG_SIZE_CNN_DEBUG_SIZE_ALIGNSIZE (256U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_CONTROL
+*/
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL                      (0x0320U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_MASKFULL             (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_SHIFT (2U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_CLRMSK (0XFFFFFFF3U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_DISABLE (0X00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_STREAM (0X00000004U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_LAYER (0X00000008U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_PERF_ENABLE_PASS (0X0000000CU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_CLRMSK (0XFFFFFFFCU)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_DISABLE (0X00000000U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_STREAM (0X00000001U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_LAYER (0X00000002U)
+#define VHA_CR_OS0_CNN_DEBUG_CONTROL_CNN_BAND_ENABLE_PASS (0X00000003U)
+
+
+/*
+    Register VHA_CR_OS0_CNN_DEBUG_STATUS
+*/
+#define VHA_CR_OS0_CNN_DEBUG_STATUS                       (0x0328U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_SHIFT (0U)
+#define VHA_CR_OS0_CNN_DEBUG_STATUS_CNN_DEBUG_OFFSET_CLRMSK (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_WDT_COMPAREMATCH                       (0x0330U)
+#define VHA_CR_CNN_WDT_COMPAREMATCH_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_WDT_COMPAREMATCH_REG_SHIFT             (0U)
+#define VHA_CR_CNN_WDT_COMPAREMATCH_REG_CLRMSK            (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_TIMER
+*/
+#define VHA_CR_CNN_WDT_TIMER                              (0x0338U)
+#define VHA_CR_CNN_WDT_TIMER_MASKFULL                     (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_WDT_TIMER_VALUE_SHIFT                  (0U)
+#define VHA_CR_CNN_WDT_TIMER_VALUE_CLRMSK                 (0X00000000U)
+
+
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_MASK             (0x00000003U)
+/*
+WDT is disabled */
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_NONE             (0x00000000U)
+/*
+WDT is cleared when the CMD parser starts a pass or the CMD parser is kicked */
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS        (0x00000001U)
+/*
+WDT is cleared when the CMD parser is kicked */
+#define VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK             (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_WDT_CTRL
+*/
+#define VHA_CR_CNN_WDT_CTRL                               (0x0340U)
+#define VHA_CR_CNN_WDT_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_WDT_CTRL_MODE_SHIFT                    (0U)
+#define VHA_CR_CNN_WDT_CTRL_MODE_CLRMSK                   (0XFFFFFFFCU)
+#define VHA_CR_CNN_WDT_CTRL_MODE_NONE                     (0X00000000U)
+#define VHA_CR_CNN_WDT_CTRL_MODE_KICK_PASS                (0X00000001U)
+#define VHA_CR_CNN_WDT_CTRL_MODE_KICK                     (0X00000002U)
+
+
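+/* Illustrative sketch, same assumptions as the earlier examples: arming
+ * the CNN watchdog by loading the compare value, then selecting when the
+ * counter is cleared (here on a kick or pass start). The timeout units,
+ * presumably core clock cycles, are not specified here. */
+#if 0
+static inline void example_arm_cnn_wdt(void __iomem *regs, u32 timeout)
+{
+	writeq(timeout, regs + VHA_CR_CNN_WDT_COMPAREMATCH);
+	writeq(VHA_CR_CNN_WDT_CTRL_MODE_KICK_PASS,
+	       regs + VHA_CR_CNN_WDT_CTRL);
+}
+#endif
+
+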
+/*
+    Register VHA_CR_CNN_MEM_WDT_COMPAREMATCH
+*/
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH                   (0x0348U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_SHIFT         (0U)
+#define VHA_CR_CNN_MEM_WDT_COMPAREMATCH_REG_CLRMSK        (0X00000000U)
+
+
+/*
+    Register VHA_CR_CNN_ARB_STALL_RATIO
+*/
+#define VHA_CR_CNN_ARB_STALL_RATIO                        (0x0350U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_MASKFULL               (IMG_UINT64_C(0x000000000FFFFFFF))
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_SHIFT           (24U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_OUTPUT_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_SHIFT      (20U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_6_CLRMSK     (0XFF0FFFFFU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_SHIFT      (16U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_5_CLRMSK     (0XFFF0FFFFU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_SHIFT      (12U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_4_CLRMSK     (0XFFFF0FFFU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_SHIFT      (8U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_3_CLRMSK     (0XFFFFF0FFU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_SHIFT      (4U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_2_CLRMSK     (0XFFFFFF0FU)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_SHIFT      (0U)
+#define VHA_CR_CNN_ARB_STALL_RATIO_REQUESTER_1_CLRMSK     (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_TIMER
+*/
+#define VHA_CR_CNN_MEM_WDT_TIMER                          (0x0358U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_MASKFULL                 (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_SHIFT              (0U)
+#define VHA_CR_CNN_MEM_WDT_TIMER_VALUE_CLRMSK             (0X00000000U)
+
+
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_MASK     (0x00000003U)
+/*
+WDT is disabled */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_NONE     (0x00000000U)
+/*
+WDT is cleared when the CMD parser starts a pass or the CMD parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS (0x00000001U)
+/*
+WDT is cleared when the CMD parser is kicked */
+#define VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK     (0x00000002U)
+
+
+/*
+    Register VHA_CR_CNN_MEM_WDT_CTRL
+*/
+#define VHA_CR_CNN_MEM_WDT_CTRL                           (0x0360U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MASKFULL                  (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_SHIFT                (0U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_NONE                 (0X00000000U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK_PASS            (0X00000001U)
+#define VHA_CR_CNN_MEM_WDT_CTRL_MODE_KICK                 (0X00000002U)
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_FE
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE                (0x0368U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CNV_ABUF_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CNV_ABUF_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_CNV_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_CNV_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_ACT_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_ACT_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ACT_NORM_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ACT_NORM_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_NORM_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_IBUF_NORM_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_CNV_SHIFT (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_CNV_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_OUTPACK_SHIFT (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_ABUF_OUTPACK_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_ABUF_SHIFT (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_FE_CBUF_ABUF_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_CNN_DATAPATH_STALL_RATIO_BE
+*/
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE                (0x0370U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_XBAR_SHIFT (28U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_XBAR_CLRMSK (0X0FFFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_OIN_SHIFT (24U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_OIN_CLRMSK (0XF0FFFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_OUTPACK_SHIFT (20U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_OUTPACK_CLRMSK (0XFF0FFFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_XBAR_SHIFT (16U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_XBAR_CLRMSK (0XFFF0FFFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_POOL_SHIFT (12U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_XBAR_POOL_CLRMSK (0XFFFF0FFFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_SB_SHIFT  (8U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_NORM_SB_CLRMSK (0XFFFFF0FFU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_SB_SHIFT  (4U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_POOL_SB_CLRMSK (0XFFFFFF0FU)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_SB_SHIFT   (0U)
+#define VHA_CR_CNN_DATAPATH_STALL_RATIO_BE_OIN_SB_CLRMSK  (0XFFFFFFF0U)
+
+
+/*
+    Register VHA_CR_RTM_CTRL
+*/
+#define VHA_CR_RTM_CTRL                                   (0x0380U)
+#define VHA_CR_RTM_CTRL_MASKFULL                          (IMG_UINT64_C(0x000000008FFFFFF8))
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_SHIFT                  (31U)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_CLRMSK                 (0X7FFFFFFFU)
+#define VHA_CR_RTM_CTRL_RTM_ENABLE_EN                     (0X80000000U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT                (3U)
+#define VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK               (0XF0000007U)
+
+
+/*
+    Register VHA_CR_RTM_DATA
+*/
+#define VHA_CR_RTM_DATA                                   (0x0388U)
+#define VHA_CR_RTM_DATA_MASKFULL                          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_RTM_DATA_RTM_DATA_SHIFT                    (0U)
+#define VHA_CR_RTM_DATA_RTM_DATA_CLRMSK                   (0X00000000U)
+
+
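+/* Illustrative sketch, same assumptions as the earlier examples: sampling
+ * one RTM signal group by enabling RTM with a selector value, then
+ * reading the 32-bit data. Whether a delay or handshake is required
+ * between the two accesses is hardware specific. */
+#if 0
+static inline u32 example_rtm_sample(void __iomem *regs, u32 selector)
+{
+	u32 ctrl = VHA_CR_RTM_CTRL_RTM_ENABLE_EN |
+		   ((selector << VHA_CR_RTM_CTRL_RTM_SELECTOR_SHIFT) &
+		    ~VHA_CR_RTM_CTRL_RTM_SELECTOR_CLRMSK);
+
+	writeq(ctrl, regs + VHA_CR_RTM_CTRL);
+	return (u32)readq(regs + VHA_CR_RTM_DATA);
+}
+#endif
+
+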
+/*
+    Register VHA_CR_CNN_IP_CONFIG0
+*/
+#define VHA_CR_CNN_IP_CONFIG0                             (0x03A0U)
+#define VHA_CR_CNN_IP_CONFIG0_MASKFULL                    (IMG_UINT64_C(0x0000000000003FFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_SHIFT (13U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_OUTPUT_FIXTOFIX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000002000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DUAL_8BIT_CONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000001000))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_SHIFT (11U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_DECOMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_SHIFT (10U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DATA_COMPRESSION_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000400))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_SHIFT (9U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_CALC_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000200))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_FETCH_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000100))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_SHIFT (7U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ELEMENT_OPS_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000080))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_SHIFT (6U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_AVG_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000040))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_SHIFT (5U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_POOL_MAX_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ICN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000010))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_SHIFT (3U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_NORM_ACN_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_SHIFT (2U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_ACT_LUT_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000004))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_SHIFT (1U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_DECONV_SUPPORTED_EN (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_CNN_IP_CONFIG0_VHA_CNN_CONV_SUPPORTED_EN   (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG1
+*/
+#define VHA_CR_CNN_IP_CONFIG1                             (0x03A8U)
+#define VHA_CR_CNN_IP_CONFIG1_MASKFULL                    (IMG_UINT64_C(0x000000003F0FFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_SHIFT (24U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_WEIGHT_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC0FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_ACTIVATION_DATA_WIDTH_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0FFFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_SHIFT (12U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_FILTERS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0FFF))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_NUM_COEFFS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF00F))
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG1_VHA_CNN_CONV_ENGINE_CALC_BLOCKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG2
+*/
+#define VHA_CR_CNN_IP_CONFIG2                             (0x03B0U)
+#define VHA_CR_CNN_IP_CONFIG2_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_SHIFT (28U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_NORM_UNITS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0FFFFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_SHIFT (20U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_SHARED_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_SHIFT (4U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF0000F))
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG2_VHA_CNN_COEFF_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+    Register VHA_CR_CNN_IP_CONFIG3
+*/
+#define VHA_CR_CNN_IP_CONFIG3                             (0x03B8U)
+#define VHA_CR_CNN_IP_CONFIG3_MASKFULL                    (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_SHIFT (16U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_ACT_LUT_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_SHIFT (8U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_SIZE_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_SHIFT (0U)
+#define VHA_CR_CNN_IP_CONFIG3_VHA_CNN_INPUT_BUFFER_BANKS_MIN1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register VHA_CR_BIF_OUTSTANDING_READ
+*/
+#define VHA_CR_BIF_OUTSTANDING_READ                       (0xF098U)
+#define VHA_CR_BIF_OUTSTANDING_READ_MASKFULL              (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT         (0U)
+#define VHA_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK        (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_BIF_PAGE_FAULT_STALL
+*/
+#define VHA_CR_BIF_PAGE_FAULT_STALL                       (0xF0B0U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_MASKFULL              (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_SHIFT          (0U)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_CLRMSK         (0XFFFFFFFEU)
+#define VHA_CR_BIF_PAGE_FAULT_STALL_STATUS_EN             (0X00000001U)
+
+
+/*
+    Register VHA_CR_BIF_RTN_FIFO_WORD_COUNT
+*/
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT                    (0xF230U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_MASKFULL           (IMG_UINT64_C(0x00000000000001FF))
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_SHIFT      (0U)
+#define VHA_CR_BIF_RTN_FIFO_WORD_COUNT_COUNTER_CLRMSK     (0XFFFFFE00U)
+
+
+/*
+    Register VHA_CR_IDLE_HYSTERESIS_COUNT
+*/
+#define VHA_CR_IDLE_HYSTERESIS_COUNT                      (0x1000U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_MASKFULL             (IMG_UINT64_C(0x0000001F1F1F1F1F))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_SHIFT        (32U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_SYS_CLRMSK       (IMG_UINT64_C(0XFFFFFFE0FFFFFFFF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_FE_SHIFT         (24U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_FE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFE0FFFFFF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_BE_SHIFT         (16U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_BE_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_SHIFT        (8U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_CNN_TOP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFE0FF))
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_SHIFT       (0U)
+#define VHA_CR_IDLE_HYSTERESIS_COUNT_VHA_CTRL_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+    Register VHA_CR_SOCIF_WAKEUP_ENABLE
+*/
+#define VHA_CR_SOCIF_WAKEUP_ENABLE                        (0x1008U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_SHIFT           (0U)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_CLRMSK          (0XFFFFFFFEU)
+#define VHA_CR_SOCIF_WAKEUP_ENABLE_ALWAYS_EN              (0X00000001U)
+
+
+/*
+    Register VHA_CR_REGBANK_REQUEST_INVALID
+*/
+#define VHA_CR_REGBANK_REQUEST_INVALID                    (0x1018U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_MASKFULL           (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_SHIFT         (0U)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_CLRMSK        (0XFFFFFFFEU)
+#define VHA_CR_REGBANK_REQUEST_INVALID_FLAG_EN            (0X00000001U)
+
+
+/*
+    Register VHA_CR_RESET_CLK_CTRL
+*/
+#define VHA_CR_RESET_CLK_CTRL                             (0x1020U)
+#define VHA_CR_RESET_CLK_CTRL_MASKFULL                    (IMG_UINT64_C(0x00000000000003FF))
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_SHIFT               (8U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_SYS_CLRMSK              (0XFFFFFCFFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_SHIFT                (6U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_FE_CLRMSK               (0XFFFFFF3FU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_SHIFT                (4U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_BE_CLRMSK               (0XFFFFFFCFU)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_SHIFT               (2U)
+#define VHA_CR_RESET_CLK_CTRL_CNN_TOP_CLRMSK              (0XFFFFFFF3U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_SHIFT              (0U)
+#define VHA_CR_RESET_CLK_CTRL_VHA_CTRL_CLRMSK             (0XFFFFFFFCU)
+
+
+/*
+    Register VHA_CR_PM_VFP_TRAN_EN
+*/
+#define VHA_CR_PM_VFP_TRAN_EN                             (0x2100U)
+#define VHA_CR_PM_VFP_TRAN_EN_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PM_VFP_TRAN_EN_OP_SHIFT                    (0U)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_CLRMSK                   (0XFFFFFFFEU)
+#define VHA_CR_PM_VFP_TRAN_EN_OP_EN                       (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ
+*/
+#define VHA_CR_PERF_SLC0_READ                             (0x60A0U)
+#define VHA_CR_PERF_SLC0_READ_MASKFULL                    (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_COUNT_SHIFT                 (0U)
+#define VHA_CR_PERF_SLC0_READ_COUNT_CLRMSK                (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE
+*/
+#define VHA_CR_PERF_SLC0_WRITE                            (0x60A8U)
+#define VHA_CR_PERF_SLC0_WRITE_MASKFULL                   (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_SHIFT                (0U)
+#define VHA_CR_PERF_SLC0_WRITE_COUNT_CLRMSK               (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_DATA_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL                 (0x60B0U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_SHIFT     (0U)
+#define VHA_CR_PERF_SLC0_WRITE_DATA_STALL_COUNT_CLRMSK    (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_STALL                       (0x60B8U)
+#define VHA_CR_PERF_SLC0_READ_STALL_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT           (0U)
+#define VHA_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK          (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_STALL                      (0x60C0U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_READ_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_READ_ID_STALL                    (0x60F0U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_SHIFT        (0U)
+#define VHA_CR_PERF_SLC0_READ_ID_STALL_COUNT_CLRMSK       (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WRITE_ID_STALL
+*/
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL                   (0x60F8U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WRITE_ID_STALL_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1                   (0x6130U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE1_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE1
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1                   (0x6138U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE1_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2                   (0x6190U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE2_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE2
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2                   (0x6198U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE2_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3                   (0x61B0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE3_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE3
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3                   (0x61B8U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE3_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_RD_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4                   (0x61C0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_RD_BURST_SIZE4_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC0_WR_BURST_SIZE4
+*/
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4                   (0x61C8U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_SHIFT       (0U)
+#define VHA_CR_PERF_SLC0_WR_BURST_SIZE4_COUNT_CLRMSK      (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_HITS
+*/
+#define VHA_CR_PERF_SLC_SNOOP_HITS                        (0x61A0U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_SNOOP_HITS_COUNT_CLRMSK           (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_SNOOP_MISSES
+*/
+#define VHA_CR_PERF_SLC_SNOOP_MISSES                      (0x61A8U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_SNOOP_MISSES_COUNT_CLRMSK         (0X00000000U)
+
+
+/*
+    Register VHA_CR_PERF_RESET_FULL
+*/
+#define VHA_CR_PERF_RESET_FULL                            (0x3990U)
+#define VHA_CR_PERF_RESET_FULL_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_RESET_FULL_RANGE_SHIFT                (0U)
+#define VHA_CR_PERF_RESET_FULL_RANGE_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_PERF_RESET_FULL_RANGE_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_ENABLE_FULL
+*/
+#define VHA_CR_PERF_ENABLE_FULL                           (0x3998U)
+#define VHA_CR_PERF_ENABLE_FULL_MASKFULL                  (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_SHIFT               (0U)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_CLRMSK              (0XFFFFFFFEU)
+#define VHA_CR_PERF_ENABLE_FULL_RANGE_EN                  (0X00000001U)
+
+
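+/*
+    Usage sketch (illustrative, inferred from the RANGE_EN bits above, not
+    from SDK code): the performance block would be reset and (re)enabled
+    with single-bit writes such as:
+
+        writeq(VHA_CR_PERF_RESET_FULL_RANGE_EN,  base + VHA_CR_PERF_RESET_FULL);
+        writeq(VHA_CR_PERF_ENABLE_FULL_RANGE_EN, base + VHA_CR_PERF_ENABLE_FULL);
+*/
+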
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0                 (0xE008U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
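+/*
+    Usage sketch (illustrative): a parameter-management catalogue base value
+    appears to be composed from a 4 KB-aligned physical address, an initial
+    page index and the WRAP/VALID control bits; phys_addr and init_page are
+    hypothetical caller-supplied values:
+
+        uint64_t val =
+              ((uint64_t)init_page << VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_INIT_PAGE_SHIFT)
+            | (phys_addr & ~VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_ADDR_CLRMSK)
+            | VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_WRAP_EN
+            | VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_VALID_EN;
+*/
+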
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1                 (0xE010U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2                 (0xE018U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3                 (0xE020U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0                  (0xE028U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1                  (0xE030U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2                  (0xE038U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3                  (0xE040U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0                     (0xE048U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_WRAP_SHIFT          (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0                 (0xE050U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_MASKFULL        (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_SHIFT (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_SHIFT      (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_ADDR_CLRMSK     (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1                 (0xE058U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2                 (0xE060U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3                 (0xE068U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_MASKFULL        (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_WRAP_SHIFT      (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_WRAP_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_WRAP_EN         (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_SHIFT     (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_VALID_EN        (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0                  (0xE070U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_MASKFULL         (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_SHIFT  (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_SHIFT       (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_ADDR_CLRMSK      (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1                  (0xE078U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2                  (0xE080U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3                  (0xE088U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_MASKFULL         (IMG_UINT64_C(0x00000003FFFFC003))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_WRAP_SHIFT       (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_WRAP_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_WRAP_EN          (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_SHIFT      (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_VALID_EN         (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1                     (0xE090U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_MASKFULL            (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT     (40U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK    (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_SHIFT          (12U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_ADDR_CLRMSK         (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_WRAP_SHIFT          (1U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_WRAP_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_WRAP_EN             (IMG_UINT64_C(0X0000000000000002))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_SHIFT         (0U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_VALID_EN            (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST            (0xE098U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
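+/*
+    Note on the _LAST registers (derived from the macros above): the
+    _PAGE_ALIGNSHIFT/_PAGE_ALIGNSIZE pair indicates the PAGE field holds a
+    16384-byte-aligned value, i.e. the field is programmed as
+    (value >> _PAGE_ALIGNSHIFT) placed at _PAGE_SHIFT.
+*/
+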
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST            (0xE0A0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST            (0xE0A8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST            (0xE0B0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST             (0xE0B8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST             (0xE0C0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST             (0xE0C8U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST             (0xE0D0U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE0_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST                (0xE0D8U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST            (0xE0E0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST            (0xE0E8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST            (0xE0F0U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST            (0xE0F8U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_MASKFULL   (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_SHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_VCE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST             (0xE100U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE0_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST             (0xE108U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST             (0xE110U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE2_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST             (0xE118U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_MASKFULL    (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_SHIFT  (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_TE1_PIPE3_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST
+*/
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST                (0xE120U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_MASKFULL       (IMG_UINT64_C(0x00000003FFFFC000))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_SHIFT     (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_CLRMSK    (IMG_UINT64_C(0XFFFFFFFC00003FFF))
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSHIFT (14U)
+#define VHA_CR_MMU_PM_CAT_BASE_ALIST1_LAST_PAGE_ALIGNSIZE (16384U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CTRL_INVAL
+*/
+#define VHA_CR_OS0_MMU_CTRL_INVAL                         (0xE138U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_MASKFULL                (IMG_UINT64_C(0x0000000000000FFF))
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_SHIFT      (11U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_CLRMSK     (0XFFFFF7FFU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN         (0X00000800U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_SHIFT           (3U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_CONTEXT_CLRMSK          (0XFFFFF807U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_SHIFT                (2U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_CLRMSK               (0XFFFFFFFBU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN                   (0X00000004U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_SHIFT                (1U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_CLRMSK               (0XFFFFFFFDU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN                   (0X00000002U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_SHIFT                (0U)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN                   (0X00000001U)
+
+
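+/*
+    Usage sketch (illustrative, not from SDK code): the bit layout above
+    suggests a full TLB invalidation for all contexts and all page-table
+    levels would be requested with:
+
+        writeq(VHA_CR_OS0_MMU_CTRL_INVAL_ALL_CONTEXTS_EN |
+               VHA_CR_OS0_MMU_CTRL_INVAL_PC_EN |
+               VHA_CR_OS0_MMU_CTRL_INVAL_PD_EN |
+               VHA_CR_OS0_MMU_CTRL_INVAL_PT_EN,
+               base + VHA_CR_OS0_MMU_CTRL_INVAL);
+*/
+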
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT              (0xE140U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_MASKFULL     (IMG_UINT64_C(0x00000000000000FF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT     (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_OS0_MMU_CBASE_MAPPING
+*/
+#define VHA_CR_OS0_MMU_CBASE_MAPPING                      (0xE148U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_MASKFULL             (IMG_UINT64_C(0x000000001FFFFFFF))
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_SHIFT        (28U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_CLRMSK       (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_INVALID_EN           (0X10000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT      (0U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK     (0XF0000000U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE  (4096U)
+
+
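+/*
+    Usage sketch (illustrative): per BASE_ADDR_ALIGNSHIFT above, the page
+    catalogue base appears to be programmed as a 4 KB page-frame number for
+    the context selected via the MAPPING_CONTEXT register; ctx_id and
+    pc_phys_addr are hypothetical caller-supplied values:
+
+        writeq(ctx_id, base + VHA_CR_OS0_MMU_CBASE_MAPPING_CONTEXT);
+        writeq(pc_phys_addr >> VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+               base + VHA_CR_OS0_MMU_CBASE_MAPPING);
+*/
+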
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS1
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS1                      (0xE150U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_SHIFT          (62U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_SHIFT         (56U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_SHIFT        (48U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_SHIFT        (4U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_SHIFT            (3U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_SHIFT           (1U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_SHIFT          (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
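+/*
+    Decode sketch (illustrative): fault details are recovered with the usual
+    mask-and-shift pattern, e.g.:
+
+        uint64_t st = readq(base + VHA_CR_OS0_MMU_FAULT_STATUS1);
+        if (st & VHA_CR_OS0_MMU_FAULT_STATUS1_FAULT_EN) {
+            uint64_t addr = (st & ~VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_CLRMSK)
+                            >> VHA_CR_OS0_MMU_FAULT_STATUS1_ADDRESS_SHIFT;
+            bool is_read = st & VHA_CR_OS0_MMU_FAULT_STATUS1_RNW_EN;
+            ... report addr and is_read ...
+        }
+*/
+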
+/*
+    Register VHA_CR_OS0_MMU_FAULT_STATUS2
+*/
+#define VHA_CR_OS0_MMU_FAULT_STATUS2                      (0xE158U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_MASKFULL             (IMG_UINT64_C(0x000000003FFF07FF))
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_SHIFT      (29U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK     (0XDFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_WRITEBACK_EN         (0X20000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT    (28U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK   (0XEFFFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_CLEANUNIQUE_EN       (0X10000000U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_SHIFT           (24U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BANK_CLRMSK          (0XF0FFFFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT      (16U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK     (0XFF00FFFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT      (10U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK     (0XFFFFFBFFU)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_FBM_FAULT_EN         (0X00000400U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_SHIFT         (0U)
+#define VHA_CR_OS0_MMU_FAULT_STATUS2_BIF_ID_CLRMSK        (0XFFFFFC00U)
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS_META                      (0xE160U)
+#define VHA_CR_MMU_FAULT_STATUS_META_MASKFULL             (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT          (62U)
+#define VHA_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK         (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT         (56U)
+#define VHA_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK        (IMG_UINT64_C(0XC0FFFFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT        (48U)
+#define VHA_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK       (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT        (4U)
+#define VHA_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK       (IMG_UINT64_C(0XFFFF00000000000F))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_SHIFT            (3U)
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define VHA_CR_MMU_FAULT_STATUS_META_RNW_EN               (IMG_UINT64_C(0X0000000000000008))
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT           (1U)
+#define VHA_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK          (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT          (0U)
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_META_FAULT_EN             (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS2_META
+*/
+#define VHA_CR_MMU_FAULT_STATUS2_META                     (0xE198U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_MASKFULL            (IMG_UINT64_C(0x0000000000003FFF))
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT     (13U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK    (0XFFFFDFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN        (0X00002000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT   (12U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK  (0XFFFFEFFFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN      (0X00001000U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT          (8U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT     (0U)
+#define VHA_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK    (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_MMU_FAULT_STATUS_PM
+*/
+#define VHA_CR_MMU_FAULT_STATUS_PM                        (0xE130U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_MASKFULL               (IMG_UINT64_C(0x0000000007FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_SHIFT               (24U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT              (23U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_CR_MMU_FAULT_STATUS_PM_RNW_EN                 (IMG_UINT64_C(0X0000000000800000))
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT          (3U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK         (IMG_UINT64_C(0XFFFFFFFFFF800007))
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT            (1U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT            (0U)
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK           (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_CR_MMU_FAULT_STATUS_PM_FAULT_EN               (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_CR_MMU_FAULT_ACCESS
+*/
+#define VHA_CR_MMU_FAULT_ACCESS                           (0xE168U)
+#define VHA_CR_MMU_FAULT_ACCESS_MASKFULL                  (IMG_UINT64_C(0x000000FFFFFFF000))
+#define VHA_CR_MMU_FAULT_ACCESS_ADDRESS_SHIFT             (12U)
+#define VHA_CR_MMU_FAULT_ACCESS_ADDRESS_CLRMSK            (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define VHA_CR_MMU_FAULT_ACCESS_ADDRESS_ALIGNSHIFT        (12U)
+#define VHA_CR_MMU_FAULT_ACCESS_ADDRESS_ALIGNSIZE         (4096U)
+
+
+/*
+    Register VHA_CR_MMU_STATUS
+*/
+#define VHA_CR_MMU_STATUS                                 (0xE170U)
+#define VHA_CR_MMU_STATUS_MASKFULL                        (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_SHIFT               (40U)
+#define VHA_CR_MMU_STATUS_MMU_STALLED_CLRMSK              (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_MMU_STALLED_EN                  (IMG_UINT64_C(0X0000010000000000))
+#define VHA_CR_MMU_STATUS_PM_WRITES_SHIFT                 (38U)
+#define VHA_CR_MMU_STATUS_PM_WRITES_CLRMSK                (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PM_READS_SHIFT                  (36U)
+#define VHA_CR_MMU_STATUS_PM_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_MMU_STATUS_PC_READS_SHIFT                  (24U)
+#define VHA_CR_MMU_STATUS_PC_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_MMU_STATUS_PD_READS_SHIFT                  (12U)
+#define VHA_CR_MMU_STATUS_PD_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_MMU_STATUS_PT_READS_SHIFT                  (0U)
+#define VHA_CR_MMU_STATUS_PT_READS_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_OS0_MMU_ENTRY_STATUS
+*/
+#define VHA_CR_OS0_MMU_ENTRY_STATUS                       (0xE178U)
+#define VHA_CR_OS0_MMU_ENTRY_STATUS_MASKFULL              (IMG_UINT64_C(0x000000FFFFFF80FF))
+#define VHA_CR_OS0_MMU_ENTRY_STATUS_ADDRESS_SHIFT         (15U)
+#define VHA_CR_OS0_MMU_ENTRY_STATUS_ADDRESS_CLRMSK        (IMG_UINT64_C(0XFFFFFF0000007FFF))
+#define VHA_CR_OS0_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT      (0U)
+#define VHA_CR_OS0_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK     (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register VHA_CR_OS0_MMU_ENTRY
+*/
+#define VHA_CR_OS0_MMU_ENTRY                              (0xE180U)
+#define VHA_CR_OS0_MMU_ENTRY_MASKFULL                     (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_OS0_MMU_ENTRY_ENABLE_SHIFT                 (1U)
+#define VHA_CR_OS0_MMU_ENTRY_ENABLE_CLRMSK                (0XFFFFFFFDU)
+#define VHA_CR_OS0_MMU_ENTRY_ENABLE_EN                    (0X00000002U)
+#define VHA_CR_OS0_MMU_ENTRY_PENDING_SHIFT                (0U)
+#define VHA_CR_OS0_MMU_ENTRY_PENDING_CLRMSK               (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_ENTRY_PENDING_EN                   (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_CTRL
+*/
+#define VHA_CR_MMU_ABORT_PM_CTRL                          (0xE188U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_MASKFULL                 (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_SHIFT             (0U)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_CLRMSK            (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_EN                (0X00000001U)
+
+
+/*
+    Register VHA_CR_MMU_ABORT_PM_STATUS
+*/
+#define VHA_CR_MMU_ABORT_PM_STATUS                        (0xE190U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_MASKFULL               (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_SHIFT            (0U)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_MMU_ABORT_PM_STATUS_ABORT_EN               (0X00000001U)
+
+
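+/*
+    Usage sketch (an assumption about the handshake, not confirmed by SDK
+    code): the CTRL/STATUS pair above suggests requesting a PM abort and
+    then polling for completion:
+
+        writeq(VHA_CR_MMU_ABORT_PM_CTRL_ENABLE_EN, base + VHA_CR_MMU_ABORT_PM_CTRL);
+        while (!(readq(base + VHA_CR_MMU_ABORT_PM_STATUS) &
+                 VHA_CR_MMU_ABORT_PM_STATUS_ABORT_EN))
+            cpu_relax();    a real driver would bound this loop with a timeout
+*/
+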
+/*
+    Register VHA_CR_OS0_MMU_CTRL
+*/
+#define VHA_CR_OS0_MMU_CTRL                               (0xE1A8U)
+#define VHA_CR_OS0_MMU_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_SHIFT                  (0U)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_CLRMSK                 (0XFFFFFFFEU)
+#define VHA_CR_OS0_MMU_CTRL_BYPASS_EN                     (0X00000001U)
+
+
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_MASK               (0x00000003U)
+/*
+Reserved value */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_RESERVED           (0x00000000U)
+/*
+Addresses are interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING     (0x00000001U)
+/*
+Addresses are interleaved between Cache Banks on a Cacheline boundary */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_LINEAR             (0x00000002U)
+/*
+Addresses are interleaved between Cache Banks using an XOR hash of the address bits below the 4KB page granularity */
+#define VHA_CR_SLC_CTRL_ENUM_HASH_MODE_IN_PAGE_HASH       (0x00000003U)
+
+
+/*
+    Register VHA_CR_SLC_CTRL
+*/
+#define VHA_CR_SLC_CTRL                                   (0xE200U)
+#define VHA_CR_SLC_CTRL_MASKFULL                          (IMG_UINT64_C(0x0000000000001FF3))
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_SHIFT          (12U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_CLRMSK         (0XFFFFEFFFU)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_ENABLE_EN             (0X00001000U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_SHIFT          (8U)
+#define VHA_CR_SLC_CTRL_CFI_TIMEOUT_CYCLES_CLRMSK         (0XFFFFF0FFU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_SHIFT    (7U)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_CLRMSK   (0XFFFFFF7FU)
+#define VHA_CR_SLC_CTRL_PERSISTENCE_DECAY_ENABLE_EN       (0X00000080U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_SHIFT                  (4U)
+#define VHA_CR_SLC_CTRL_MAX_FENCES_CLRMSK                 (0XFFFFFF8FU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_SHIFT                   (0U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK                  (0XFFFFFFFCU)
+#define VHA_CR_SLC_CTRL_HASH_MODE_RESERVED                (0X00000000U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_PVR_V3_HASHING          (0X00000001U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_LINEAR                  (0X00000002U)
+#define VHA_CR_SLC_CTRL_HASH_MODE_IN_PAGE_HASH            (0X00000003U)
+
+
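+/*
+    Usage sketch (illustrative; 32-bit access is an assumption, justified by
+    the register's 32-bit MASKFULL): selecting a hash mode is a
+    read-modify-write of the 2-bit HASH_MODE field, e.g.:
+
+        uint32_t ctrl = readl(base + VHA_CR_SLC_CTRL);
+        ctrl &= VHA_CR_SLC_CTRL_HASH_MODE_CLRMSK;
+        ctrl |= VHA_CR_SLC_CTRL_HASH_MODE_LINEAR;
+        writel(ctrl, base + VHA_CR_SLC_CTRL);
+*/
+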
+/*
+    Register VHA_CR_SLC_STATUS1
+*/
+#define VHA_CR_SLC_STATUS1                                (0xE210U)
+#define VHA_CR_SLC_STATUS1_MASKFULL                       (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT        (48U)
+#define VHA_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK       (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS2
+*/
+#define VHA_CR_SLC_STATUS2                                (0xE218U)
+#define VHA_CR_SLC_STATUS2_MASKFULL                       (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT  (36U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFF000FFFFFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT  (24U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0XFFFFFFF000FFFFFF))
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT   (12U)
+#define VHA_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFF000FFF))
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT   (0U)
+#define VHA_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFF000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS
+*/
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS             (0xE220U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_MASKFULL    (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK3_SHIFT (48U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK2_SHIFT (32U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK1_SHIFT (16U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK0_SHIFT (0U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_READS_BANK0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES
+*/
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES            (0xE228U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_MASKFULL   (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK3_SHIFT (48U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK2_SHIFT (32U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK1_SHIFT (16U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK0_SHIFT (0U)
+#define VHA_CR_SLC_STATUS_BYP_COH_ERROR_WRITES_BANK0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+    Register VHA_CR_SLC_IDLE
+*/
+#define VHA_CR_SLC_IDLE                                   (0xE230U)
+#define VHA_CR_SLC_IDLE_MASKFULL                          (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT              (12U)
+#define VHA_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK             (0XFFFF0FFFU)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_SHIFT                 (4U)
+#define VHA_CR_SLC_IDLE_CACHE_BANKS_CLRMSK                (0XFFFFF00FU)
+#define VHA_CR_SLC_IDLE_MMU_SHIFT                         (3U)
+#define VHA_CR_SLC_IDLE_MMU_CLRMSK                        (0XFFFFFFF7U)
+#define VHA_CR_SLC_IDLE_MMU_EN                            (0X00000008U)
+#define VHA_CR_SLC_IDLE_CCM_SHIFT                         (2U)
+#define VHA_CR_SLC_IDLE_CCM_CLRMSK                        (0XFFFFFFFBU)
+#define VHA_CR_SLC_IDLE_CCM_EN                            (0X00000004U)
+#define VHA_CR_SLC_IDLE_RDI_SHIFT                         (1U)
+#define VHA_CR_SLC_IDLE_RDI_CLRMSK                        (0XFFFFFFFDU)
+#define VHA_CR_SLC_IDLE_RDI_EN                            (0X00000002U)
+#define VHA_CR_SLC_IDLE_XBAR_SHIFT                        (0U)
+#define VHA_CR_SLC_IDLE_XBAR_CLRMSK                       (0XFFFFFFFEU)
+#define VHA_CR_SLC_IDLE_XBAR_EN                           (0X00000001U)
+
+
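+/*
+    Polling sketch (an assumption: this presumes each IDLE bit reads 1 when
+    the corresponding unit is quiescent): full-SLC idleness could then be
+    awaited as:
+
+        while ((readq(base + VHA_CR_SLC_IDLE) & VHA_CR_SLC_IDLE_MASKFULL)
+               != VHA_CR_SLC_IDLE_MASKFULL)
+            cpu_relax();    bound with a timeout in a real driver
+*/
+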
+/*
+    Register VHA_CR_SLC_STATUS3
+*/
+#define VHA_CR_SLC_STATUS3                                (0xE238U)
+#define VHA_CR_SLC_STATUS3_MASKFULL                       (IMG_UINT64_C(0x0FFFFFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_SHIFT (50U)
+#define VHA_CR_SLC_STATUS3_MAX_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XF003FFFFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_SHIFT (40U)
+#define VHA_CR_SLC_STATUS3_MAX_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFC00FFFFFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_SHIFT (30U)
+#define VHA_CR_SLC_STATUS3_AVG_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_SHIFT (20U)
+#define VHA_CR_SLC_STATUS3_AVG_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC00FFFFF))
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_SHIFT (10U)
+#define VHA_CR_SLC_STATUS3_MIN_CRITICAL_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF003FF))
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_SHIFT (0U)
+#define VHA_CR_SLC_STATUS3_MIN_LOW_QOS_READ_LATENCY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFC00))
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_STATUS
+*/
+#define VHA_CR_SLC_FAULT_STOP_STATUS                      (0xE240U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_MASKFULL             (IMG_UINT64_C(0x000000000001FFFF))
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT            (0U)
+#define VHA_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK           (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_SLC_FAULT_STOP_CTRL
+*/
+#define VHA_CR_SLC_FAULT_STOP_CTRL                        (0xE248U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_MASKFULL               (IMG_UINT64_C(0x000000000003FFFF))
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_SHIFT              (17U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_CLRMSK             (0XFFFDFFFFU)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ALL_EN                 (0X00020000U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_SHIFT           (0U)
+#define VHA_CR_SLC_FAULT_STOP_CTRL_ENABLE_CLRMSK          (0XFFFE0000U)
+
+
+/*
+    Register VHA_CR_SLC_STATUS_DEBUG
+*/
+#define VHA_CR_SLC_STATUS_DEBUG                           (0xE260U)
+#define VHA_CR_SLC_STATUS_DEBUG_MASKFULL                  (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT         (16U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK        (0X0000FFFFU)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT      (0U)
+#define VHA_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_SLC_CCM_CTRL
+*/
+#define VHA_CR_SLC_CCM_CTRL                               (0xE300U)
+#define VHA_CR_SLC_CCM_CTRL_MASKFULL                      (IMG_UINT64_C(0x0000000000FF00FF))
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_SHIFT            (16U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_AMOUNT_CLRMSK           (0XFF00FFFFU)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_SHIFT         (0U)
+#define VHA_CR_SLC_CCM_CTRL_SPILL_THRESHOLD_CLRMSK        (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_CCM_STATUS
+*/
+#define VHA_CR_SLC_CCM_STATUS                             (0xE308U)
+#define VHA_CR_SLC_CCM_STATUS_MASKFULL                    (IMG_UINT64_C(0x0FFFFFFFF10FF0FF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_SHIFT          (52U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT3_CLRMSK         (IMG_UINT64_C(0XF00FFFFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_SHIFT          (44U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT2_CLRMSK         (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_SHIFT          (36U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT1_CLRMSK         (IMG_UINT64_C(0XFFFFF00FFFFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_SHIFT          (28U)
+#define VHA_CR_SLC_CCM_STATUS_SNOOP_COUNT0_CLRMSK         (IMG_UINT64_C(0XFFFFFFF00FFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_SHIFT              (24U)
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_CLRMSK             (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define VHA_CR_SLC_CCM_STATUS_SPILLING_EN                 (IMG_UINT64_C(0X0000000001000000))
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_SHIFT         (12U)
+#define VHA_CR_SLC_CCM_STATUS_SPILL_ENTRIES_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_SHIFT        (0U)
+#define VHA_CR_SLC_CCM_STATUS_ACTIVE_ENTRIES_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+    Register VHA_CR_ACE_QOS_CTRL
+*/
+#define VHA_CR_ACE_QOS_CTRL                               (0xE310U)
+#define VHA_CR_ACE_QOS_CTRL_MASKFULL                      (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_SHIFT                (12U)
+#define VHA_CR_ACE_QOS_CTRL_CRITICAL_CLRMSK               (0XFFFF0FFFU)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_SHIFT                    (8U)
+#define VHA_CR_ACE_QOS_CTRL_HIGH_CLRMSK                   (0XFFFFF0FFU)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_SHIFT                  (4U)
+#define VHA_CR_ACE_QOS_CTRL_MEDIUM_CLRMSK                 (0XFFFFFF0FU)
+#define VHA_CR_ACE_QOS_CTRL_LOW_SHIFT                     (0U)
+#define VHA_CR_ACE_QOS_CTRL_LOW_CLRMSK                    (0XFFFFFFF0U)
+
+
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MASK (0x00000003U)
+/*
+Low */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_LOW (0x00000000U)
+/*
+Medium */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_MEDIUM (0x00000001U)
+/*
+High */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_HIGH (0x00000002U)
+/*
+Critical */
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_CRITICAL (0x00000003U)
+
+
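+/*
+    Usage sketch (illustrative): the register below packs one 2-bit priority
+    per requester; a field can be set either with the pre-shifted values
+    (e.g. VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH) or generically from the
+    enum:
+
+        val = (val & VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CLRMSK)
+            | ((uint64_t)VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ENUM_PRIORITIES_HIGH
+               << VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_SHIFT);
+*/
+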
+/*
+    Register VHA_CR_ACE_PRIORITY_MAPPING_CTRL
+*/
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL                  (0xE318U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MASKFULL         (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_SHIFT        (62U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CLRMSK       (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_MEDIUM       (IMG_UINT64_C(0x4000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_HIGH         (IMG_UINT64_C(0x8000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MMU_CRITICAL     (IMG_UINT64_C(0xc000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_SHIFT        (60U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_CLRMSK       (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_MEDIUM       (IMG_UINT64_C(0x1000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_HIGH         (IMG_UINT64_C(0x2000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_UPS_CRITICAL     (IMG_UINT64_C(0x3000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_SHIFT    (58U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_CLRMSK   (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_MEDIUM   (IMG_UINT64_C(0x0400000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_HIGH     (IMG_UINT64_C(0x0800000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBM_BSC_CRITICAL (IMG_UINT64_C(0x0c00000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_SHIFT        (56U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_CLRMSK       (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_MEDIUM       (IMG_UINT64_C(0x0100000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_HIGH         (IMG_UINT64_C(0x0200000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TUL_CRITICAL     (IMG_UINT64_C(0x0300000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_SHIFT        (54U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_CLRMSK       (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_MEDIUM       (IMG_UINT64_C(0x0040000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_HIGH         (IMG_UINT64_C(0x0080000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHR_CRITICAL     (IMG_UINT64_C(0x00c0000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_SHIFT        (52U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_CLRMSK       (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_MEDIUM       (IMG_UINT64_C(0x0010000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_HIGH         (IMG_UINT64_C(0x0020000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_FBA_CRITICAL     (IMG_UINT64_C(0x0030000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_SHIFT        (50U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_CLRMSK       (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_MEDIUM       (IMG_UINT64_C(0x0004000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_HIGH         (IMG_UINT64_C(0x0008000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VDM_CRITICAL     (IMG_UINT64_C(0x000c000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_SHIFT        (48U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_CLRMSK       (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_MEDIUM       (IMG_UINT64_C(0x0001000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_HIGH         (IMG_UINT64_C(0x0002000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_USC_CRITICAL     (IMG_UINT64_C(0x0003000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_SHIFT        (46U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_CLRMSK       (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_MEDIUM       (IMG_UINT64_C(0x0000400000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_HIGH         (IMG_UINT64_C(0x0000800000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDS_CRITICAL     (IMG_UINT64_C(0x0000c00000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_SHIFT      (44U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_CLRMSK     (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_LOW        (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_MEDIUM     (IMG_UINT64_C(0x0000100000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_HIGH       (IMG_UINT64_C(0x0000200000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PDSRW_CRITICAL   (IMG_UINT64_C(0x0000300000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_SHIFT        (42U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_CLRMSK       (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_MEDIUM       (IMG_UINT64_C(0x0000040000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_HIGH         (IMG_UINT64_C(0x0000080000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TPF_CRITICAL     (IMG_UINT64_C(0x00000c0000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_SHIFT    (40U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_CLRMSK   (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_LOW      (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_MEDIUM   (IMG_UINT64_C(0x0000010000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_HIGH     (IMG_UINT64_C(0x0000020000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_SHF_SHG_CRITICAL (IMG_UINT64_C(0x0000030000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_SHIFT        (38U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_CLRMSK       (IMG_UINT64_C(0XFFFFFF3FFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_MEDIUM       (IMG_UINT64_C(0x0000004000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_HIGH         (IMG_UINT64_C(0x0000008000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_AMC_CRITICAL     (IMG_UINT64_C(0x000000c000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_SHIFT        (36U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_CLRMSK       (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_LOW          (IMG_UINT64_C(0x0000000000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_MEDIUM       (IMG_UINT64_C(0x0000001000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_HIGH         (IMG_UINT64_C(0x0000002000000000))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RAC_CRITICAL     (IMG_UINT64_C(0x0000003000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_SHIFT    (34U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_CLRMSK   (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_LOW      (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_MEDIUM   (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_HIGH     (IMG_UINT64_C(0x0000000800000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_RTC_CRITICAL (IMG_UINT64_C(0x0000000c00000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_SHIFT        (32U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_MEDIUM       (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_HIGH         (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_ISP_CRITICAL     (IMG_UINT64_C(0x0000000300000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_SHIFT        (30U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_MEDIUM       (IMG_UINT64_C(0x0000000040000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_HIGH         (IMG_UINT64_C(0x0000000080000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PPP_CRITICAL     (IMG_UINT64_C(0x00000000c0000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_SHIFT        (28U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_MEDIUM       (IMG_UINT64_C(0x0000000010000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_HIGH         (IMG_UINT64_C(0x0000000020000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPF_CRITICAL     (IMG_UINT64_C(0x0000000030000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_SHIFT        (26U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_MEDIUM       (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_HIGH         (IMG_UINT64_C(0x0000000008000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_VCE_CRITICAL     (IMG_UINT64_C(0x000000000c000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_SHIFT        (24U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_MEDIUM       (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_HIGH         (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PBE_CRITICAL     (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_SHIFT        (22U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_MEDIUM       (IMG_UINT64_C(0x0000000000400000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_HIGH         (IMG_UINT64_C(0x0000000000800000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TCU_CRITICAL     (IMG_UINT64_C(0x0000000000c00000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_SHIFT        (20U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_MEDIUM       (IMG_UINT64_C(0x0000000000100000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_HIGH         (IMG_UINT64_C(0x0000000000200000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_MCU_CRITICAL     (IMG_UINT64_C(0x0000000000300000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_SHIFT        (18U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_MEDIUM       (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_HIGH         (IMG_UINT64_C(0x0000000000080000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RPM_CRITICAL     (IMG_UINT64_C(0x00000000000c0000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_SHIFT        (16U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_MEDIUM       (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_HIGH         (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_RTU_CRITICAL     (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_SHIFT     (14U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_CLRMSK    (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_LOW       (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_MEDIUM    (IMG_UINT64_C(0x0000000000004000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_HIGH      (IMG_UINT64_C(0x0000000000008000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TILING_CRITICAL  (IMG_UINT64_C(0x000000000000c000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_SHIFT   (12U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_CLRMSK  (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_LOW     (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_MEDIUM  (IMG_UINT64_C(0x0000000000001000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_HIGH    (IMG_UINT64_C(0x0000000000002000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_DMA_CRITICAL (IMG_UINT64_C(0x0000000000003000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_SHIFT       (10U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_LOW         (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_MEDIUM      (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_HIGH        (IMG_UINT64_C(0x0000000000000800))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_META_CRITICAL    (IMG_UINT64_C(0x0000000000000c00))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_SHIFT        (8U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_MEDIUM       (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_HIGH         (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_CDM_CRITICAL     (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_SHIFT         (6U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_LOW           (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_MEDIUM        (IMG_UINT64_C(0x0000000000000040))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_HIGH          (IMG_UINT64_C(0x0000000000000080))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_PM_CRITICAL      (IMG_UINT64_C(0x00000000000000c0))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_SHIFT        (4U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_MEDIUM       (IMG_UINT64_C(0x0000000000000010))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_HIGH         (IMG_UINT64_C(0x0000000000000020))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_TDM_CRITICAL     (IMG_UINT64_C(0x0000000000000030))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_SHIFT        (2U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_MEDIUM       (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_HIGH         (IMG_UINT64_C(0x0000000000000008))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_DCE_CRITICAL     (IMG_UINT64_C(0x000000000000000c))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_SHIFT        (0U)
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_CLRMSK       (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_LOW          (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_MEDIUM       (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_HIGH         (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_CRITICAL     (IMG_UINT64_C(0x0000000000000003))  
+
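Each requestor in VHA_CR_ACE_PRIORITY_MAPPING_CTRL owns a 2-bit priority field: the _CLRMSK constant clears that field and the LOW/MEDIUM/HIGH/CRITICAL values are already shifted into position, so a plain read-modify-write suffices. A minimal usage sketch for the IPP field, assuming hypothetical MMIO accessors vha_read64()/vha_write64() (the real driver supplies its own) and the register offset macro defined earlier in this header:

#include <stdint.h>

extern uint64_t vha_read64(uint32_t offset);            /* hypothetical accessor */
extern void vha_write64(uint32_t offset, uint64_t val); /* hypothetical accessor */

/* Raise the IPP requestor to HIGH priority without disturbing other fields. */
static void vha_set_ipp_priority_high(void)
{
    uint64_t reg = vha_read64(VHA_CR_ACE_PRIORITY_MAPPING_CTRL);

    reg &= VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_CLRMSK; /* clear the 2-bit field */
    reg |= VHA_CR_ACE_PRIORITY_MAPPING_CTRL_IPP_HIGH;   /* value is pre-shifted */

    vha_write64(VHA_CR_ACE_PRIORITY_MAPPING_CTRL, reg);
}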
+
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x00000006U)
+/*
+Write-through Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_THROUGH_WRITE_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x00000007U)
+/*
+Write-back Write-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_WR_CACHEABLE_WRITE_BACK_WRITE_ALLOCATE (0x0000000fU)
+
+
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_MASK            (0x0000000FU)
+/*
+Device Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_NON_BUFFERABLE (0x00000000U)
+/*
+Device Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_DEVICE_BUFFERABLE (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_NON_BUFFERABLE (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_NORMAL_NC_BUFFERABLE (0x00000003U)
+/*
+Write-through No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_NO_ALLOCATE (0x0000000aU)
+/*
+Write-through Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_THROUGH_READ_ALLOCATE (0x0000000eU)
+/*
+Write-back No-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_NO_ALLOCATE (0x0000000bU)
+/*
+Write-back Read-allocate */
+#define VHA_CR_ACE_CTRL_ENUM_RD_CACHEABLE_WRITE_BACK_READ_ALLOCATE (0x0000000fU)
+
+
+/*
+Non-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_NON_SHAREABLE    (0x00000000U)
+/*
+System */
+#define VHA_CR_ACE_CTRL_ENUM_NCOH_DOMAIN_SYSTEM           (0x00000001U)
+
+
+/*
+Inner-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_INNER_SHAREABLE   (0x00000000U)
+/*
+Outer-Shareable */
+#define VHA_CR_ACE_CTRL_ENUM_COH_DOMAIN_OUTER_SHAREABLE   (0x00000001U)
+
+
+/*
+    Register VHA_CR_ACE_CTRL
+*/
+#define VHA_CR_ACE_CTRL                                   (0xE320U)
+#define VHA_CR_ACE_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000007FCFFF))
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_SHIFT                   (19U)
+#define VHA_CR_ACE_CTRL_CLB_AXQOS_CLRMSK                  (0XFF87FFFFU)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT              (15U)
+#define VHA_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK             (0XFFF87FFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN    (0X00004000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_SHIFT                 (8U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK                (0XFFFFF0FFU)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (00000000U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE     (0X00000100U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000200U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE  (0X00000300U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0X00000600U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0X00000E00U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0X00000700U)
+#define VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0X00000F00U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_SHIFT                 (4U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK                (0XFFFFFF0FU)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (00000000U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE     (0X00000010U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0X00000020U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE  (0X00000030U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0X000000A0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0X000000E0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0X000000B0U)
+#define VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0X000000F0U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_SHIFT                  (2U)
+#define VHA_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK                 (0XFFFFFFF3U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_SHIFT                  (1U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_CLRMSK                 (0XFFFFFFFDU)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE        (00000000U)
+#define VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE        (0X00000002U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT              (0U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK             (0XFFFFFFFEU)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE      (00000000U)
+#define VHA_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM             (0X00000001U)
+
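VHA_CR_ACE_CTRL combines the AxCACHE encodings enumerated above with the shareability-domain selectors. The named values are pre-shifted; plain numeric fields such as CLB_AXQOS must be built from their _SHIFT. All defined fields sit in the low 32 bits (MASKFULL is 0x007FCFFF), so this sketch uses hypothetical 32-bit accessors vha_read32()/vha_write32():

#include <stdint.h>

extern uint32_t vha_read32(uint32_t offset);            /* hypothetical accessor */
extern void vha_write32(uint32_t offset, uint32_t val); /* hypothetical accessor */

static void vha_configure_ace_ctrl(void)
{
    uint32_t reg = vha_read32(VHA_CR_ACE_CTRL);

    /* MMU write/read traffic: write-back with allocation. */
    reg &= VHA_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK;
    reg |= VHA_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE;
    reg &= VHA_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK;
    reg |= VHA_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE;

    /* Coherent requests target the outer-shareable domain. */
    reg &= VHA_CR_ACE_CTRL_COH_DOMAIN_CLRMSK;
    reg |= VHA_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE;

    /* CLB_AXQOS is a plain numeric field; 0x8 is an arbitrary example value. */
    reg &= VHA_CR_ACE_CTRL_CLB_AXQOS_CLRMSK;
    reg |= (0x8U << VHA_CR_ACE_CTRL_CLB_AXQOS_SHIFT);

    vha_write32(VHA_CR_ACE_CTRL, reg);
}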
+
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_MASK               (0x00000007U)
+/*
+Unprivileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_DATA (0x00000000U)
+/*
+Privileged secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_DATA (0x00000001U)
+/*
+Unprivileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_DATA (0x00000002U)
+/*
+Privileged non-secure data access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_DATA (0x00000003U)
+/*
+Unprivileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_SECURE_INSTRUCTION (0x00000004U)
+/*
+Privileged secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_SECURE_INSTRUCTION (0x00000005U)
+/*
+Unprivileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_UNPRIVILEGED_NONSECURE_INSTRUCTION (0x00000006U)
+/*
+Privileged non-secure instruction access*/
+#define VHA_CR_ACE_PROT_CTRL_ENUM_PROT_PRIVILEGED_NONSECURE_INSTRUCTION (0x00000007U)
+
+
+/*
+    Register VHA_CR_ACE_PROT_CTRL
+*/
+#define VHA_CR_ACE_PROT_CTRL                              (0xE328U)
+#define VHA_CR_ACE_PROT_CTRL_MASKFULL                     (IMG_UINT64_C(0x0707070707070707))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_SHIFT                  (56U)
+#define VHA_CR_ACE_PROT_CTRL_OSID7_CLRMSK                 (IMG_UINT64_C(0XF8FFFFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0100000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0200000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0300000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0400000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0500000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0600000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID7_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0700000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_SHIFT                  (48U)
+#define VHA_CR_ACE_PROT_CTRL_OSID6_CLRMSK                 (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0001000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0002000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0003000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0004000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0005000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0006000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID6_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0007000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_SHIFT                  (40U)
+#define VHA_CR_ACE_PROT_CTRL_OSID5_CLRMSK                 (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000010000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000020000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000030000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000040000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000050000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000060000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID5_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000070000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_SHIFT                  (32U)
+#define VHA_CR_ACE_PROT_CTRL_OSID4_CLRMSK                 (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000100000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000200000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000300000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000400000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000500000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000600000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID4_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000700000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_SHIFT                  (24U)
+#define VHA_CR_ACE_PROT_CTRL_OSID3_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000001000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000002000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000003000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000004000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000005000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000006000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID3_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000007000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_SHIFT                  (16U)
+#define VHA_CR_ACE_PROT_CTRL_OSID2_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000010000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000020000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000030000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000040000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000050000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000060000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID2_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000070000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_SHIFT                  (8U)
+#define VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000100))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000200))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000300))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000400))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000500))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000600))  
+#define VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000700))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_SHIFT                  (0U)
+#define VHA_CR_ACE_PROT_CTRL_OSID0_CLRMSK                 (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_DATA (IMG_UINT64_C(0000000000000000))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_DATA (IMG_UINT64_C(0x0000000000000001))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000002))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_DATA (IMG_UINT64_C(0x0000000000000003))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000004))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_SECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000005))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_UNPRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000006))  
+#define VHA_CR_ACE_PROT_CTRL_OSID0_PRIVILEGED_NONSECURE_INSTRUCTION (IMG_UINT64_C(0x0000000000000007))  
+
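VHA_CR_ACE_PROT_CTRL holds one 3-bit AxPROT encoding per OSID, taken from the ENUM_PROT table above, at a stride of eight bits. A sketch that tags OSID1 traffic as privileged non-secure data while leaving the other OSID fields untouched, using the same hypothetical 64-bit accessors as before:

#include <stdint.h>

extern uint64_t vha_read64(uint32_t offset);            /* hypothetical accessor */
extern void vha_write64(uint32_t offset, uint64_t val); /* hypothetical accessor */

static void vha_set_osid1_prot(void)
{
    uint64_t reg = vha_read64(VHA_CR_ACE_PROT_CTRL);

    reg &= VHA_CR_ACE_PROT_CTRL_OSID1_CLRMSK; /* clear the 3-bit field at bit 8 */
    reg |= VHA_CR_ACE_PROT_CTRL_OSID1_PRIVILEGED_NONSECURE_DATA;

    vha_write64(VHA_CR_ACE_PROT_CTRL, reg);
}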
+
+/*
+    Register VHA_CR_ACE_STATUS
+*/
+#define VHA_CR_ACE_STATUS                                 (0xE330U)
+#define VHA_CR_ACE_STATUS_MASKFULL                        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_SHIFT             (28U)
+#define VHA_CR_ACE_STATUS_WR_BUS3_ERROR_CLRMSK            (0X0FFFFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_SHIFT             (24U)
+#define VHA_CR_ACE_STATUS_RD_BUS3_ERROR_CLRMSK            (0XF0FFFFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_SHIFT             (20U)
+#define VHA_CR_ACE_STATUS_WR_BUS2_ERROR_CLRMSK            (0XFF0FFFFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_SHIFT             (16U)
+#define VHA_CR_ACE_STATUS_RD_BUS2_ERROR_CLRMSK            (0XFFF0FFFFU)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_SHIFT             (12U)
+#define VHA_CR_ACE_STATUS_WR_BUS1_ERROR_CLRMSK            (0XFFFF0FFFU)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_SHIFT             (8U)
+#define VHA_CR_ACE_STATUS_RD_BUS1_ERROR_CLRMSK            (0XFFFFF0FFU)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_SHIFT             (4U)
+#define VHA_CR_ACE_STATUS_WR_BUS0_ERROR_CLRMSK            (0XFFFFFF0FU)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_SHIFT             (0U)
+#define VHA_CR_ACE_STATUS_RD_BUS0_ERROR_CLRMSK            (0XFFFFFFF0U)
+
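The layout of VHA_CR_ACE_STATUS is regular: each of the four buses owns a 4-bit read-error field at bit 8*bus and a 4-bit write-error field four bits above it. A sketch that scans all four buses and reports any non-zero code; the meaning of individual error codes is not defined by this header:

#include <stdint.h>
#include <stdio.h>

extern uint32_t vha_read32(uint32_t offset); /* hypothetical accessor */

static void vha_dump_ace_errors(void)
{
    uint32_t status = vha_read32(VHA_CR_ACE_STATUS);
    int bus;

    for (bus = 0; bus < 4; bus++) {
        uint32_t rd = (status >> (8 * bus)) & 0xFU;     /* RD_BUSn_ERROR */
        uint32_t wr = (status >> (8 * bus + 4)) & 0xFU; /* WR_BUSn_ERROR */

        if (rd || wr)
            printf("ACE bus%d: rd_err=0x%x wr_err=0x%x\n", bus, rd, wr);
    }
}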
+
+#define VHA_CR_SOC_AXI_ENUM_COH_MASK                      (0x00000003U)
+/*
+The SoC does not support any form of Coherency*/
+#define VHA_CR_SOC_AXI_ENUM_COH_NO_COHERENCY              (0x00000000U)
+/*
+The SoC supports ACE-Lite or I/O Coherency*/
+#define VHA_CR_SOC_AXI_ENUM_COH_ACE_LITE_COHERENCY        (0x00000001U)
+/*
+The SoC supports full ACE or 2-Way Coherency*/
+#define VHA_CR_SOC_AXI_ENUM_COH_FULL_ACE_COHERENCY        (0x00000002U)
+
+
+/*
+    Register VHA_CR_SOC_AXI
+*/
+#define VHA_CR_SOC_AXI                                    (0xE338U)
+#define VHA_CR_SOC_AXI_MASKFULL                           (IMG_UINT64_C(0x000000000000000F))
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000008U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0X00000004U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT            (0U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK           (0XFFFFFFFCU)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY     (00000000U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0X00000001U)
+#define VHA_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0X00000002U)
+
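VHA_CR_SOC_AXI is read-only capability information; a driver would typically sample it once at probe time. A sketch of querying the coherency level with the ENUM_COH values above (vha_read32() is the same hypothetical accessor):

#include <stdint.h>

extern uint32_t vha_read32(uint32_t offset); /* hypothetical accessor */

/* Returns non-zero if the SoC reports full ACE (2-way) coherency. */
static int vha_soc_supports_full_ace(void)
{
    uint32_t axi = vha_read32(VHA_CR_SOC_AXI);
    uint32_t coh = (axi >> VHA_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT) &
                   VHA_CR_SOC_AXI_ENUM_COH_MASK;

    return coh == VHA_CR_SOC_AXI_ENUM_COH_FULL_ACE_COHERENCY;
}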
+
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_MASK            (0x00000003U)
+/*
+Addresses interleaved between Cache Banks using a weaved XOR hash of address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_WEAVED_HASH     (0x00000000U)
+/*
+Addresses interleaved between Cache Banks using a combined Set & Bank hash of the upper address bits */
+#define VHA_CR_L1_GLB_CTRL_ENUM_HASH_MODE_PVR_V3_HASHING  (0x00000001U)
+
+
+/*
+    Register VHA_CR_L1_GLB_CTRL
+*/
+#define VHA_CR_L1_GLB_CTRL                                (0xE400U)
+#define VHA_CR_L1_GLB_CTRL_MASKFULL                       (IMG_UINT64_C(0x0000000000000003))
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_SHIFT                (0U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_CLRMSK               (0XFFFFFFFCU)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_WEAVED_HASH          (00000000U)
+#define VHA_CR_L1_GLB_CTRL_HASH_MODE_PVR_V3_HASHING       (0X00000001U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING2
+*/
+#define VHA_CR_CONTEXT_MAPPING2                           (0xF088U)
+#define VHA_CR_CONTEXT_MAPPING2_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING2_TE0_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING2_VCE0_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_CONTEXT_MAPPING3
+*/
+#define VHA_CR_CONTEXT_MAPPING3                           (0xF090U)
+#define VHA_CR_CONTEXT_MAPPING3_MASKFULL                  (IMG_UINT64_C(0x0000000000FFFFFF))
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_SHIFT              (16U)
+#define VHA_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK             (0XFF00FFFFU)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_SHIFT                 (8U)
+#define VHA_CR_CONTEXT_MAPPING3_TE1_CLRMSK                (0XFFFF00FFU)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_SHIFT                (0U)
+#define VHA_CR_CONTEXT_MAPPING3_VCE1_CLRMSK               (0XFFFFFF00U)
+
+
+/*
+    Register VHA_CR_SLC_FIX
+*/
+#define VHA_CR_SLC_FIX                                    (0xF0D8U)
+#define VHA_CR_SLC_FIX_MASKFULL                           (IMG_UINT64_C(0x000000000000FFFF))
+#define VHA_CR_SLC_FIX_DISABLE_SHIFT                      (0U)
+#define VHA_CR_SLC_FIX_DISABLE_CLRMSK                     (0XFFFF0000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC
+*/
+#define VHA_CR_PERF_SLC                                   (0xF0F0U)
+#define VHA_CR_PERF_SLC_MASKFULL                          (IMG_UINT64_C(0x000000000FFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_SHIFT (27U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XF7FFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_RESET_EN  (0X08000000U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_SHIFT    (26U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_CLRMSK   (0XFBFFFFFFU)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_RESET_EN       (0X04000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_SHIFT (25U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFDFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_RESET_EN  (0X02000000U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_SHIFT    (24U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_CLRMSK   (0XFEFFFFFFU)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_RESET_EN       (0X01000000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_SHIFT (23U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFF7FFFFFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_RESET_EN  (0X00800000U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (22U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFBFFFFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00400000U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_SHIFT (21U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFDFFFFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_RESET_EN (0X00200000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_COUNTER_RESET_SHIFT (20U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_COUNTER_RESET_CLRMSK (0XFFEFFFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_COUNTER_RESET_EN (0X00100000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_COUNTER_RESET_SHIFT (19U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_COUNTER_RESET_CLRMSK (0XFFF7FFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_COUNTER_RESET_EN (0X00080000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_COUNTER_RESET_SHIFT (18U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFBFFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_COUNTER_RESET_EN (0X00040000U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_SHIFT (17U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFDFFFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_RESET_EN (0X00020000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_SHIFT (16U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_CLRMSK (0XFFFEFFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_RESET_EN (0X00010000U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_SHIFT (15U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFF7FFFU)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_RESET_EN (0X00008000U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_SHIFT (14U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFBFFFU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_RESET_EN (0X00004000U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_SHIFT (13U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_CLRMSK (0XFFFFDFFFU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_RESET_EN (0X00002000U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_SHIFT (12U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_CLRMSK (0XFFFFEFFFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_RESET_EN  (0X00001000U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_SHIFT    (11U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_CLRMSK   (0XFFFFF7FFU)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_RESET_EN       (0X00000800U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_SHIFT   (10U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFBFFU)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_RESET_EN      (0X00000400U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_SHIFT   (9U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_CLRMSK  (0XFFFFFDFFU)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_RESET_EN      (0X00000200U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_COUNTER_RESET_SHIFT  (8U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_COUNTER_RESET_CLRMSK (0XFFFFFEFFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_COUNTER_RESET_EN     (0X00000100U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_COUNTER_RESET_SHIFT  (7U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_COUNTER_RESET_CLRMSK (0XFFFFFF7FU)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_COUNTER_RESET_EN     (0X00000080U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_COUNTER_RESET_SHIFT  (6U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_COUNTER_RESET_CLRMSK (0XFFFFFFBFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_COUNTER_RESET_EN     (0X00000040U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_SHIFT  (5U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_CLRMSK (0XFFFFFFDFU)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_RESET_EN     (0X00000020U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_SHIFT (4U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_CLRMSK (0XFFFFFFEFU)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_RESET_EN    (0X00000010U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_SHIFT (3U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFF7U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_RESET_EN   (0X00000008U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_SHIFT (2U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFBU)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_RESET_EN   (0X00000004U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_SHIFT (1U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_CLRMSK (0XFFFFFFFDU)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_RESET_EN   (0X00000002U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_CLRMSK   (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN       (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_REQ_COUNT
+*/
+#define VHA_CR_PERF_SLC_REQ_COUNT                         (0xF0F8U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_MASKFULL                (IMG_UINT64_C(0x0000000000000001))
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_SHIFT            (0U)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_CLRMSK           (0XFFFFFFFEU)
+#define VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN               (0X00000001U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD                        (0xF110U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_CLRMSK         (00000000U)
+
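A typical counter session: strobe the relevant _RESET bit in VHA_CR_PERF_SLC, enable counting through VHA_CR_PERF_SLC_REQ_COUNT, run the workload, then read the 32-bit counter back. The header does not say whether the reset bits self-clear, so the sketch below (same hypothetical 32-bit accessors) drops them explicitly; that is an assumption, not documented behaviour:

#include <stdint.h>

extern uint32_t vha_read32(uint32_t offset);            /* hypothetical accessor */
extern void vha_write32(uint32_t offset, uint32_t val); /* hypothetical accessor */

static void vha_perf_cmd_rd_start(void)
{
    /* Strobe the reset for the CMD read-request counter, then release it. */
    vha_write32(VHA_CR_PERF_SLC, VHA_CR_PERF_SLC_CMD_REQ_RD_COUNTER_RESET_EN);
    vha_write32(VHA_CR_PERF_SLC, 0);

    vha_write32(VHA_CR_PERF_SLC_REQ_COUNT, VHA_CR_PERF_SLC_REQ_COUNT_ENABLE_EN);
}

static uint32_t vha_perf_cmd_rd_read(void)
{
    return vha_read32(VHA_CR_PERF_SLC_CMD_REQ_RD);
}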
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR                    (0xF118U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_COUNTER_CLRMSK     (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR                    (0xF120U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_COUNTER_CLRMSK     (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR                    (0xF128U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_MASKFULL           (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_SHIFT      (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_COUNTER_CLRMSK     (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE                     (0xF130U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_MASKFULL            (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_SHIFT       (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_COUNTER_CLRMSK      (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD                      (0xF138U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_COUNTER_CLRMSK       (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ1_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD                      (0xF140U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_COUNTER_CLRMSK       (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ2_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD                      (0xF148U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_COUNTER_CLRMSK       (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ3_RD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD                      (0xF150U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_MASKFULL             (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_COUNTER_SHIFT        (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_COUNTER_CLRMSK       (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD                       (0xF158U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_COUNTER_CLRMSK        (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD                       (0xF160U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_MASKFULL              (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_SHIFT         (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_COUNTER_CLRMSK        (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR                        (0xF168U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_COUNTER_CLRMSK         (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD                   (0xF170U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_RD_WORD_COUNTER_CLRMSK    (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD               (0xF178U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_BCK_REQ_WR_WORD_COUNTER_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD               (0xF180U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_CRC_REQ_WR_WORD_COUNTER_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD               (0xF188U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_MASKFULL      (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_SHIFT (0U)
+#define VHA_CR_PERF_SLC_CMD_DBG_REQ_WR_WORD_COUNTER_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD
+*/
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD                (0xF190U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_MASKFULL       (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_SHIFT  (0U)
+#define VHA_CR_PERF_SLC_CMD_REQ_FENCE_WORD_COUNTER_CLRMSK (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD                 (0xF198U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ0_RD_WORD_COUNTER_CLRMSK  (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD                 (0xF1A0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ1_RD_WORD_COUNTER_CLRMSK  (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD                 (0xF1A8U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ2_RD_WORD_COUNTER_CLRMSK  (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD                 (0xF1B0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_MASKFULL        (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_COUNTER_SHIFT   (0U)
+#define VHA_CR_PERF_SLC_IBUF_REQ3_RD_WORD_COUNTER_CLRMSK  (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD                  (0xF1B8U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_CBUF_REQ_RD_WORD_COUNTER_CLRMSK   (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD                  (0xF1C0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_MASKFULL         (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_SHIFT    (0U)
+#define VHA_CR_PERF_SLC_ABUF_REQ_RD_WORD_COUNTER_CLRMSK   (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_OPK_REQ_WR_WORD
+*/
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD                   (0xF1C8U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_OPK_REQ_WR_WORD_COUNTER_CLRMSK    (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD                        (0xF1D0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_COUNTER_CLRMSK         (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_MMU_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD                   (0xF1D8U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_MMU_REQ_RD_WORD_COUNTER_CLRMSK    (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD                        (0xF1E0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_MASKFULL               (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_SHIFT          (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_COUNTER_CLRMSK         (00000000U)
+
+
+/*
+    Register VHA_CR_PERF_SLC_EWO_REQ_RD_WORD
+*/
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD                   (0xF1E8U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_MASKFULL          (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_SHIFT     (0U)
+#define VHA_CR_PERF_SLC_EWO_REQ_RD_WORD_COUNTER_CLRMSK    (00000000U)
+
+
+/*
+    Register VHA_CR_PWR_MAN_HYSTERESIS
+*/
+#define VHA_CR_PWR_MAN_HYSTERESIS                         (0xF100U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_MASKFULL                (IMG_UINT64_C(0x000000000000001F))
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_SHIFT             (0U)
+#define VHA_CR_PWR_MAN_HYSTERESIS_VALUE_CLRMSK            (0XFFFFFFE0U)
+
+
+#endif /* _VHA_CR_MIRAGE_H_ */
+
+/*****************************************************************************
+ End of file (vha_cr_mirage.h)
+*****************************************************************************/
+

+ 101 - 0
driver/include/hwdefs/vha_tb.h

@@ -0,0 +1,101 @@
+/*************************************************************************/ /*!
+@Title          Hardware definition file vha_tb.h
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+*/ /**************************************************************************/
+
+/*               ****   Autogenerated C -- do not edit    ****               */
+
+/*
+ */
+
+
+#ifndef _VHA_TB_H_
+#define _VHA_TB_H_
+
+#define VHA_TB_REVISION 1
+
+/*
+    Register VHA_TB_MEM_CTRL
+*/
+#define VHA_TB_MEM_CTRL                                   (0x0000U)
+#define VHA_TB_MEM_CTRL_MASKFULL                          (IMG_UINT64_C(0x00000000FF800000))
+#define VHA_TB_MEM_CTRL_MEM_READ_OUTSTANDING_SHIFT        (24U)
+#define VHA_TB_MEM_CTRL_MEM_READ_OUTSTANDING_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define VHA_TB_MEM_CTRL_MEM_READ_8TIMES_OUT_SHIFT         (23U)
+#define VHA_TB_MEM_CTRL_MEM_READ_8TIMES_OUT_CLRMSK        (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define VHA_TB_MEM_CTRL_MEM_READ_8TIMES_OUT_EN            (IMG_UINT64_C(0X0000000000800000))
+
+
+/*
+    Register VHA_TB_MEM_CTRL_EXT
+*/
+#define VHA_TB_MEM_CTRL_EXT                               (0x0010U)
+#define VHA_TB_MEM_CTRL_EXT_MASKFULL                      (IMG_UINT64_C(0x0000000000170000))
+#define VHA_TB_MEM_CTRL_EXT_MEM_ENCRYPT_SHIFT             (20U)
+#define VHA_TB_MEM_CTRL_EXT_MEM_ENCRYPT_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define VHA_TB_MEM_CTRL_EXT_MEM_ENCRYPT_EN                (IMG_UINT64_C(0X0000000000100000))
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_INTERLEAVED_MODE_SHIFT (18U)
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_INTERLEAVED_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_INTERLEAVED_MODE_EN (IMG_UINT64_C(0X0000000000040000))
+#define VHA_TB_MEM_CTRL_EXT_MEM_WRESP_REORDER_MODE_SHIFT  (17U)
+#define VHA_TB_MEM_CTRL_EXT_MEM_WRESP_REORDER_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define VHA_TB_MEM_CTRL_EXT_MEM_WRESP_REORDER_MODE_EN     (IMG_UINT64_C(0X0000000000020000))
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_REORDER_MODE_SHIFT  (16U)
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_REORDER_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define VHA_TB_MEM_CTRL_EXT_MEM_RDATA_REORDER_MODE_EN     (IMG_UINT64_C(0X0000000000010000))
+
+
+/*
+    Register VHA_TB_VHA_IDLE_STATUS
+*/
+#define VHA_TB_VHA_IDLE_STATUS                            (0x0060U)
+#define VHA_TB_VHA_IDLE_STATUS_MASKFULL                   (IMG_UINT64_C(0x0000000000000001))
+#define VHA_TB_VHA_IDLE_STATUS_VHA_IDLE_SHIFT             (0U)
+#define VHA_TB_VHA_IDLE_STATUS_VHA_IDLE_CLRMSK            (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_TB_VHA_IDLE_STATUS_VHA_IDLE_EN                (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+    Register VHA_TB_VHA_IRQ_STATUS
+*/
+#define VHA_TB_VHA_IRQ_STATUS                             (0x0068U)
+#define VHA_TB_VHA_IRQ_STATUS_MASKFULL                    (IMG_UINT64_C(0x0000000000000001))
+#define VHA_TB_VHA_IRQ_STATUS_VHA_IRQ_SHIFT               (0U)
+#define VHA_TB_VHA_IRQ_STATUS_VHA_IRQ_CLRMSK              (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define VHA_TB_VHA_IRQ_STATUS_VHA_IRQ_EN                  (IMG_UINT64_C(0X0000000000000001))
+
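VHA_TB_VHA_IDLE_STATUS and VHA_TB_VHA_IRQ_STATUS are single-bit testbench status registers that a bench harness would normally poll. A minimal sketch, assuming a hypothetical testbench accessor tb_read64():

#include <stdint.h>

extern uint64_t tb_read64(uint32_t offset); /* hypothetical testbench accessor */

/* Spin until the VHA core reports idle, or give up after 'tries' polls. */
static int tb_wait_for_idle(unsigned int tries)
{
    while (tries--) {
        if (tb_read64(VHA_TB_VHA_IDLE_STATUS) & VHA_TB_VHA_IDLE_STATUS_VHA_IDLE_EN)
            return 0;
    }
    return -1; /* timed out */
}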
+
+/*
+    Register VHA_TB_MEM_WR_CTRL
+*/
+#define VHA_TB_MEM_WR_CTRL                                (0x0128U)
+#define VHA_TB_MEM_WR_CTRL_MASKFULL                       (IMG_UINT64_C(0x00000000BFFF3FFF))
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_EN_SHIFT        (31U)
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_EN_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_EN_EN           (IMG_UINT64_C(0X0000000080000000))
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_MAX_SHIFT       (16U)
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_MAX_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFC000FFFF))
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_MIN_SHIFT       (0U)
+#define VHA_TB_MEM_WR_CTRL_MEM_WR_LATENCY_MIN_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFC000))
+
+
+/*
+    Register VHA_TB_MEM_RD_CTRL
+*/
+#define VHA_TB_MEM_RD_CTRL                                (0x0130U)
+#define VHA_TB_MEM_RD_CTRL_MASKFULL                       (IMG_UINT64_C(0x00000000BFFF3FFF))
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_EN_SHIFT        (31U)
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_EN_CLRMSK       (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_EN_EN           (IMG_UINT64_C(0X0000000080000000))
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MAX_SHIFT       (16U)
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MAX_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFC000FFFF))
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MIN_SHIFT       (0U)
+#define VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MIN_CLRMSK      (IMG_UINT64_C(0XFFFFFFFFFFFFC000))
+
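VHA_TB_MEM_WR_CTRL and VHA_TB_MEM_RD_CTRL share one shape: a 14-bit minimum-latency field at bit 0, a 14-bit maximum at bit 16, and an enable at bit 31, letting the testbench impose a memory-latency window. A sketch for the read path, assuming matching hypothetical tb_read64()/tb_write64() accessors; callers must keep both values within 14 bits:

#include <stdint.h>

extern uint64_t tb_read64(uint32_t offset);            /* hypothetical accessor */
extern void tb_write64(uint32_t offset, uint64_t val); /* hypothetical accessor */

static void tb_set_rd_latency(uint64_t min_cycles, uint64_t max_cycles)
{
    uint64_t reg = tb_read64(VHA_TB_MEM_RD_CTRL);

    reg &= VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MIN_CLRMSK;
    reg |= (min_cycles << VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MIN_SHIFT);

    reg &= VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MAX_CLRMSK;
    reg |= (max_cycles << VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_MAX_SHIFT);

    reg |= VHA_TB_MEM_RD_CTRL_MEM_RD_LATENCY_EN_EN;

    tb_write64(VHA_TB_MEM_RD_CTRL, reg);
}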
+
+#endif /* _VHA_TB_H_ */
+
+/*****************************************************************************
+ End of file (vha_tb.h)
+*****************************************************************************/
+

+ 296 - 0
driver/include/img_mem_man.h

@@ -0,0 +1,296 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef IMG_MEM_MAN_H
+#define IMG_MEM_MAN_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+#define KERNEL_DMA_FENCE_SUPPORT
+#endif
+
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+#include <linux/dma-fence.h>
+#endif
+
+#include "uapi/img_mem_man.h"
+
+/* Defines the allocation order range for platforms
+ * that do not explicitly define it.
+ * NOTE: applicable to the unified heap type only */
+#define IMG_MIN_ALLOC_ORDER_DEFAULT 0
+#define IMG_MAX_ALLOC_ORDER_DEFAULT 0
+
+/* Page catalogue address shift */
+#define IMG_MMU_PC_ADDR_SHIFT 12
+
+/* MMUv3 PTE entry flags */
+enum {
+	IMG_MMU_PTE_FLAG_NONE = 0x0,
+	IMG_MMU_PTE_FLAG_VALID = 0x1,
+	IMG_MMU_PTE_FLAG_READ_ONLY = 0x2,
+	IMG_MMU_PTE_FLAG_CACHE_COHERENCY = 0x4,
+};
+
+/* Flags for entries at all levels are stored in the 4 LSBs */
+#define IMG_MMU_ENTRY_FLAGS_MASK 0xf
+
+/* Each entry can store a 40-bit physical address */
+#define IMG_MMU_PHY_ADDR_MASK ((1ULL<<40)-1)
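Taken together, these macros imply a PTE layout of a page-aligned physical address (up to 40 bits) with the flags packed into the 4 LSBs. A small illustrative sketch (the `pte_pack`/`pte_phys` helpers are not SDK functions):

```c
#include <stdint.h>

static inline uint64_t pte_pack(uint64_t phys, unsigned flags)
{
	/* keep the 40-bit, page-aligned address bits; flags go in the 4 LSBs */
	return (phys & IMG_MMU_PHY_ADDR_MASK & ~(uint64_t)IMG_MMU_ENTRY_FLAGS_MASK) |
	       (flags & IMG_MMU_ENTRY_FLAGS_MASK);
}

static inline uint64_t pte_phys(uint64_t entry)
{
	return entry & IMG_MMU_PHY_ADDR_MASK & ~(uint64_t)IMG_MMU_ENTRY_FLAGS_MASK;
}

/* e.g. pte_pack(0x12345000ULL,
 *               IMG_MMU_PTE_FLAG_VALID | IMG_MMU_PTE_FLAG_READ_ONLY) == 0x12345003 */
```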
+
+struct mmu_config {
+	uint32_t addr_width; /* physical */
+	bool bypass_hw; /* MMU bypass mode */
+	size_t bypass_offset; /* Optional offset in physical space for MMU bypass mode */
+	bool use_pte_parity; /* enables parity calculation for PTEs */
+	/* memory attributes to be used when allocating mmu pages */
+	enum img_mem_attr alloc_attr;
+	int page_size;
+};
+
+union heap_options {
+	struct {
+		gfp_t gfp_type; /* pool and flags for buffer allocations */
+		int min_order;  /* minimum page allocation order */
+		int max_order;  /* maximum page allocation order */
+	} unified;
+#ifdef CONFIG_ION
+	struct {
+		struct ion_client *client; /* must be provided by platform */
+	} ion;
+#endif
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	struct {
+		void *kptr; /* static pointer to kernel mapping of memory */
+		/* Optional hooks to obtain kernel mapping dynamically */
+		void* (*get_kptr)(
+				phys_addr_t addr,
+				size_t size,
+				enum img_mem_attr mattr);
+		int (*put_kptr)(void *);
+		phys_addr_t phys; /* physical address start of memory */
+		size_t size; /* size of memory */
+		unsigned long offs; /* optional offset of the start
+							of memory as seen from device,
+							zero by default */
+		int pool_order;  /* allocation order */
+	} carveout;
+#endif
+	struct {
+		bool use_sg_dma;  /* Forces sg_dma physical address instead of CPU physical address */
+	} dmabuf;
+	struct {
+		gfp_t gfp_flags; /* for buffer allocations */
+	} coherent;
+	struct {
+		phys_addr_t phys; /* physical address start of memory */
+		size_t size; /* size of memory */
+		enum img_mem_heap_attrs hattr; /* User attributes */
+	} ocm;
+};
+
+struct heap_config {
+	enum img_mem_heap_type type;
+	union heap_options options;
+	/* (optional) functions to convert a physical address as seen from
+	 * the CPU to the physical address as seen from the vha device and
+	 * vice versa. When not implemented, physical addresses are assumed
+	 * to be the same regardless of viewpoint */
+	phys_addr_t (*to_dev_addr)(union heap_options *opts, phys_addr_t addr);
+	phys_addr_t (*to_host_addr)(union heap_options *opts, phys_addr_t addr);
+	/* Cache attribute,
+	 * could be platform specific if provided - overwrites the global cache policy */
+	enum img_mem_attr cache_attr;
+};
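As a concrete illustration, a platform might register a carveout heap (only available with CONFIG_GENERIC_ALLOCATOR) whose device view is offset from the CPU view. This is a sketch only; all addresses and sizes below are made up:

```c
/* Hypothetical platform heap: 64 MB carveout at CPU address 0x80000000,
 * which the device sees 0x40000000 lower. Values are illustrative only. */
static phys_addr_t plat_to_dev(union heap_options *opts, phys_addr_t addr)
{
	return addr - 0x40000000;
}

static phys_addr_t plat_to_host(union heap_options *opts, phys_addr_t addr)
{
	return addr + 0x40000000;
}

static struct heap_config plat_heap_cfg = {
	.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
	.options.carveout = {
		.phys = 0x80000000,
		.size = 64 * 1024 * 1024,
		.pool_order = 0,
	},
	.to_dev_addr  = plat_to_dev,
	.to_host_addr = plat_to_host,
};

/* then: int heap_id; img_mem_add_heap(&plat_heap_cfg, &heap_id); */
```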
+
+enum img_mmu_callback_type {
+	IMG_MMU_CALLBACK_MAP = 1,
+	IMG_MMU_CALLBACK_UNMAP,
+};
+
+struct mem_ctx;
+struct mmu_ctx;
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id);
+void img_mem_del_heap(int heap_id);
+int img_mem_get_heap_info(int heap_id, uint8_t *type, uint32_t *attrs);
+
+/*
+ * related to the process context (contains SYSMEM heap functionality in general)
+ */
+
+int img_mem_create_proc_ctx(struct mem_ctx **ctx);
+void img_mem_destroy_proc_ctx(struct mem_ctx *ctx);
+
+int img_mem_alloc(struct device *device, struct mem_ctx *ctx, int heap_id,
+			size_t size, enum img_mem_attr attributes, int *buf_id);
+int img_mem_import(struct device *device, struct mem_ctx *ctx, int heap_id,
+			 size_t size, enum img_mem_attr attributes, uint64_t buf_hnd,
+			 int *buf_id);
+int img_mem_export(struct device *device, struct mem_ctx *ctx, int buf_id,
+			 size_t size, enum img_mem_attr attributes, uint64_t *buf_hnd);
+void img_mem_free(struct mem_ctx *ctx, int buf_id);
+
+int img_mem_map_um(struct mem_ctx *ctx, int buf_id, struct vm_area_struct *vma);
+int img_mem_unmap_um(struct mem_ctx *ctx, int buf_id);
+int img_mem_map_km(struct mem_ctx *ctx, int buf_id);
+int img_mem_unmap_km(struct mem_ctx *ctx, int buf_id);
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buf_id);
+uint64_t *img_mem_get_page_array(struct mem_ctx *mem_ctx, int buf_id);
+uint64_t img_mem_get_single_page(struct mem_ctx *mem_ctx, int buf_id,
+		unsigned int offset);
+phys_addr_t img_mem_get_dev_addr(struct mem_ctx *mem_ctx, int buf_id,
+		phys_addr_t addr);
+
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buf_id);
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buf_id);
+
+int img_mem_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr);
+int img_mmu_get_usage(const struct mem_ctx *ctx, size_t *max, size_t *curr);
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+struct dma_fence * img_mem_add_fence(struct mem_ctx *ctx, int buf_id);
+void img_mem_remove_fence(struct mem_ctx *ctx, int buf_id);
+int img_mem_signal_fence(struct mem_ctx *ctx, int buf_id);
+#endif
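A minimal sketch of the buffer lifecycle built from the functions above; `dev` (a `struct device *`) and `heap_id` are assumed to come from the platform setup, and error handling is elided:

```c
struct mem_ctx *ctx;
void *kptr;
int buf_id;

img_mem_create_proc_ctx(&ctx);
img_mem_alloc(dev, ctx, heap_id, 4096, IMG_MEM_ATTR_CACHED, &buf_id);

img_mem_map_km(ctx, buf_id);              /* create a kernel mapping */
kptr = img_mem_get_kptr(ctx, buf_id);
memset(kptr, 0, 4096);
img_mem_sync_cpu_to_device(ctx, buf_id);  /* make CPU writes visible to the device */
img_mem_unmap_km(ctx, buf_id);

img_mem_free(ctx, buf_id);
img_mem_destroy_proc_ctx(ctx);
```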
+
+/*
+ * related to the stream MMU context (contains IMGMMU functionality in general)
+ */
+int img_mmu_ctx_create(struct device *device, const struct mmu_config *config,
+					 struct mem_ctx *mem_ctx, int heap_id,
+					 int (*callback_fn)(enum img_mmu_callback_type type,
+					 int buf_id, void *data),
+					 void *callback_data,
+					 struct mmu_ctx **mmu_ctx);
+void img_mmu_ctx_destroy(struct mmu_ctx *mmu);
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id,
+		uint64_t virt_addr, unsigned int map_flags);
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx, int buf_id);
+
+int img_mmu_get_pc(const struct mmu_ctx *ctx,
+		unsigned int *pc_reg, int *buf_id);
+int img_mmu_get_conf(size_t *page_size, size_t *virt_size);
+phys_addr_t img_mmu_get_paddr(const struct mmu_ctx *ctx,
+		uint64_t vaddr, uint8_t *flags);
+
+int img_mmu_init_cache(struct mmu_ctx *mmu_ctx,	unsigned long cache_phys_start,
+		uint32_t cache_size);
+int img_mmu_clear_cache(struct mmu_ctx *mmu_ctx);
+int img_mmu_move_pg_to_cache(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		int buf_id, uint64_t virt_addr, uint32_t page_size, uint32_t page_idx);
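Continuing the sketch above, wrapping the memory context with an MMU context and mapping a buffer at a device virtual address might look like this (the `mmu_cb` callback is hypothetical, and `0` is passed for `map_flags` since their exact semantics are driver-defined):

```c
static int mmu_cb(enum img_mmu_callback_type type, int buf_id, void *data)
{
	return 0; /* e.g. trigger a device TLB invalidation on IMG_MMU_CALLBACK_UNMAP */
}

struct mmu_config cfg = { .addr_width = 40, .page_size = 4096 };
struct mmu_ctx *mmu;

img_mmu_ctx_create(dev, &cfg, ctx, heap_id, mmu_cb, NULL, &mmu);
img_mmu_map(mmu, ctx, buf_id, IMG_MEM_VA_HEAP1_BASE, 0);
/* ... the device now sees the buffer at IMG_MEM_VA_HEAP1_BASE ... */
img_mmu_unmap(mmu, ctx, buf_id);
img_mmu_ctx_destroy(mmu);
```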
+/*
+ * virtual address allocation
+ */
+struct mmu_vaa;
+
+int img_mmu_vaa_create(struct device *device,
+		uint32_t base, size_t size, struct mmu_vaa **vaa);
+int img_mmu_vaa_destroy(struct mmu_vaa *vaa);
+int img_mmu_vaa_alloc(struct mmu_vaa *vaa, size_t size, uint32_t *addr);
+int img_mmu_vaa_free(struct mmu_vaa *vaa, uint32_t addr, size_t size);
+
+bool img_mem_calc_parity(unsigned long long input);
+
+/*
+ * PDUMP generation:
+ * img_pdump_txt_create creates a TXT buffer in RAM,
+ *   which is used by img_pdump_txt_printf
+ * img_pdump_bin_create creates a PRM or RES buffer in RAM,
+ *   which is used by img_pdump_bin_write
+ */
+struct pdump_buf {
+	char   *ptr;
+	size_t  size;      /* allocated size of buffer */
+	size_t  len;       /* how full is the buffer */
+	bool    drop_data; /* do not store data in file */
+};
+#define PDUMP_TXT      0  /* eg pdump.txt     */
+#define PDUMP_PRM      1  /* eg pdump.prm     */
+#define PDUMP_RES      2  /* eg pdump.res     */
+#define PDUMP_DBG      3  /* eg pdump.dbg     */
+#define PDUMP_CRC      4  /* eg pdump.crc     */
+#define PDUMP_CRC_CMB  5  /* eg pdump.crc_cmb */
+#define PDUMP_MAX      6
+
+/*
+ * VHA PDUMPs.
+ * Uses img_pdump buffers to collect pdump information.
+ * There are 3 different PDUMP files: TXT, PRM and RES.
+ * They are simply buffers in RAM.
+ * They are mapped into debugfs: /sys/kernel/debug/vhaN/pdump.*
+ */
+struct pdump_descr {
+	struct pdump_buf pbufs[PDUMP_MAX];
+	struct mutex     lock;
+};
+
+#ifndef OSID
+#define _PMEM_ ":MEM"
+#else
+#define _PMEM_ ":MEM_OS"__stringify(OSID)
+#endif
+struct pdump_buf *img_pdump_create(struct pdump_descr* pdump, uint32_t pdump_num, size_t size);
+int img_pdump_write(struct pdump_descr* pdump, uint32_t pdump_num, const void *ptr, size_t size);
+int __img_pdump_printf(struct device* dev, const char *fmt, ...) __printf(2, 3);
+#define img_pdump_printf(fmt, ...) __img_pdump_printf(vha->dev, fmt, ##__VA_ARGS__)
+
+void img_pdump_destroy(struct pdump_descr* pdump);
+bool img_pdump_enabled(struct pdump_descr* pdump);
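A minimal kernel-side sketch of the pdump API declared above (buffer size is arbitrary, error handling elided):

```c
static struct pdump_descr pd;

mutex_init(&pd.lock);
if (img_pdump_create(&pd, PDUMP_TXT, 1024 * 1024))   /* returns the pdump_buf */
	img_pdump_write(&pd, PDUMP_TXT, "-- hello pdump\n", 15);
img_pdump_destroy(&pd);
```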
+
+#endif /* IMG_MEM_MAN_H */
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 55 - 0
driver/include/nexef_plat.h

@@ -0,0 +1,55 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#ifndef GYRUS_PLAT_H
+#define GYRUS_PLAT_H
+
+#define NEXEF_NNA_DEVICE_NAME "nexef_nna"
+
+struct nexef_nna_platform_data {
+    uint64_t nna_memory_offset;
+    uint64_t nna_memory_base;
+    uint64_t nna_memory_size;
+};
+
+
+#define TC_INTERRUPT_NNA 2
+
+#endif /* NEXEF_PLAT_H */

+ 118 - 0
driver/include/uapi/img_mem_man.h

@@ -0,0 +1,118 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       img_mem_man.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef IMG_MEM_MAN_UAPI_H
+#define IMG_MEM_MAN_UAPI_H
+
+/* memory attributes */
+enum img_mem_attr {
+	IMG_MEM_ATTR_CACHED        = 0x00000001,
+	IMG_MEM_ATTR_UNCACHED      = 0x00000002,
+	IMG_MEM_ATTR_WRITECOMBINE  = 0x00000004,
+
+	/* Special */
+	IMG_MEM_ATTR_SECURE        = 0x00000010,
+	IMG_MEM_ATTR_NOMAP         = 0x00000020,
+	IMG_MEM_ATTR_NOSYNC        = 0x00000040,
+
+	/* Internal */
+	IMG_MEM_ATTR_MMU           = 0x10000000,
+	IMG_MEM_ATTR_OCM           = 0x20000000,
+};
+
+/* Cache attributes mask */
+#define IMG_MEM_ATTR_CACHE_MASK 0xf
+
+/* Supported heap types */
+enum img_mem_heap_type {
+	IMG_MEM_HEAP_TYPE_UNKNOWN = 0,
+	IMG_MEM_HEAP_TYPE_UNIFIED,
+	IMG_MEM_HEAP_TYPE_CARVEOUT,
+	IMG_MEM_HEAP_TYPE_ION,
+	IMG_MEM_HEAP_TYPE_DMABUF,
+	IMG_MEM_HEAP_TYPE_COHERENT,
+	IMG_MEM_HEAP_TYPE_ANONYMOUS,
+	IMG_MEM_HEAP_TYPE_OCM,
+};
+
+/* Heap attributes */
+enum img_mem_heap_attrs {
+	IMG_MEM_HEAP_ATTR_INTERNAL  = 0x01,
+	IMG_MEM_HEAP_ATTR_IMPORT    = 0x02,
+	IMG_MEM_HEAP_ATTR_EXPORT    = 0x04,
+	IMG_MEM_HEAP_ATTR_SEALED    = 0x08,
+
+	/* User attributes */
+	IMG_MEM_HEAP_ATTR_LOCAL     = 0x10,
+	IMG_MEM_HEAP_ATTR_SHARED    = 0x20,
+};
+
+/* heaps ids */
+#define IMG_MEM_MAN_HEAP_ID_INVALID 0
+#define IMG_MEM_MAN_MIN_HEAP 1
+#define IMG_MEM_MAN_MAX_HEAP 16
+
+/* buffer ids (per memory context) */
+#define IMG_MEM_MAN_BUF_ID_INVALID 0
+#define IMG_MEM_MAN_MIN_BUFFER 1
+#define IMG_MEM_MAN_MAX_BUFFER 2000
+
+/* Definition of VA guard gap between allocations */
+#define IMG_MEM_VA_GUARD_GAP 0x1000
+
+/* Virtual memory space for buffers allocated
+ * in the kernel - OCM & device debug buffers */
+#define IMG_MEM_VA_HEAP1_BASE 0x8000000ULL
+#define IMG_MEM_VA_HEAP1_SIZE 0x40000000ULL
+
+/* Definition of VA guard gap between heaps - 2MB (size of MMU PD) */
+#define IMG_MEM_HEAP_GUARD_GAP 0x200000
+
+/* Virtual memory space for buffers allocated in the user space */
+#define IMG_MEM_VA_HEAP2_BASE ( \
+		IMG_MEM_VA_HEAP1_BASE + IMG_MEM_VA_HEAP1_SIZE + IMG_MEM_HEAP_GUARD_GAP)
+#define IMG_MEM_VA_HEAP2_SIZE 0x3C0000000ULL
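For reference, the resulting virtual layout works out as follows (plain arithmetic on the values above):

```c
/*
 * HEAP1:     0x008000000 .. 0x048000000   (size 0x040000000 = 1 GB)
 * guard gap: 0x048000000 .. 0x048200000   (size 0x000200000 = 2 MB)
 * HEAP2:     0x048200000 .. 0x408200000   (size 0x3C0000000 = 15 GB)
 */
```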
+
+#endif /* IMG_MEM_MAN_UAPI_H */

+ 49 - 0
driver/include/uapi/version.h

@@ -0,0 +1,49 @@
+/*!
+******************************************************************************
+ @file   : version.h
+
+ @brief  Version information for VHA tools
+
+ @Author Imagination Technologies
+
+ @date   08/05/2013
+
+ @License  MIT
+
+  Copyright (c) Imagination Technologies Ltd.
+
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to deal
+  in the Software without restriction, including without limitation the rights
+  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+
+ \n<b>Description:</b>\n
+         This file is automatically updated by the build system to contain
+         the correct version information.
+
+ \n<b>Platform:</b>\n
+         Platform Independent
+
+******************************************************************************/
+
+#ifndef VERSION_H
+#define VERSION_H
+
+#define VERSION_STRING "REL_3.8-cl6140200"
+
+#define KERNEL_INTERFACE_DIGEST "37b909e8b218177ddfc5eb2d5f162348"
+
+#endif  // VERSION_H

+ 423 - 0
driver/include/uapi/vha.h

@@ -0,0 +1,423 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef _VHA_UAPI_H
+#define _VHA_UAPI_H
+
+#if defined(__KERNEL__)
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#elif defined(__linux__)
+#include <sys/ioctl.h>
+#include <inttypes.h>
+#else
+#error unsupported build
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define VHA_OCM_MAX_NUM_PAGES 128
+#define VHA_CORE_MAX_ALT_ADDRS 16
+#define VHA_MAX_CORES 8
+
+// represents OCM types,
+#define VHA_LOCAL_OCM  0  /* Local OCM */
+#define VHA_SHARED_OCM 1  /* Shared OCM */
+#define VHA_OCM_TYPE_MAX 2
+
+/* device hw properties */
+struct vha_hw_props {
+	uint64_t product_id;
+	uint64_t core_id;
+	uint64_t soc_axi;
+	uint8_t  mmu_width;	   /* MMU address width: 40, or 0 if no MMU */
+	uint8_t  mmu_ver;      /* MMU version */
+	uint32_t mmu_pagesize; /* MMU page size */
+
+	union {
+		struct {
+			unsigned rtm: 1;
+			unsigned parity: 1;
+		} supported;
+		uint8_t features;
+	};
+	bool     dummy_dev;
+	bool     skip_bvnc_check;
+	bool     use_pdump;
+	uint8_t  num_cnn_core_devs;
+	uint32_t locm_size_bytes; /* per core */
+	uint32_t socm_size_bytes; /* total size for all cores */
+	uint32_t socm_core_size_bytes; /* per core */
+	uint32_t clock_freq;		/* hardware clock rate, kHz */
+
+} __attribute__((aligned(8)));
+
+struct vha_cnn_props {
+	/* TOBEDONE */
+};
+
+/* command sent to device */
+enum vha_cmd_type {
+	VHA_CMD_INVALID          = 0x000,
+	VHA_CMD_CNN_SUBMIT       = 0x101,
+	VHA_CMD_CNN_SUBMIT_MULTI,
+	VHA_CMD_CNN_PDUMP_MSG
+};
+
+/* optional flags for commands */
+#define VHA_CMDFLAG_NOTIFY       0x0001 /* send response when cmd complete */
+#define VHA_CHECK_CRC            0x0002 /* check the combined CRCs */
+#define VHA_EXEC_TIME_SET        0x0004 /* execution time is valid */
+
+/*
+ * Message from user to be sent to VHA (write).
+ * A command will contain a number of input and output buffers
+ * and some command specific parameters.
+ * Buffers must be identified by their buffer id.
+ * All buffer ids must *precede* any other parameters:
+ *    input buf ids,
+ *    output buf ids,
+ *    followed by other parameters.
+ */
+struct vha_user_cmd {
+	uint32_t cmd_id;     /* arbitrary id for cmd */
+	uint16_t cmd_type;   /* enum vha_cmd_type */
+	uint16_t flags;      /* VHA_CMDFLAG_xxx */
+	uint8_t  priority;   /* WL priority */
+	uint8_t  padding;    /* padding to keep data 32bit aligned */
+	uint8_t  num_bufs;   /* total number of buffers */
+	uint8_t  num_inbufs; /* number of input buffers */
+	uint32_t data[0];    /* 0-N words: input bufids
+	                      * followed by other bufids
+	                      * followed by other parameters */
+};
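For example, a command with two input buffers, one output buffer and a single parameter word would be laid out as below (userspace sketch; the buffer ids would come from VHA_IOC_ALLOC, and all values here are hypothetical):

```c
#include <stdint.h>
#include <stdlib.h>

uint32_t in0 = 1, in1 = 2, out = 3, param = 0;   /* hypothetical ids/values */
struct vha_user_cmd *cmd = calloc(1, sizeof(*cmd) + 4 * sizeof(uint32_t));

cmd->cmd_id     = 42;                  /* echoed back in the response */
cmd->cmd_type   = VHA_CMD_CNN_SUBMIT;
cmd->flags      = VHA_CMDFLAG_NOTIFY;  /* ask for a response when complete */
cmd->num_inbufs = 2;                   /* input bufids come first in data[] */
cmd->num_bufs   = 3;                   /* total number of bufids */
cmd->data[0] = in0;
cmd->data[1] = in1;
cmd->data[2] = out;                    /* output bufids follow the inputs */
cmd->data[3] = param;                  /* other parameters come last */
```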
+
+/* Structure defining hardware issues */
+struct vha_hw_brns {
+	union {
+		struct {
+			unsigned bRN71649: 1;
+			unsigned bRN71556: 1;
+			unsigned bRN71338: 1;
+		} bit;
+		uint64_t map;
+	};
+};
+
+/*
+ * CNN_SUBMIT message written from user to VHA.
+ * 3 input buffers: cmdstream, input image, coefficients
+ * 1 output buffer
+ * 1 internal buffer (optional)
+ * offsets into the input and output buffers
+ * and a register map: this tells the driver which alt-register-N
+ * will contain the address of which buffer.
+ */
+struct vha_subseg_info {
+	uint32_t cmdbuf_offset;
+	uint32_t cmdbuf_size;
+};
+struct vha_user_cnn_submit_cmd {
+	struct vha_user_cmd msg;
+	uint32_t cmdbuf;                             /* bufid of cmdstream buffer */
+	uint32_t bufs[VHA_CORE_MAX_ALT_ADDRS];       /* bufid of IN, COEFF, OUT, INTERNAL,CRC,DBG,WB buffers */
+	uint32_t bufoffsets[VHA_CORE_MAX_ALT_ADDRS]; /* offsets into inbufs and outbufs buffers */
+	uint32_t bufsizes[VHA_CORE_MAX_ALT_ADDRS];   /* sizes of the inbufs and outbufs buffers */
+	uint8_t  regidx[VHA_CORE_MAX_ALT_ADDRS];     /* register to be used for inbufs and outbufs */
+	uint32_t onchipram_map_id;                   /* OCM mapping id - hot pages */
+	uint32_t onchipram_bufs[VHA_OCM_TYPE_MAX];   /* OCM linear mapping buffers */
+	uint32_t estimated_cycles;                   /* estimated number of cycles for this command */
+	uint64_t expected_ip_capab;                  /* expected BVNC */
+	uint64_t hw_brns;                            /* BRNSs bit map */
+	uint32_t subseg_num;                         /* number of subsegments in subseg_info array */
+	struct vha_subseg_info subseg_info[1];       /* there's always at least one subsegment */
+} __attribute__((aligned(8)));
+
+/*
+ * CNN_SUBMIT_MULTI message written from user to VHA.
+ * 3 input buffers: cmdstream(s), input image, coefficients
+ * 1 output buffer
+ * 1 internal buffer (optional)
+ * offsets into the input and output buffers
+ * and a register map: this tells the driver which alt-register-N
+ * will contain the address of which buffer.
+ */
+struct vha_user_cnn_submit_multi_cmd {
+	struct vha_user_cmd msg;
+	uint32_t cmdbuf[VHA_MAX_CORES];              /* bufid of cmdstream buffer */
+	uint32_t bufs[VHA_CORE_MAX_ALT_ADDRS];       /* bufid of IN, COEFF, OUT, INTERNAL,CRC,DBG,WB buffers */
+	uint32_t bufoffsets[VHA_CORE_MAX_ALT_ADDRS]; /* offsets into inbufs and outbufs buffers */
+	uint32_t bufsizes[VHA_CORE_MAX_ALT_ADDRS];   /* sizes of the inbufs and outbufs buffers */
+	uint8_t  regidx[VHA_CORE_MAX_ALT_ADDRS];     /* register to be used for inbufs and outbufs */
+	uint8_t  num_cores;                          /* number of cores required for this workload */
+	uint32_t onchipram_bufs[VHA_OCM_TYPE_MAX];   /* OCM linear mapping buffers */
+	uint32_t crcs[VHA_MAX_CORES];                /* golden CRCs */
+	uint64_t exec_time;                          /* expected execution time */
+	uint32_t shared_circ_buf_offs;               /* circular buffer offset in the shared memory */
+	uint32_t estimated_cycles;                   /* estimated number of cycles for this command */
+	uint64_t expected_ip_capab;                  /* expected BVNC */
+	uint64_t hw_brns;                            /* BRNSs bit map */
+} __attribute__((aligned(8)));
+
+/*
+ * Response from the kernel module to user.
+ */
+struct vha_user_rsp {
+	uint32_t cmd_id;	/* arbitrary id to identify cmd */
+	uint32_t err_no;	/* 0 if successful, else -ve */
+	uint64_t rsp_err_flags;
+	uint32_t data[0];	/* 0-N words of additional info */
+};
+
+/*
+ * response returned after CNN_SUBMIT.
+ */
+struct vha_user_cnn_submit_rsp {
+	struct vha_user_rsp msg;
+	uint64_t last_proc_us;	/* processing time in us,
+				measured with system clock */
+	uint32_t mem_usage;	/* device memory used */
+	uint32_t hw_cycles;	/* hardware cycles used */
+} __attribute__((aligned(8)));
+
+#define MAX_VHA_USER_RSP_SIZE (sizeof(struct vha_user_cnn_submit_rsp))
+
+/* response returned when querying for heaps */
+struct vha_heap_data {
+	uint32_t id;				/* Heap ID   */
+	uint32_t type;				/* Heap type */
+	uint32_t attributes;		/* Heap attributes
+		defining capabilities that the user may treat as a hint
+		when selecting the heap id during allocation/importing */
+};
+
+#define VHA_MAX_HEAPS 16
+
+struct vha_heaps_data {
+	struct vha_heap_data heaps[VHA_MAX_HEAPS];		/* [OUT] Heap data */
+} __attribute__((aligned(8)));
+
+/* parameters to allocate a device buffer */
+struct vha_alloc_data {
+	uint64_t size;				/* [IN] Size of device memory (in bytes)    */
+	uint32_t heap_id;			/* [IN] Heap ID of allocator
+														or VHA_USE_DEFAULT_MEM_HEAP */
+	uint32_t attributes;	/* [IN] Attributes of buffer: img_mem_attr  */
+	char     name[8];			/* [IN] short name for buffer               */
+	uint32_t buf_id;			/* [OUT] Generated buffer ID                */
+} __attribute__((aligned(8)));
+
+/* parameters to import a device buffer */
+struct vha_import_data {
+	uint64_t size;				/* [IN] Size of device memory (in bytes)    */
+	uint64_t buf_hnd;			/* [IN] File descriptor/cpu pointer
+														of buffer to import */
+	uint32_t heap_id;			/* [IN] Heap ID of allocator                */
+	uint32_t attributes;	/* [IN] Attributes of buffer                */
+	char     name[8];			/* [IN] short name for buffer               */
+	uint32_t buf_id;			/* [OUT] Generated buffer ID                */
+} __attribute__((aligned(8)));
+
+/* parameters to export a device buffer */
+struct vha_export_data {
+	uint32_t buf_id;       /* [IN] Buffer ID to be exported */
+	uint64_t size;         /* [IN] Size to be exported */
+	uint32_t attributes;   /* [IN] Attributes of buffer */
+	uint64_t buf_hnd;      /* [OUT] Buffer handle (file descriptor) */
+} __attribute__((aligned(8)));
+
+struct vha_free_data {
+	uint32_t buf_id;	/* [IN] ID of device buffer to free */
+};
+
+enum vha_map_flags {
+	VHA_MAP_FLAG_NONE       = 0x0,
+	VHA_MAP_FLAG_READ_ONLY  = 0x1,
+	VHA_MAP_FLAG_WRITE_ONLY = 0x2,
+	VHA_MAP_FLAG_IO         = 0x4,
+	VHA_MAP_FLAG_MODEL      = 0x8,
+};
+
+/* parameters to map a buffer into device */
+struct vha_map_to_onchip_data {
+	uint64_t virt_addr;		/* [IN] Device virtual address of a mapping */
+	uint32_t buf_id;		/* [IN] ID of device buffer to map to VHA */
+	uint32_t page_size;		/* [IN] Page size */
+	uint32_t num_pages;		/* [IN] The number of pages to be mapped */
+	uint32_t page_idxs[VHA_OCM_MAX_NUM_PAGES];
+							/* [IN] Indexes of pages to be mapped */
+	uint32_t map_id;		/* [IN/OUT] if map_id == 0, creates new mapping
+								and generates new map_id,
+								otherwise using existing map_id*/
+} __attribute__((aligned(8)));
+
+/* parameters to map a buffer into device */
+struct vha_map_data {
+	uint64_t virt_addr;	/* [IN] Device virtual address to map     */
+	uint32_t buf_id;	/* [IN] ID of device buffer to map to VHA */
+	uint32_t flags;		/* [IN] Mapping flags, see vha_map_flags  */
+} __attribute__((aligned(8)));
+
+struct vha_unmap_data {
+	uint32_t buf_id;	/* [IN] ID of device buffer to unmap from VHA */
+} __attribute__((aligned(8)));
+
+enum vha_buf_status {
+	VHA_BUF_UNFILLED,
+	VHA_BUF_FILLED_BY_SW,
+	VHA_BUF_FILLED_BY_HW
+};
+#define VHA_SYNC_NONE (-1)
+/* parameters to set buffer status ("filled" or "unfilled") */
+struct vha_buf_status_data {
+	uint32_t buf_id;
+	uint32_t status;	/* enum vha_buf_status */
+	int      in_sync_fd;   /* input sync to attach */
+	bool     out_sync_sig; /* output sync signal */
+} __attribute__((aligned(8)));
+
+enum vha_sync_op {
+	VHA_SYNC_OP_CREATE_OUT, /* create output sync_fd */
+	VHA_SYNC_OP_MERGE_IN,   /* merge input sync_fds */
+	VHA_SYNC_OP_RELEASE     /* release syncs */
+};
+
+/* parameters to manage sync_fds */
+#define VHA_SYNC_MAX_BUF_IDS     (VHA_CORE_MAX_ALT_ADDRS)
+#define VHA_SYNC_MAX_IN_SYNC_FDS (VHA_CORE_MAX_ALT_ADDRS)
+struct vha_sync_create_data {
+	uint32_t buf_id_count;                  /* [IN] number of output buffers */
+	uint32_t buf_ids[VHA_SYNC_MAX_BUF_IDS]; /* [IN] list of output buffer ids */
+};
+struct vha_sync_merge_data {
+	uint32_t in_sync_fd_count;                 /* [IN] number of input sync_fds */
+	int in_sync_fds[VHA_SYNC_MAX_IN_SYNC_FDS]; /* [IN] list of input sync_fds */
+};
+struct vha_sync_release_data {
+	uint32_t buf_id_count;                  /* [IN] number of buffers */
+	uint32_t buf_ids[VHA_SYNC_MAX_BUF_IDS]; /* [IN] list of buffer ids */
+};
+struct vha_sync_data {
+	enum vha_sync_op op;
+	union {
+		struct vha_sync_create_data create_data;   /* create output sync_fd data */
+		struct vha_sync_merge_data merge_data;     /* merge input sync_fds data */
+		struct vha_sync_release_data release_data; /* release syncs data */
+	};
+	int sync_fd; /* [OUT] output sync_fd/sync_fd for merged input sync_fds */
+} __attribute__((aligned(8)));
+
+struct vha_cancel_data {
+	uint32_t cmd_id;      /* [IN] masked ID of commands to be cancelled */
+	uint32_t cmd_id_mask; /* [IN] mask for command IDs to be cancelled */
+	bool     respond;     /* [IN] if true, respond to this cancel request */
+} __attribute__((aligned(8)));
+
+struct vha_version_data {
+	char  digest[33];     /* [OUT] digest of this interface file */
+} __attribute__((aligned(8)));
+
+#define VHA_IOC_MAGIC  'q'
+
+#define VHA_IOC_HW_PROPS          _IOR(VHA_IOC_MAGIC,  0, struct vha_hw_props)
+#define VHA_IOC_QUERY_HEAPS       _IOR(VHA_IOC_MAGIC,  1, struct vha_heaps_data)
+#define VHA_IOC_ALLOC             _IOWR(VHA_IOC_MAGIC, 2, struct vha_alloc_data)
+#define VHA_IOC_IMPORT            _IOWR(VHA_IOC_MAGIC, 3, struct vha_import_data)
+#define VHA_IOC_EXPORT            _IOWR(VHA_IOC_MAGIC, 4, struct vha_export_data)
+#define VHA_IOC_FREE              _IOW(VHA_IOC_MAGIC,  5, struct vha_free_data)
+#define VHA_IOC_VHA_MAP_TO_ONCHIP _IOW(VHA_IOC_MAGIC,  6, struct vha_map_to_onchip_data)
+#define VHA_IOC_VHA_MAP           _IOW(VHA_IOC_MAGIC,  7, struct vha_map_data)
+#define VHA_IOC_VHA_UNMAP         _IOW(VHA_IOC_MAGIC,  8, struct vha_unmap_data)
+#define VHA_IOC_BUF_STATUS        _IOW(VHA_IOC_MAGIC,  9, struct vha_buf_status_data)
+#define VHA_IOC_SYNC              _IOWR(VHA_IOC_MAGIC, 10, struct vha_sync_data)
+#define VHA_IOC_CANCEL            _IOW(VHA_IOC_MAGIC,  11, struct vha_cancel_data)
+
+#define VHA_IOC_VERSION           _IOW(VHA_IOC_MAGIC,  16, struct vha_version_data)
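A minimal userspace sketch of driving one of these ioctls; the `/dev/vha0` node name is an assumption about the platform setup:

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "vha.h"

int main(void)
{
	struct vha_hw_props props;
	int fd = open("/dev/vha0", O_RDWR);

	if (fd < 0 || ioctl(fd, VHA_IOC_HW_PROPS, &props) < 0) {
		perror("VHA_IOC_HW_PROPS");
		return 1;
	}
	printf("product id 0x%llx, %u CNN core(s)\n",
	       (unsigned long long)props.product_id,
	       (unsigned)props.num_cnn_core_devs);
	close(fd);
	return 0;
}
```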
+
+#define VHA_SCOPE_DEV_NAME "vha_scope"
+
+/* vha scope context
+ * */
+struct vha_trace_ctx {
+	unsigned model_id;	/* model id */
+	unsigned frame_id;	/* inference id */
+	unsigned dev_id;		/* device id */
+	unsigned osid;			/* OS id */
+	unsigned pid;				/* process id */
+	unsigned tid;				/* thread id */
+};
+
+/* Event information, available from vha_info */
+struct vha_timing_data {
+	unsigned evt_type;			/* event type */
+	unsigned seqno;					/* continually increments */
+	unsigned dev_id;				/* device id */
+	unsigned timestamp_lo;	/* in microseconds */
+	unsigned timestamp_hi;
+	unsigned type;					/* either SUBMIT or COMPLETE or ERROR */
+	unsigned cycles;				/* HW cycle count */
+	unsigned pid;						/* process id */
+};
+
+enum vha_scope_evt_type {
+	VHA_EVENT_TIMING,
+	VHA_EVENT_NUM
+};
+
+enum vha_timing_data_type {
+	VHA_EVENT_TYPE_ENQUEUE,
+	VHA_EVENT_TYPE_SUBMIT,
+	VHA_EVENT_TYPE_COMPLETE,
+	VHA_EVENT_TYPE_ERROR,
+	VHA_EVENT_TYPE_NUM
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _VHA_UAPI_H */

+ 116 - 0
driver/include/uapi/vha_errors.h

@@ -0,0 +1,116 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_errors.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef _VHA_ERRORS_H
+#define _VHA_ERRORS_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+enum {
+	/* System errors */
+	VHA_RSP_ERROR_HW_SYS_AXI_ERROR_SHF = 0,
+	VHA_RSP_ERROR_HW_SYS_MMU_PAGE_FAULT_SHF,
+	VHA_RSP_ERROR_HW_SYS_SYS_MEM_WDT_SHF,
+	VHA_RSP_ERROR_HW_SYS_AXI_MEMORY_PARITY_ERROR_SHF,
+	VHA_RSP_ERROR_HW_SYS_MMU_PARITY_ERROR_SHF,
+	VHA_RSP_ERROR_HW_SYS_RAM_CORRECTION_SHF,
+	VHA_RSP_ERROR_HW_SYS_RAM_DETECTION_SHF,
+	VHA_RSP_ERROR_HW_SYS_LSYNC_INV_REQ_SHF,
+	VHA_RSP_ERROR_HW_SYS_LOGIC_ERROR_SHF,
+	VHA_RSP_ERROR_SW_SYS_EVNT_PARITY_ERROR_SHF,
+	VHA_RSP_ERROR_SW_WDT_EXPIRED_SHF,
+	/* WM event errors */
+	VHA_RSP_ERROR_HW_EVNT_WM_WL_WDT_SHF,
+	VHA_RSP_ERROR_HW_EVNT_WM_WL_IDLE_WDT_SHF,
+	VHA_RSP_ERROR_HW_EVNT_WM_SOCIF_WDT_SHF,
+	VHA_RSP_ERROR_HW_EVNT_LOGIC_FAULT_SHF,
+	VHA_RSP_ERROR_SW_EVNT_WM_PARITY_ERROR_SHF,
+	/* WM response FIFO errors */
+	VHA_RSP_ERROR_HW_CORE_IRQ_BEFORE_KICK_SHF,
+	VHA_RSP_ERROR_HW_INDIRECT_MASK_SET_ERROR_SHF,
+	VHA_RSP_ERROR_HW_KICK_CORE_ACCESS_ERROR_SHF,
+	VHA_RSP_ERROR_HW_CNN_CONTROL_START_HIGH_SHF,
+	VHA_RSP_ERROR_HW_CNN_STATUS_ERROR_SHF,
+	VHA_RSP_ERROR_HW_INT_CORE_ACCESS_ERROR_SHF,
+	VHA_RSP_ERROR_HW_CORE_EVENT_ERROR_SHF,
+	VHA_RSP_ERROR_HW_CORE_EVENT_NOT_CLEARED_SHF,
+	VHA_RSP_ERROR_HW_CORE_EVENT_IRQ_HIGH_SHF,
+	VHA_RSP_ERROR_HW_INTERCONNECT_ERROR_SHF,
+	VHA_RSP_ERROR_SW_WM_PARITY_ERROR_SHF,
+	VHA_RSP_ERROR_SW_WL_ID_MISMATCH_ERROR_SHF,
+	VHA_RSP_ERROR_SW_CONF_ERROR_SHF,
+	VHA_RSP_ERROR_SW_CRC_MISMATCH_ERROR_SHF,
+	/* CNN core status errors. */
+	VHA_RSP_ERROR_HW_CORE_LOGIC_ERROR_SHF,
+	VHA_RSP_ERROR_HW_RAM_CORRECTION_SHF,
+	VHA_RSP_ERROR_HW_RAM_DETECTION_SHF,
+	VHA_RSP_ERROR_HW_CORE_SYNC_ERROR_SHF,
+	VHA_RSP_ERROR_HW_CORE_WDT_SHF,
+	VHA_RSP_ERROR_HW_CORE_MEM_WDT_SHF,
+	VHA_RSP_ERROR_HW_CORE_CNN_ERROR_SHF,
+	/* Interconnect status errors. */
+	VHA_RSP_ERROR_HW_LOCKSTEP_ERROR_SHF,
+	VHA_RSP_ERROR_HW_IC_LOGIC_ERROR_SHF,
+	VHA_RSP_ERROR_HW_SOCIF_READ_MISMATCH_SHF,
+	VHA_RSP_ERROR_HW_SOCIF_READ_UNRESPONSIVE_SHF,
+	VHA_RSP_ERROR_SW_IC_PARITY_ERROR_SHF,
+	/* Workload submit errors. */
+	VHA_RSP_ERROR_SW_SKIP_CMD_SHF,
+	VHA_RSP_ERROR_SW_KICK_BIT_READ_BACK_FAILURE_SHF,
+	VHA_RSP_ERROR_SW_HW_BUSY_SHF,
+	VHA_RSP_ERROR_SW_INVALID_CMD_INFO_SHF,
+	VHA_RSP_ERROR_SW_INVALID_CMD_TYPE_SHF,
+	VHA_RSP_ERROR_SW_MMU_SETUP_FAILURE_SHF
+};
+
+#define VHA_RSP_ERROR(err) (1ull << (VHA_RSP_ERROR_##err##_SHF))
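The macro turns a shift name into a single-bit mask, so decoding `rsp_err_flags` from a `struct vha_user_rsp` might look like this (fragment; `rsp` is assumed to point at a received response):

```c
if (rsp->rsp_err_flags & VHA_RSP_ERROR(HW_SYS_MMU_PAGE_FAULT))
	fprintf(stderr, "MMU page fault reported by hardware\n");
if (rsp->rsp_err_flags & VHA_RSP_ERROR(SW_CRC_MISMATCH_ERROR))
	fprintf(stderr, "golden CRC mismatch\n");
```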
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _VHA_ERRORS_H */

+ 72 - 0
driver/include/vha_drv_common.h

@@ -0,0 +1,72 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef VHA_DRV_COMMON_H
+#define VHA_DRV_COMMON_H
+
+#include <img_mem_man.h>
+
+/* This is a structure shared between img_mem and vha modules.
+ * The pointer to it is stored in device.driver_data
+ */
+struct vha_dev_common {
+	void* vha_dev; /* opaque pointer to vha_dev structure */
+	struct pdump_descr pdump;
+};
+
+static inline struct pdump_descr* vha_pdump_dev_get_drvdata(struct device* dev) {
+	struct vha_dev_common* vdc = dev_get_drvdata(dev);
+	if(!vdc)
+	  return NULL;
+	return &vdc->pdump;
+}
+
+#endif /* VHA_DRV_COMMON_H */
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 24 - 0
driver/nexef_platform/Makefile

@@ -0,0 +1,24 @@
+# if building the kernel module in-tree, these config options
+# should be put into a Kconfig file.
+# if building out-of-tree, defining them here is as good as any.
+# CONFIG_VHA_NEXEF_PLAT:       build the VHA 3NX-F platform driver
+export CONFIG_VHA_NEXEF_PLAT := m
+
+ifeq ($(CONFIG_NEXEF_NNPU_INCLUDE),)
+  $(error CONFIG_NEXEF_NNPU_INCLUDE not set: you must provide the path to the folder containing the NNPU tc_drv.h file)
+endif
+
+ccflags-y += -I$(CONFIG_NEXEF_NNPU_INCLUDE)
+
+ccflags-y += -I$(src)/../include
+ccflags-y += -I$(src)/
+ccflags-y += -Wall -g
+
+# This should not be needed, but on some platforms (especially with old
+# kernels) the module can't be built without it.
+ccflags-y += -DRETPOLINE -Wfatal-errors
+
+# NOT WORKING FOR NOW, DO NOT ENABLE
+#ccflags-y += -DCONFIG_SET_FPGA_CLOCK=y
+
+obj-$(CONFIG_VHA_NEXEF_PLAT) += nexef_plat.o

+ 110 - 0
driver/nexef_platform/README.md

@@ -0,0 +1,110 @@
+3NX-F base platform driver
+==========================
+
+This document explains how to build the Linux drivers for the PCI FPGA-based version of the 3NX-F.
+
+It also expects basic knowledge of how to build the normal GPU driver and the NNA driver.
+
+### Building the drivers
+
+##### 1. Getting the sources
+
+First you need the NNA source tree (which you probably already have, since you are reading this file).
+You will also need the NNPU (a GPU without graphics) driver, which you can find here:
+
+//powervr/swgraphics/rogueddk/MAIN/
+
+##### 2. Building the base driver and NNA
+
+First, build the NNA-side driver, which includes the base 3NX-F driver: nexef_plat.ko
+
+> To build the driver, you first need to locate the `tc_drv.h` file inside the GPU driver sources. It is normally
+> found in `kernel/drivers/staging/imgtec/tc`; copy the full path of the folder containing it.
+
+Start by building the NNA driver as usual:
+- Create a build folder
+- Run cmake inside that build folder and configure it to your needs
+- Run `make` inside the folder to build the userland part
+
+Then we will build the three needed kernel modules:
+- Go inside the `source/kernel/linux` folder
+- Run `make` with the proper arguments:
+
+```
+make -f Makefile.testing CONFIG_VHA_NEXEF=y CONFIG_HW_AX3=y CONFIG_NEXEF_NNPU_INCLUDE=/path/to/tc_drv/folder
+```
+
+It should result in three `.ko` files: `nexef_platform/nexef_plat.ko`, `img_mem/img_mem.ko` and `vha/vha.ko`.
+Copy them to a handy location for later use.
+
+##### 3. Building the NNPU driver
+
+You now need to build the NNPU driver. This document is not an extensive explanation of how to build this driver.
+Please refer to the GPU/NNPU documentation for the dependencies you need and how to build it.
+
+The main thing you need to do is set the proper environment variables to build the NNPU driver:
+
+```
+MIPS_ELF_ROOT=/path/to/mips/toolchain
+RGX_BVNC=32.6.52.603
+EXCLUDED_APIS=vulkan renderscript openrl opengles1 opengles3 camerahal composerhal memtrackhal sensorhal
+PVR_BUILD_DIR=tc_linux
+CLDNN=1
+KERNELDIR=/path/to/linux/kernel/sources
+ROGUEDDK_FOLDER=/path/to/nnpu/sources
+NNPU_BUILD_FOLDER=binary_${PVR_BUILD_DIR}_${BUILD}
+NNPU_BUILD_FOLDER_PATH=/path/to/nnpu/sources/${NNPU_BUILD_FOLDER}
+NNPU_FIRMWARE_FILE=${NNPU_BUILD_FOLDER}/target_neutral/rgx.fw.${RGX_BVNC}
+NNPU_PVRSRVKM_MODULE=${NNPU_BUILD_FOLDER}/target_x86_64/pvrsrvkm.ko
+SUPPORT_KMS=1
+```
+
+You will probably also need these variables to be set:
+```
+PVRVERSION_WITHOUT_P4=1
+DRIVER_BRANCH=MAIN
+PDUMP=0
+SUPPORT_OPENCL_2_X=1
+```
+
+Then, from the NNPU driver root, you just need to run `make -j8 imgdnn imgdnn_test build`
+
+Once the NNPU driver is built, you will need to install the required files; you can use the provided install.sh script.
+**Be careful** not to install the kernel modules and the rc script with it!
+
+
+
+### Loading the drivers
+
+The kernel drivers must be loaded in a specific order.
+Before loading the kernel modules, you need to set the DUT clock on the FPGA. For that, the dbg_py tool must be
+installed on the system you are using; then run the Python script `set_fpga_freq.py` to set the FPGA clock.
+
+Then you will be able to load the drivers:
+
+First the base 3NX-F driver:
+`sudo insmod nexef_plat.ko`
+
+Then load the NNA and NNPU drivers:
+```
+sudo insmod img_mem.ko
+sudo insmod vha.ko
+sudo insmod pvrsrvkm.ko
+```
+
+**You must not** load `tc.ko`, as it will conflict with the base 3NX-F driver.
+You also don't need the `drm_pdp.ko` file, as there is no support for it in the 3NX-F base driver.
+
+Info: The 3NX-F base driver basically replaces the `tc.ko` module in this configuration.
+
+
+##### What does NeXeF mean?
+
+(*Or: why is the base driver called nexef?*)
+
+*NeX* / *NeXeF* were coined by the GPT-2 network (a transformer neural network) while trying to get it to talk about
+the 3NX-F. In the middle of a sentence, it added "the 3NX (pronounced 3-Nex)".
+
+> **Imagination Technologies just released the 3NX-F**, a new "3NX" (pronounced "3-Nex") laser-based MEMS camera
+> that has been designed to be "small, power-efficient, light-weight and with a compact size to be able to fit in a
+> smartphone." The 3NX-F is an upgrade to the 1.5 million pixel 3NX that debuted in March.

+ 1799 - 0
driver/nexef_platform/nexef_plat.c

@@ -0,0 +1,1799 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <hwdefs/vha_cr_gyrus.h>
+#include <hwdefs/nn_sys_cr_gyrus.h>
+#include <hwdefs/gyrus_system.h>
+
+#define DEVICE_NAME "3NXF_plat"
+
+#include <nexef_plat.h>
+/* NNPU needed includes */
+#define SUPPORT_RGX
+#include <tc_drv.h>
+
+//region Defines
+/*
+ * We don't support Apollo base board here, but to have a
+ * proper error message if someone use the wrong baseboard,
+ * we have a little bit of code to report it.
+ *
+ * For that we need to have the Apollo PCI IDs
+ */
+#define PCI_APOLLO_VENDOR_ID (0x1010)
+#define PCI_APOLLO_DEVICE_ID (0x1CF2)
+
+#define IS_APOLLO_DEVICE(devid) ((devid) == PCI_APOLLO_DEVICE_ID)
+
+#define PCI_SIRIUS_VENDOR_ID (0x1AEE)
+#define PCI_SIRIUS_DEVICE_ID (0x1020)
+#define IS_SIRIUS_DEVICE(devid) ((devid) == PCI_SIRIUS_DEVICE_ID)
+
+/*
+ * from Odin Lite TRM rev 1.0.88
+ */
+#define PCI_ODIN_VENDOR_ID (0x1AEE)
+#define PCI_ODIN_DEVICE_ID (0x1010)
+
+#define IS_ODIN_DEVICE(devid) ((devid) == PCI_ODIN_DEVICE_ID)
+
+/* Odin - System control register bar */
+#define PCI_ODIN_SYS_CTRL_REGS_BAR (0)
+
+#define PCI_ODIN_SYS_CTRL_BASE_OFFSET (0x0000)
+/* srs_core */
+#define PCI_ODIN_CORE_ID                        (0x0000)
+#define PCI_ODIN_CORE_REVISION                  (0x0004)
+#define PCI_ODIN_CORE_CHANGE_SET                (0x0008)
+#define PCI_ODIN_CORE_USER_ID                   (0x000C)
+#define PCI_ODIN_CORE_USER_BUILD                (0x0010)
+/* Resets */
+#define PCI_ODIN_CORE_INTERNAL_RESETN           (0x0080)
+#define PCI_ODIN_CORE_EXTERNAL_RESETN           (0x0084)
+#define PCI_ODIN_CORE_EXTERNAL_RESET            (0x0088)
+#define PCI_ODIN_CORE_INTERNAL_AUTO_RESETN      (0x008C)
+/* Clock */
+#define PCI_ODIN_CORE_CLK_GEN_RESET             (0x0090)
+/* Interrupts */
+#define PCI_ODIN_CORE_INTERRUPT_STATUS          (0x0100)
+#define PCI_ODIN_CORE_INTERRUPT_ENABLE          (0x0104)
+#define PCI_ODIN_CORE_INTERRUPT_CLR             (0x010C)
+#define PCI_ODIN_CORE_INTERRUPT_TEST            (0x0110)
+/* GPIOs */
+#define PCI_ODIN_CORE_NUM_GPIO                  (0x0180)
+#define PCI_ODIN_CORE_GPIO_EN                   (0x0184)
+#define PCI_ODIN_CORE_GPIO                      (0x0188)
+/* DUT Ctrl */
+#define PCI_ODIN_CORE_NUM_DUT_CTRL              (0x0190)
+#define PCI_ODIN_CORE_DUT_CTRL1                 (0x0194)
+#define PCI_ODIN_CORE_DUT_CTRL2                 (0x0198)
+#define PCI_ODIN_CORE_NUM_DUT_STAT              (0x019C)
+#define PCI_ODIN_CORE_DUT_STAT1                 (0x01A0)
+#define PCI_ODIN_CORE_DUT_STAT2                 (0x01A4)
+/* LEDs! */
+#define PCI_ODIN_CORE_DASH_LEDS                 (0x01A8)
+/* Core stuff */
+#define PCI_ODIN_CORE_CORE_STATUS               (0x0200)
+#define PCI_ODIN_CORE_CORE_CONTROL              (0x0204)
+#define PCI_ODIN_CORE_REG_BANK_STATUS           (0x0208)
+#define PCI_ODIN_CORE_MMCM_LOCK_STATUS          (0x020C)
+#define PCI_ODIN_CORE_GIST_STATUS               (0x0210)
+
+#define PCI_ODIN_MMCM_LOCK_STATUS_DUT_CORE      (1 << 0)
+#define PCI_ODIN_MMCM_LOCK_STATUS_DUT_IF        (1 << 1)
+
+/* core bits definitions */
+#define INTERNAL_RESET_INTERNAL_RESETN_PIKE     (1 << 7)
+#define EXTERNAL_RESET_EXTERNAL_RESETN_SPI      (1 << 1)
+#define EXTERNAL_RESET_EXTERNAL_RESETN_DUT      (1 << 0)
+
+#define EXTERNAL_RESET_DUT_CORE_MMCM            (1 << 1)
+
+#define DUT_CTRL1_DUT_MST_OFFSET                (1 << 31)
+#define ODIN_CORE_CONTROL_DUT_OFFSET_SHIFT      (24)
+#define ODIN_CORE_CONTROL_DUT_OFFSET_MASK       (0x7 << ODIN_CORE_CONTROL_DUT_OFFSET_SHIFT)
+
+/* interrupt bits definitions */
+#define INT_INTERRUPT_MASTER_ENABLE             (1 << 31)
+#define INT_INTERRUPT_DUT0                      (1 << 0)
+#define INT_INTERRUPT_PDP                       (1 << 1)
+#define INT_INTERRUPT_DUT1                      (1 << 9)
+
+/* odn_clk_blk */
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV1  (0x0020)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV2  (0x0024)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV3  (0x001C)
+#define PCI_ODIN_CLK_BLK_DUT_REG_CLK_OUT_DIV1   (0x0028)
+#define PCI_ODIN_CLK_BLK_DUT_REG_CLK_OUT_DIV2   (0x002C)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT1     (0x0050)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT2     (0x0054)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT3     (0x004C)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_IN_DIV    (0x0058)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV1   (0x0220)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV2   (0x0224)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV3   (0x021C)
+#define PCI_ODIN_CLK_BLK_DUT_MEM_CLK_OUT_DIV1   (0x0228)
+#define PCI_ODIN_CLK_BLK_DUT_MEM_CLK_OUT_DIV2   (0x022C)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT1      (0x0250)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT2      (0x0254)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT3      (0x024C)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_IN_DIV     (0x0258)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV1 (0x0620)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV2 (0x0624)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV3 (0x061C)
+#define PCI_ODIN_CLK_BLK_PDP_MEM_CLK_OUT_DIV1   (0x0628)
+#define PCI_ODIN_CLK_BLK_PDP_MEM_CLK_OUT_DIV2   (0x062C)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT1    (0x0650)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT2    (0x0654)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT3    (0x064C)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_IN_DIV   (0x0658)
+
+#define PCI_ODIN_CORE_REG_SIZE                  (0x1000)
+
+/* Odin - Device Under Test (DUT) register bar */
+#define PCI_ODIN_DUT_REGS_BAR (2)
+/* Odin - Device Under Test (DUT) memory bar */
+#define PCI_ODIN_DUT_MEM_BAR  (4)
+
+/* Odin clock related infos */
+#define PCI_ODIN_INPUT_CLOCK_SPEED              (100000000U)
+#define PCI_ODIN_INPUT_CLOCK_SPEED_MIN          (10000000U)
+#define PCI_ODIN_INPUT_CLOCK_SPEED_MAX          (933000000U)
+#define PCI_ODIN_OUTPUT_CLOCK_SPEED_MIN         (4690000U)
+#define PCI_ODIN_OUTPUT_CLOCK_SPEED_MAX         (933000000U)
+#define PCI_ODIN_VCO_MIN                        (600000000U)
+#define PCI_ODIN_VCO_MAX                        (1440000000U)
+#define PCI_ODIN_PFD_MIN                        (10000000U)
+#define PCI_ODIN_PFD_MAX                        (500000000U)
+/*
+ * Max values that can be set in DRP registers
+ */
+#define PCI_ODIN_OREG_VALUE_MAX                 (126.875f)
+#define PCI_ODIN_MREG_VALUE_MAX                 (126.875f)
+#define PCI_ODIN_DREG_VALUE_MAX                 (126U)
+
+/*
+ * DUT core clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1                (0x0028)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2                (0x002C)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT     (7)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER1                 (0x0050)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT   (0)
+
+#define ODN_DUT_CORE_CLK_MULTIPLIER2                 (0x0054)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK       (0x00007000U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT      (12)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK    (0x00000800U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT   (11)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK       (0x00000080U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT      (7)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK    (0x00000040U)
+#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT   (6)
+
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1                 (0x0058)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK       (0x00002000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT      (13)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK    (0x00001000U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT   (12)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK    (0x00000FC0U)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT   (6)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK    (0x0000003FU)
+#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT   (0)
+
+/*
+ * DUT interface clock input divider, multiplier and out divider.
+ */
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1               (0x0220)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK  (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK  (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0)
+
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2               (0x0224)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK     (0x00000080U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT    (7)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK  (0x00000040U)
+#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1                (0x0250)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT  (0)
+
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2                (0x0254)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK      (0x00007000U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT     (12)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK   (0x00000800U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT  (11)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK      (0x00000080U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT     (7)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK   (0x00000040U)
+#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT  (6)
+
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1                (0x0258)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK      (0x00002000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT     (13)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK   (0x00001000U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT  (12)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK   (0x00000FC0U)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT  (6)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK   (0x0000003FU)
+#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT  (0)
+
+
+#define NEXEF_ROGUE_REG_BAR (PCI_ODIN_DUT_REGS_BAR)
+#define NEXEF_ROGUE_REG_SIZE (_RGXREG_SIZE)
+#define NEXEF_ROGUE_REG_OFFSET (_RGXREG_START)
+#define NEXEF_NNPU_PDEV_NAME "rogue-regs"
+
+#define NEXEF_NNA_REG_BAR (PCI_ODIN_DUT_REGS_BAR)
+#define NEXEF_NNA_REG_SIZE (_REG_NNA_SIZE)
+#define NEXEF_NNA_REG_OFFSET (_REG_NNA_START)
+#define NEXEF_NNA_PDEV_NAME "nna-regs"
+
+#define NEXEF_NNSYS_REG_BAR (PCI_ODIN_DUT_REGS_BAR)
+#define NEXEF_NNSYS_REG_SIZE (_REG_NNSYS_SIZE)
+#define NEXEF_NNSYS_REG_OFFSET (_REG_NNSYS_START)
+
+#define NEXEF_NNPU_HEAP_SIZE (128*1024*1024)
+
+//endregion Defines
+
+//region Struct and Prototypes
+
+static const struct pci_device_id pci_pci_ids[] = {
+        /* We don't support the Apollo/TCF board, but we still match it so
+         * that we can display a friendly error message
+         */
+        { PCI_DEVICE(PCI_APOLLO_VENDOR_ID, PCI_APOLLO_DEVICE_ID), },
+
+        /* There is currently no plan to use the Orion/Sirius platform, but it can
+         * still be used for some tests. It is really close to the Odin/Sleipnir platform
+         */
+        { PCI_DEVICE(PCI_SIRIUS_VENDOR_ID, PCI_SIRIUS_DEVICE_ID), },
+
+        { PCI_DEVICE(PCI_ODIN_VENDOR_ID, PCI_ODIN_DEVICE_ID), },
+        { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+/* We need the NNA reg bank because the secure bit is currently not handled by the NNA driver */
+enum { CORE_REG_BANK = 0, NNSYS_REG_BANK,
+        NNA_REG_BANK,
+        REG_BANK_COUNT /* Must be the last */};
+
+struct mem_region {
+    resource_size_t base;
+    resource_size_t size;
+};
+
+struct platdev_export_info {
+    /* General infos */
+    struct mem_region dut_mem;
+
+    /* Rogue export infos */
+    int rogue_mem_mode;
+    struct platform_device *rogue_pdev;
+    struct mem_region rogue_heap_mem;
+    struct mem_region rogue_pdp_heap_mem;
+
+    /* NNA export infos */
+    struct platform_device *nna_pdev;
+    struct mem_region nna_heap_mem;
+};
+
+struct nexefdrv_prvdata {
+    int irq;
+
+    struct {
+        int bar;
+        unsigned long addr;
+        unsigned long size;
+        void __iomem *km_addr;
+    } reg_bank[REG_BANK_COUNT];
+
+    struct platdev_export_info plat_exports;
+
+    struct pci_dev *pci_dev;
+};
+
+struct img_pci_driver {
+    struct pci_dev *pci_dev;
+    struct pci_driver pci_driver;
+    struct delayed_work irq_work;
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;
+
+
+static int nexef_plat_probe(struct pci_dev *pci_dev,
+                          const struct pci_device_id *id);
+static void nexef_plat_remove(struct pci_dev *dev);
+
+static int nexef_plat_suspend(struct device *dev);
+static int nexef_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(nexef_pm_plat_ops,
+        nexef_plat_suspend, nexef_plat_resume);
+
+
+static int nexef_register_rogue_plat_device(struct nexefdrv_prvdata *priv_data);
+static void nexef_unregister_rogue_plat_device(struct nexefdrv_prvdata *priv_data);
+
+static int nexef_register_nna_plat_device(struct nexefdrv_prvdata *priv_data);
+static void nexef_unregister_nna_plat_device(struct nexefdrv_prvdata *priv_data);
+
+static int nexef_nnsys_init(struct pci_dev *pci_dev, struct nexefdrv_prvdata *priv_data);
+static void nexef_nnsys_unlock(struct nexefdrv_prvdata *priv_data);
+static void nexef_nnsys_configure(struct nexefdrv_prvdata *priv_data);
+
+static int nexef_nna_init(struct pci_dev *pci_dev, struct nexefdrv_prvdata *priv_data);
+static void nexef_nna_unlock(struct nexefdrv_prvdata *priv_data);
+
+
+//endregion Struct and Prototypes
+
+//region Kernel module parameters
+
+/* Parameters applicable when using bus master mode */
+static unsigned long contig_phys_start;
+module_param(contig_phys_start, ulong, 0444);
+MODULE_PARM_DESC(contig_phys_start, "Physical address of start of contiguous region");
+
+static uint32_t contig_size;
+module_param(contig_size, uint, 0444);
+MODULE_PARM_DESC(contig_size, "Size of contiguous region: takes precedence over any PCI based memory");
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "Physical size in bytes. When 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "Offset from PCI bar start (default: 0)");
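+
+/* Example (hypothetical values): restricting the DUT memory window at load
+ * time with the two parameters above:
+ *   insmod nexef_plat.ko pci_offset=0x10000000 pci_size=0x20000000
+ */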
+
+#ifdef CONFIG_SET_FPGA_CLOCK
+static int odin_fpga_dut_clock = 25000000;
+module_param(odin_fpga_dut_clock, int, 0444);
+MODULE_PARM_DESC(odin_fpga_dut_clock, "DUT clock speed");
+
+static int odin_fpga_mem_clock = 25000000;
+module_param(odin_fpga_mem_clock, int, 0444);
+MODULE_PARM_DESC(odin_fpga_mem_clock, "Memory clock speed");
+#endif
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+    return sprintf(buf, "PCI 3NX-F Platform driver version : N/A\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+        &driver_attr_info.attr,
+        NULL
+};
+ATTRIBUTE_GROUPS(drv);
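+
+/* The read-only 'info' attribute above is exposed by the driver core under
+ * /sys/bus/pci/drivers/nexef_plat_pci/ once the PCI driver below is
+ * registered.
+ */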
+
+static struct img_pci_driver nexef_pci_drv = {
+        .pci_driver = {
+                .name = "nexef_plat_pci",
+                .id_table = pci_pci_ids,
+                .probe = nexef_plat_probe,
+                .remove = nexef_plat_remove,
+                .driver = {
+                        .groups = drv_groups,
+                        .pm = &nexef_pm_plat_ops,
+                }
+        },
+};
+//endregion Kernel module parameters
+
+//region Utility functions
+/*
+ * __readreg32 - Generic PCI bar read function
+ */
+static inline unsigned int __readreg32(struct nexefdrv_prvdata *data,
+                                       int bank, unsigned long offset)
+{
+    void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+                                         offset);
+    return ioread32(reg);
+}
+
+/*
+ * __writereg32 - Generic PCI bar write function
+ */
+static inline void __writereg32(struct nexefdrv_prvdata *data,
+                                int bank, unsigned long offset, int val)
+{
+    void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+                                         offset);
+    iowrite32(val, reg);
+}
+
+/*
+ * __readreg64 - Generic PCI bar read function
+ */
+static inline uint64_t __readreg64(struct nexefdrv_prvdata *data,
+                                       int bank, unsigned long offset)
+{
+    void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+                                         offset);
+    return (uint64_t)ioread32(reg) | ((uint64_t)ioread32(reg + 4) << 32);
+}
+
+/*
+ * __writereg64 - Generic PCI bar write function
+ */
+static inline void __writereg64(struct nexefdrv_prvdata *data,
+                                int bank, unsigned long offset, uint64_t val)
+{
+    void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+                                         offset);
+    iowrite32(val & 0xFFFFFFFF, reg);
+    iowrite32(val >> 32, reg + 4);
+}
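+
+/* Note: the 64-bit helpers above are implemented as two 32-bit transactions
+ * (low word first), so they are not atomic with respect to the device.
+ */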
+
+//endregion Utility functions
+
+//region Odin handling functions
+/*
+ * odin_core_writereg32 - Write to Odin control registers
+ */
+static inline void odin_core_writereg32(struct nexefdrv_prvdata *data,
+                                        unsigned long offset, int val)
+{
+    __writereg32(data, CORE_REG_BANK, offset, val);
+}
+
+/*
+ * odin_core_readreg32 - Read Odin control registers
+ */
+static inline unsigned int odin_core_readreg32(struct nexefdrv_prvdata *data,
+                                               unsigned long offset)
+{
+    return __readreg32(data, CORE_REG_BANK, offset);
+}
+
+/*
+ * odin_core_polreg32 - Poll an Odin control register until any bit in 'mask'
+ * reads back set. Gives up after 50 * 20ms; returns 0 on success, -ETIME on
+ * timeout (hence the signed return type).
+ */
+static inline int odin_core_polreg32(struct nexefdrv_prvdata *data, unsigned long offset, uint32_t mask)
+{
+    int timeout = 50;
+    uint32_t read_value;
+
+    while(timeout > 0)
+    {
+        read_value = odin_core_readreg32(data, offset) & mask;
+
+        if (read_value != 0)
+            break;
+
+        msleep(20);
+
+        timeout--;
+    }
+
+    if (timeout == 0)
+    {
+        dev_err(&data->pci_dev->dev, " %s(%08lX, %08X) timeout\n", __func__, offset, mask);
+        return -ETIME;
+    }
+
+    return 0;
+}
+
+static void odin_set_mem_mode_lma(struct nexefdrv_prvdata *data)
+{
+    uint32_t val;
+
+    /* Enable memory offset to be applied to DUT and PDP1 */
+    /*
+     * 31: Set Enable DUT Offset
+     * 11: JTAG EN
+     *  9: CORE CLK DIV4
+     *  4: PLL_BYPASS
+     */
+    odin_core_writereg32(data, PCI_ODIN_CORE_DUT_CTRL1, 0x80000A10);
+
+    /* Apply memory offset to GPU and PDP1 to point to DDR memory.
+     * Enable HDMI.
+     */
+    val = (0x4 << 24) | /* DUT_OFFSET */
+          (0x4 << 16) | /* PDP1_OFFSET */
+          (0x2 << 10) | /* HDMI Module Enable */
+          (0x1 << 13);  /* MCU Communicator */
+    odin_core_writereg32(data, PCI_ODIN_CORE_CORE_CONTROL, val);
+}
+
+/*
+ * reset_dut - Reset the Device Under Test
+ */
+static void reset_dut(struct nexefdrv_prvdata *data)
+{
+
+    uint32_t internal_rst = odin_core_readreg32(data, PCI_ODIN_CORE_INTERNAL_RESETN);
+    uint32_t external_rst = odin_core_readreg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN);
+
+    dev_dbg(&data->pci_dev->dev, "going to reset DUT fpga!\n");
+
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERNAL_RESETN,
+                         internal_rst & ~(INTERNAL_RESET_INTERNAL_RESETN_PIKE));
+    odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN,
+                         external_rst & ~(EXTERNAL_RESET_EXTERNAL_RESETN_DUT));
+
+    udelay(50); /* arbitrary delays, just in case! */
+
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERNAL_RESETN, internal_rst);
+    odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN, external_rst);
+
+    udelay(50);
+
+    nexef_nnsys_unlock(data);
+
+    /* Call the NNA unlock function to make sure NNA driver can access it without any issue
+     * The security bit is reverted after each reset.
+     */
+    nexef_nna_unlock(data);
+
+    dev_dbg(&data->pci_dev->dev, "DUT fpga reset done!\n");
+}
+
+#ifdef CONFIG_SET_FPGA_CLOCK
+/*
+ * Returns the divider group register fields for the specified counter value.
+ * See Xilinx Application Note xapp888.
+ */
+static void odin_mmcm_reg_param_calc(uint32_t value, uint32_t *low, uint32_t *high,
+                                     uint32_t *edge, uint32_t *no_count)
+{
+    if (value == 1U) {
+        *no_count = 1U;
+        *edge = 0;
+        *high = 0;
+        *low = 0;
+    } else {
+        *no_count = 0;
+        *edge = value % 2U;
+        *high = value >> 1;
+        *low = (value + *edge) >> 1U;
+    }
+}
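+
+/* Example (from the logic above): value = 5 gives no_count = 0, edge = 1,
+ * high = 2, low = 3; the counter then divides by high + low = 5, with 'edge'
+ * set for odd ratios.
+ */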
+
+/* GPU clock functions use these macros: */
+#define REG_FIELD_GET(v, str) \
+	(uint32_t)(((v) & (str##_MASK)) >> (str##_SHIFT))
+#define REG_FIELD_SET(v, f, str) \
+	v = (uint32_t)(((v) & (uint32_t)~(str##_MASK)) | \
+		  (uint32_t)(((f) << (str##_SHIFT)) & (str##_MASK)))
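+
+/* Example: REG_FIELD_SET(value, high_time, ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME)
+ * clears bits [11:6] of 'value' (the *_HI_TIME_MASK) and inserts 'high_time'
+ * there, shifted by *_HI_TIME_SHIFT.
+ */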
+
+/*
+ * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for
+ * the specified input frequency and target output frequency.
+ * The function doesn't support fractional values for the multiplier and output
+ * divider. As per the Xilinx 7 series FPGAs clocking resources user guide, it
+ * aims for the highest VCO and smallest D and M.
+ * Configured for Xilinx Virtex7 speed grade 2.
+ */
+static int odin_mmcm_counter_calc(struct device *dev,
+                                  uint32_t freq_input, uint32_t freq_output,
+                                  uint32_t *d, uint32_t *m, uint32_t *o)
+{
+    uint32_t d_min, d_max;
+    uint32_t m_min, m_max, m_ideal;
+    uint32_t d_cur, m_cur, o_cur;
+    uint32_t best_diff, d_best, m_best, o_best;
+
+    /*
+     * Check specified input frequency is within range
+     */
+    if (freq_input < PCI_ODIN_INPUT_CLOCK_SPEED_MIN) {
+        dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n",
+                freq_input, PCI_ODIN_INPUT_CLOCK_SPEED_MIN);
+        return -EINVAL;
+    }
+    if (freq_input > PCI_ODIN_INPUT_CLOCK_SPEED_MAX) {
+        dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n",
+                freq_input, PCI_ODIN_INPUT_CLOCK_SPEED_MAX);
+        return -EINVAL;
+    }
+
+    /*
+     * Check specified target frequency is within range
+     */
+    if (freq_output < PCI_ODIN_OUTPUT_CLOCK_SPEED_MIN) {
+        dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n",
+                freq_input, PCI_ODIN_OUTPUT_CLOCK_SPEED_MIN);
+        return -EINVAL;
+    }
+    if (freq_output > PCI_ODIN_OUTPUT_CLOCK_SPEED_MAX) {
+        dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n",
+                freq_output, PCI_ODIN_OUTPUT_CLOCK_SPEED_MAX);
+        return -EINVAL;
+    }
+
+    /*
+     * Calculate min and max for the input divider.
+     * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+     * equations 3-6 and 3-7.
+     */
+    d_min = DIV_ROUND_UP(freq_input, PCI_ODIN_PFD_MAX);
+    d_max = min(freq_input/PCI_ODIN_PFD_MIN, (uint32_t)PCI_ODIN_DREG_VALUE_MAX);
+
+    /*
+     * Calculate min and max for the multiplier.
+     * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+     * equations 3-8 and 3-9.
+     */
+    m_min = DIV_ROUND_UP((PCI_ODIN_VCO_MIN * d_min), freq_input);
+    m_max = min(((PCI_ODIN_VCO_MAX * d_max) / freq_input),
+                (uint32_t)PCI_ODIN_MREG_VALUE_MAX);
+
+    for (d_cur = d_min; d_cur <= d_max; d_cur++) {
+        /*
+         * Refer to the Xilinx 7 series FPGAs clocking resources user guide,
+         * equation 3-10.
+         */
+        m_ideal = min(((d_cur * PCI_ODIN_VCO_MAX)/freq_input), m_max);
+
+        for (m_cur = m_ideal; m_cur >= m_min; m_cur -= 1) {
+            /**
+             * Skip if VCO for given 'm' and 'd' value is not an
+             * integer since fractional component is not supported
+             */
+            if (((freq_input * m_cur) % d_cur) != 0)
+                continue;
+
+            /**
+             * Skip if divider for given 'm' and 'd' value is not
+             * an integer since fractional component is not
+             * supported
+             */
+            if ((freq_input * m_cur) % (d_cur * freq_output) != 0)
+                continue;
+
+            /**
+             * Calculate output divider value.
+             */
+            o_cur = (freq_input * m_cur)/(d_cur * freq_output);
+
+            *d = d_cur;
+            *m = m_cur;
+            *o = o_cur;
+            return 0;
+        }
+    }
+
+    /* Failed to find exact optimal solution with high VCO. Brute-force find a suitable config,
+     * again prioritising high VCO, to get lowest jitter */
+    d_min = 1; d_max = (uint32_t)PCI_ODIN_DREG_VALUE_MAX;
+    m_min = 1; m_max = (uint32_t)PCI_ODIN_MREG_VALUE_MAX;
+    best_diff = 0xFFFFFFFF;
+
+    for (d_cur = d_min; d_cur <= d_max; d_cur++) {
+        for (m_cur = m_max; m_cur >= m_min; m_cur -= 1) {
+            uint32_t pfd, vco, o_avg, o_min, o_max;
+
+            pfd = freq_input / d_cur;
+            vco = pfd * m_cur;
+
+            if (pfd < PCI_ODIN_PFD_MIN)
+                continue;
+
+            if (pfd > PCI_ODIN_PFD_MAX)
+                continue;
+
+            if (vco < PCI_ODIN_VCO_MIN)
+                continue;
+
+            if (vco > PCI_ODIN_VCO_MAX)
+                continue;
+
+            /* A range of -1/+3 around o_avg gives us 100kHz granularity. It can be extended further. */
+            o_avg = vco / freq_output;
+            o_min = (o_avg >= 2) ? (o_avg - 1) : 1;
+            o_max = o_avg + 3;
+            if (o_max > (uint32_t)PCI_ODIN_OREG_VALUE_MAX)
+                o_max = (uint32_t)PCI_ODIN_OREG_VALUE_MAX;
+
+            for (o_cur = o_min; o_cur <= o_max; o_cur++) {
+                uint32_t freq_cur, diff_cur;
+
+                freq_cur = vco / o_cur;
+
+                if (freq_cur > freq_output)
+                    continue;
+
+                diff_cur = freq_output - freq_cur;
+
+                if (diff_cur == 0) {
+                    /* Found an exact match */
+                    *d = d_cur;
+                    *m = m_cur;
+                    *o = o_cur;
+                    return 0;
+                }
+
+                if (diff_cur < best_diff) {
+                    best_diff = diff_cur;
+                    d_best = d_cur;
+                    m_best = m_cur;
+                    o_best = o_cur;
+                }
+            }
+        }
+    }
+
+    if (best_diff != 0xFFFFFFFF) {
+        dev_warn(dev, "Odin: Found similar freq of %u Hz\n", freq_output - best_diff);
+        *d = d_best;
+        *m = m_best;
+        *o = o_best;
+        return 0;
+    }
+
+    dev_err(dev, "Odin: Unable to find integer values for d, m and o for requested frequency (%u)\n",
+            freq_output);
+
+    return -ERANGE;
+}
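+
+/* Worked example (sketch): freq_input = 100 MHz, freq_output = 25 MHz.
+ * The first pass finds d = 1, m_ideal = 14, and (100 MHz * 14) divides evenly
+ * by (1 * 25 MHz), so it returns d = 1, m = 14, o = 56 (VCO = 1400 MHz,
+ * inside the 600-1440 MHz window).
+ */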
+
+static int odin_set_dut_core_clk(struct nexefdrv_prvdata *data, uint32_t input_clk, uint32_t output_clk)
+{
+    int err = 0;
+    uint32_t in_div, mul, out_div;
+    uint32_t high_time, low_time, edge, no_count;
+    uint32_t value;
+    struct device *dev = &data->pci_dev->dev;
+
+    err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div,
+                                 &mul, &out_div);
+    if (err != 0)
+        return err;
+
+    /* Put DUT into reset */
+    odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN, EXTERNAL_RESET_EXTERNAL_RESETN_SPI);
+    msleep(20);
+
+    /* Put DUT Core MMCM into reset */
+    odin_core_writereg32(data, PCI_ODIN_CORE_CLK_GEN_RESET, EXTERNAL_RESET_DUT_CORE_MMCM);
+    msleep(20);
+
+    /* Calculate the register fields for output divider */
+    odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+                             &edge, &no_count);
+
+    /* Read-modify-write the required fields to output divider register 1 */
+    value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV1);
+    REG_FIELD_SET(value, high_time,
+                  ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME);
+    REG_FIELD_SET(value, low_time,
+                  ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME);
+    odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV1, value);
+
+    /* Read-modify-write the required fields to output divider register 2 */
+    value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV2);
+    REG_FIELD_SET(value, edge,
+                  ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE);
+    REG_FIELD_SET(value, no_count,
+                  ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT);
+    odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV2, value);
+
+    /* Calculate the register fields for multiplier */
+    odin_mmcm_reg_param_calc(mul, &high_time, &low_time,
+                             &edge, &no_count);
+
+    /* Read-modify-write the required fields to multiplier register 1*/
+    value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT1);
+    REG_FIELD_SET(value, high_time,
+                  ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME);
+    REG_FIELD_SET(value, low_time,
+                  ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME);
+    odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT1, value);
+
+    /* Read-modify-write the required fields to multiplier register 2 */
+    value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT2);
+    REG_FIELD_SET(value, edge,
+                  ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE);
+    REG_FIELD_SET(value, no_count,
+                  ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT);
+    odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT2, value);
+
+    /* Calculate the register fields for input divider */
+    odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+                             &edge, &no_count);
+
+    /* Read-modify-write the required fields to input divider register 1 */
+    value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_IN_DIV);
+    REG_FIELD_SET(value, high_time,
+                  ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME);
+    REG_FIELD_SET(value, low_time,
+                  ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME);
+    REG_FIELD_SET(value, edge,
+                  ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE);
+    REG_FIELD_SET(value, no_count,
+                  ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT);
+    odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_CORE_CLK_IN_DIV, value);
+
+    /* Bring DUT clock MMCM out of reset */
+    odin_core_writereg32(data, PCI_ODIN_CORE_CLK_GEN_RESET, 0);
+
+    err = odin_core_polreg32(data, PCI_ODIN_CORE_MMCM_LOCK_STATUS, PCI_ODIN_MMCM_LOCK_STATUS_DUT_CORE);
+    if (err != 0) {
+        dev_err(dev, "MMCM failed to lock for DUT core\n");
+        return err;
+    }
+
+    /* Bring DUT out of reset */
+    odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN,
+            EXTERNAL_RESET_EXTERNAL_RESETN_SPI | EXTERNAL_RESET_EXTERNAL_RESETN_DUT);
+    msleep(20);
+
+    dev_info(dev, "DUT core clock set-up successful at %dHz\n", output_clk);
+
+    return err;
+}
+
+/*
+ * Note: the DUT interface clock block aliases the PCI_ODIN_CLK_BLK_DUT_SYS_*
+ * offsets (0x0220/0x0250/0x0258 match the ODN_DUT_IFACE_CLK_* registers), so
+ * the SYS register names are used below with the IFACE field masks.
+ */
+static int odin_set_dut_if_clk(struct nexefdrv_prvdata *data, uint32_t input_clk, uint32_t output_clk)
+{
+	int err = 0;
+	uint32_t in_div, mul, out_div;
+	uint32_t high_time, low_time, edge, no_count;
+	uint32_t value;
+	struct device *dev = &data->pci_dev->dev;
+
+	err = odin_mmcm_counter_calc(dev, input_clk, output_clk,
+				     &in_div, &mul, &out_div);
+	if (err != 0)
+		return err;
+
+	/* Put DUT into reset */
+	odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN,
+			     EXTERNAL_RESET_EXTERNAL_RESETN_SPI);
+	msleep(20);
+
+	/* Put DUT interface MMCM into reset */
+	odin_core_writereg32(data, PCI_ODIN_CORE_CLK_GEN_RESET,
+			     ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK);
+	msleep(20);
+
+	/* Calculate the register fields for output divider */
+	odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to output divider register 1 */
+	value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME);
+	odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV1, value);
+
+	/* Read-modify-write the required fields to output divider register 2 */
+	value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT);
+	odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV2, value);
+
+	/* Calculate the register fields for multiplier */
+	odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count);
+
+	/* Read-modify-write the required fields to multiplier register 1 */
+	value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT1);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME);
+	odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT1, value);
+
+	/* Read-modify-write the required fields to multiplier register 2 */
+	value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT2);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT);
+	odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT2, value);
+
+	/* Calculate the register fields for input divider */
+	odin_mmcm_reg_param_calc(in_div, &high_time, &low_time,
+				 &edge, &no_count);
+
+	/* Read-modify-write the required fields to input divider register 1 */
+	value = odin_core_readreg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_IN_DIV);
+	REG_FIELD_SET(value, high_time,
+			ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME);
+	REG_FIELD_SET(value, low_time,
+			ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME);
+	REG_FIELD_SET(value, edge,
+			ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE);
+	REG_FIELD_SET(value, no_count,
+			ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT);
+	odin_core_writereg32(data, PCI_ODIN_CLK_BLK_DUT_SYS_CLK_IN_DIV, value);
+
+	/* Bring DUT interface clock MMCM out of reset */
+	odin_core_writereg32(data, PCI_ODIN_CORE_CLK_GEN_RESET, 0);
+
+	err = odin_core_polreg32(data, PCI_ODIN_CORE_MMCM_LOCK_STATUS, PCI_ODIN_MMCM_LOCK_STATUS_DUT_IF);
+	if (err != 0) {
+		dev_err(dev, "MMCM failed to lock for DUT IF\n");
+		return err;
+	}
+
+	/* Bring DUT out of reset */
+	odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN,
+			     EXTERNAL_RESET_EXTERNAL_RESETN_SPI | EXTERNAL_RESET_EXTERNAL_RESETN_DUT);
+	msleep(20);
+
+	dev_info(dev, "DUT IF clock set-up successful at %dHz\n", output_clk);
+
+	return err;
+}
+#endif
+
+/*
+ * odin_isr_clear - Clear an interrupt
+ *
+ * note: the reason for this function is unclear; it is taken from the Apollo/Atlas
+ * code, which has the same interrupt handler as Odin. Is it working around a bug?
+ */
+static void odin_isr_clear(struct nexefdrv_prvdata *data, unsigned int intstatus)
+{
+    unsigned int max_retries = 1000;
+
+    while ((odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_STATUS) & intstatus) && max_retries--)
+        odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_CLR,
+                             (INT_INTERRUPT_MASTER_ENABLE | intstatus));
+}
+
+
+typedef void (*interrupt_callback_handler)(void *);
+struct interrupt_handlers {
+    interrupt_callback_handler handler;
+    void * data;
+    uint8_t enabled;
+};
+static struct interrupt_handlers pdev_int_handlers[TC_INTERRUPT_COUNT];
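+
+/* Consumers hook into this table through the tc_* exports defined later in
+ * this file, e.g. (sketch, with my_handler/my_data supplied by the NNPU or
+ * NNA driver):
+ *   tc_set_interrupt_handler(dev, TC_INTERRUPT_EXT, my_handler, my_data);
+ *   tc_enable_interrupt(dev, TC_INTERRUPT_EXT);
+ */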
+
+/*
+ * pci_isr_cb - Low latency interrupt handler
+ */
+static irqreturn_t pci_isr_cb(int irq, void *dev_id)
+{
+    uint32_t intstatus;
+
+    struct pci_dev *pcidev = (struct pci_dev *)dev_id;
+    struct nexefdrv_prvdata *data;
+
+    irqreturn_t ret = IRQ_NONE;
+
+    if (dev_id == NULL) {
+        /* Spurious interrupt: not yet initialised. */
+        pr_warn("Spurious interrupt data/dev_id not initialised!\n");
+        goto exit;
+    }
+
+    /* Only dereference dev_id once it is known to be valid */
+    data = dev_get_drvdata(&pcidev->dev);
+
+    /* Read interrupt status register */
+    intstatus = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_STATUS);
+
+    dev_dbg(&pcidev->dev,
+             "%s: Got an interrupt....\n",
+             __func__);
+
+    /* Now handle the ints */
+    if (intstatus & INT_INTERRUPT_DUT0) {
+        /* Check who called and say hello */
+        dev_dbg(&pcidev->dev,
+                "%s: Got a valid interrupt, trying to do something with it....\n",
+                __func__);
+
+        /* Check NNA event register */
+        if ( (__readreg32(data, NNA_REG_BANK, VHA_CR_OS0_VHA_EVENT_STATUS) != 0) &&
+             pdev_int_handlers[TC_INTERRUPT_TC5_PDP].enabled ) {
+
+            dev_dbg(&pcidev->dev,
+                     "%s: NNA interrupt....\n",
+                     __func__);
+
+            if ( pdev_int_handlers[TC_INTERRUPT_TC5_PDP].handler != NULL ) {
+                pdev_int_handlers[TC_INTERRUPT_TC5_PDP].handler(pdev_int_handlers[TC_INTERRUPT_TC5_PDP].data);
+            }
+            else {
+                WARN_ON(pdev_int_handlers[TC_INTERRUPT_TC5_PDP].handler == NULL);
+            }
+
+        }
+        else if (pdev_int_handlers[TC_INTERRUPT_EXT].enabled) {
+
+            /* Else it must be from the NNPU */
+            dev_dbg(&pcidev->dev,
+                     "%s: Probably a NNPU interrupt....\n",
+                     __func__);
+            if ( pdev_int_handlers[TC_INTERRUPT_EXT].handler != NULL ) {
+                pdev_int_handlers[TC_INTERRUPT_EXT].handler(pdev_int_handlers[TC_INTERRUPT_EXT].data);
+            }
+            else {
+                WARN_ON(pdev_int_handlers[TC_INTERRUPT_EXT].handler == NULL);
+            }
+
+        }
+        else {
+            dev_warn(&pcidev->dev, "Received an interrupt from DUT when no proper handling being registered.");
+        }
+    }
+    else {
+        /* most likely this is a shared interrupt line */
+        dev_dbg(&pcidev->dev,
+                "%s: unexpected or spurious interrupt [%x] (shared IRQ?)!\n",
+                __func__, intstatus);
+        /* WARN_ON(1); */
+
+        goto exit;
+    }
+
+    /* Ack the ints and report the IRQ as handled */
+    odin_isr_clear(data, intstatus);
+    ret = IRQ_HANDLED;
+
+exit:
+    return ret;
+}
+
+static inline void odin_reset_int(struct nexefdrv_prvdata *data) {
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE, 0);
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_CLR, 0xFFFFFFFF);
+}
+
+/*
+ * odin_enable_int - Enable an interrupt
+ */
+static inline void odin_enable_int(struct nexefdrv_prvdata *data,
+                                   uint32_t intmask)
+{
+    uint32_t irq_enabled = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE);
+    //intmask &= INT_INTERRUPT_DUT0;
+
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE, irq_enabled | intmask | INT_INTERRUPT_MASTER_ENABLE);
+}
+
+/*
+ * odin_disable_int - Disable an interrupt
+ */
+static inline void odin_disable_int(struct nexefdrv_prvdata *data,
+                                    uint32_t intmask)
+{
+    uint32_t irq_enabled = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE);
+    //intmask &= INT_INTERRUPT_DUT0;
+
+    odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE,
+                         irq_enabled & ~intmask);
+}
+
+/*
+ * odin_allocate_registers - Map a register (or memory) bank into kernel space
+ * @pci_dev: the PCI device
+ * @data: pointer to the driver private data
+ * @bank: bank to set
+ * @bar: BAR where the registers are
+ * @base: base address in the BAR
+ * @size: size of the register set
+ *
+ * Returns non-zero if the ioremap failed; callers translate this to -ENOMEM.
+ */
+static inline int odin_allocate_registers(struct pci_dev *pci_dev,
+                                          struct nexefdrv_prvdata *data, int bank,
+                                          int bar, unsigned long base, unsigned long size)
+{
+    unsigned long bar_size = pci_resource_len(pci_dev, bar);
+    unsigned long bar_addr = pci_resource_start(pci_dev, bar);
+    unsigned long bar_max_size = bar_size - base;
+    BUG_ON((base > bar_size) || ((base+size) > bar_size));
+
+    data->reg_bank[bank].bar = bar;
+    data->reg_bank[bank].addr = bar_addr + base;
+    data->reg_bank[bank].size = min(size, bar_max_size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+    data->reg_bank[bank].km_addr = devm_ioremap_nocache(
+            &pci_dev->dev, data->reg_bank[bank].addr,
+            data->reg_bank[bank].size);
+#else
+    data->reg_bank[bank].km_addr = devm_ioremap(
+            &pci_dev->dev, data->reg_bank[bank].addr,
+            data->reg_bank[bank].size);
+#endif
+
+    dev_dbg(&pci_dev->dev, "[bank %u] bar:%d addr:0x%lx size:0x%lx km:0x%p\n",
+             bank, bar, data->reg_bank[bank].addr,
+             data->reg_bank[bank].size,
+             data->reg_bank[bank].km_addr);
+
+    return data->reg_bank[bank].km_addr == NULL;
+}
+
+//endregion Odin handling functions
+
+//region Specific NNA handling functions
+
+/* The functions here handle the secure register of the NNA.
+ * The NNA driver currently doesn't know how to handle it
+ */
+static int nexef_nna_init(struct pci_dev *pci_dev, struct nexefdrv_prvdata *priv_data)
+{
+    int ret = 0;
+    struct device *dev = &pci_dev->dev;
+    /* Allocate NNA registers */
+
+    ret = odin_allocate_registers(pci_dev, priv_data,
+                                  NNA_REG_BANK, NEXEF_NNA_REG_BAR,
+                                  NEXEF_NNA_REG_OFFSET, NEXEF_NNA_REG_SIZE);
+    if (ret) {
+        dev_err(dev, "Can't allocate memory for nna regs!");
+        ret = -ENOMEM;
+        goto out;
+    }
+
+out:
+    return ret;
+}
+
+static void nexef_nna_unlock(struct nexefdrv_prvdata *priv_data)
+{
+    __writereg32(priv_data, NNA_REG_BANK, VHA_CR_SOCIF_BUS_SECURE, 0);
+}
+
+
+//endregion Specific NNA handling functions
+
+//region NN_SYS related functions
+
+static int nexef_nnsys_init(struct pci_dev *pci_dev, struct nexefdrv_prvdata *priv_data)
+{
+    int ret = 0;
+    struct device *dev = &pci_dev->dev;
+
+    /* Allocate nnsys registers */
+    ret = odin_allocate_registers(pci_dev, priv_data,
+                                  NNSYS_REG_BANK, NEXEF_NNSYS_REG_BAR,
+                                  NEXEF_NNSYS_REG_OFFSET, NEXEF_NNSYS_REG_SIZE);
+    if (ret) {
+        dev_err(dev, "Can't allocate memory for nnsys regs!");
+        ret = -ENOMEM;
+        goto out;
+    }
+
+out:
+    return ret;
+}
+
+static void nexef_nnsys_unlock(struct nexefdrv_prvdata *priv_data)
+{
+    __writereg32(priv_data, NNSYS_REG_BANK, NN_SYS_CR_SOCIF_BUS_SECURE, 0);
+}
+
+static void nexef_nnsys_configure(struct nexefdrv_prvdata *priv_data)
+{
+    /* Power up everything */
+    __writereg32(priv_data, NNSYS_REG_BANK, NN_SYS_CR_POWER_EVENT,
+                 NN_SYS_CR_POWER_EVENT_DOMAIN_NNSYS_EN | NN_SYS_CR_POWER_EVENT_REQUEST_POWER_UP | NN_SYS_CR_POWER_EVENT_TYPE_EN);
+    __writereg32(priv_data, NNSYS_REG_BANK, NN_SYS_CR_POWER_EVENT,
+            NN_SYS_CR_POWER_EVENT_DOMAIN_NNA_EN | NN_SYS_CR_POWER_EVENT_REQUEST_POWER_UP | NN_SYS_CR_POWER_EVENT_TYPE_EN);
+    /* The doc talks about OCM power, but it does not exist in the CR file?! */
+
+    /* Disable OCM */
+    __writereg64(priv_data, NNSYS_REG_BANK, NN_SYS_CR_NOC_LOWER_ADDR1, 0xFFFFFFFF10000000);
+    __writereg64(priv_data, NNSYS_REG_BANK, NN_SYS_CR_NOC_UPPER_ADDR1, 0xFFFFFFFFFFFFFFFF);
+}
+
+//endregion NN_SYS related functions
+
+//region Kernel related functions
+
+static int nexef_plat_probe(struct pci_dev *pci_dev,
+                          const struct pci_device_id *id)
+{
+    int ret = 0;
+    struct nexefdrv_prvdata *data;
+    size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+    unsigned long dut_base_mem, dut_mem_size;
+    struct device *dev = &pci_dev->dev;
+
+    dev_dbg(dev, "probing device, pci_dev: %p\n", dev);
+
+    if (IS_APOLLO_DEVICE(id->device)) {
+        dev_err(dev, "This driver can't work with an APOLLO baseboard. Please check the hardware you are using!\n");
+        goto out;
+    }
+
+    if (IS_SIRIUS_DEVICE(id->device)) {
+        dev_warn(dev, "This driver is not design to work on an Orion system. As it is really similar" \
+        "to an Odin baseboard it may work or not. Use at your own risk.");
+    }
+
+    /* Enable the device */
+    if (pci_enable_device(pci_dev))
+        goto out;
+
+    /* Reserve PCI I/O and memory resources */
+    if (pci_request_region(pci_dev, 1, "odin-regs"))
+        goto out_disable;
+
+    /* Create a kernel space mapping for each of the bars */
+    data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+    if (!data) {
+        dev_err(dev, "Memory allocation error, aborting.\n");
+        ret = -ENOMEM;
+        goto out_release;
+    }
+
+    dev_dbg(dev, "allocated nexefdrv_prvdata @ %p\n", data);
+
+    /* Allocate odin core registers */
+    ret = odin_allocate_registers(pci_dev, data,
+                                  CORE_REG_BANK, PCI_ODIN_SYS_CTRL_REGS_BAR,
+                                  PCI_ODIN_SYS_CTRL_BASE_OFFSET,
+                                  PCI_ODIN_CORE_REG_SIZE);
+    if (ret) {
+        dev_err(dev, "Can't allocate memory for odin regs!");
+        ret = -ENOMEM;
+        goto out_release;
+    }
+
+    /* Display some infos */
+    {
+        uint32_t odin_id  = odin_core_readreg32(data, PCI_ODIN_CORE_ID);
+        uint32_t odin_rev = odin_core_readreg32(data, PCI_ODIN_CORE_REVISION);
+        uint32_t odin_cs  = odin_core_readreg32(data, PCI_ODIN_CORE_CHANGE_SET);
+        uint32_t odin_ui  = odin_core_readreg32(data, PCI_ODIN_CORE_USER_ID);
+        uint32_t odin_ub  = odin_core_readreg32(data, PCI_ODIN_CORE_USER_BUILD);
+
+        dev_info(dev, "Found Odin lite board v%d.%d (ID:%X CS:%X UI:%X UB:%X)",
+                (odin_rev >> 8) & 0xF, odin_rev & 0xF, odin_id & 0x7, odin_cs, odin_ui, odin_ub);
+    }
+
+#ifdef CONFIG_SET_FPGA_CLOCK
+    odin_set_dut_core_clk(data, PCI_ODIN_INPUT_CLOCK_SPEED, odin_fpga_dut_clock);
+
+#endif
+    /* Call NN_SYS init */
+    ret = nexef_nnsys_init(pci_dev, data);
+    if (ret) {
+        dev_err(dev, "nnsys register allocation failed!\n");
+        goto out_release;
+    }
+
+    ret = nexef_nna_init(pci_dev, data);
+    if (ret) {
+        dev_err(dev, "nna register allocation failed!\n");
+        goto out_release;
+    }
+
+    /* Get DUT memory infos */
+    dut_mem_size = pci_resource_len(pci_dev, PCI_ODIN_DUT_MEM_BAR);
+    if (dut_mem_size > maxmapsize)
+        dut_mem_size = maxmapsize;
+
+    dut_base_mem = pci_resource_start(pci_dev, PCI_ODIN_DUT_MEM_BAR) +
+                   pci_offset;
+
+    /* change alloc size according to module parameter */
+    if (pci_size)
+        dut_mem_size = pci_size;
+
+    dev_info(dev, "DUT Memory: bar: %d addr: 0x%lx size: 0x%lx\n",
+             PCI_ODIN_DUT_MEM_BAR,
+             dut_base_mem,
+             dut_mem_size);
+
+    /* Get the IRQ...*/
+    data->irq = pci_dev->irq;
+    data->pci_dev = pci_dev;
+    dev_set_drvdata(&pci_dev->dev, data);
+    nexef_pci_drv.pci_dev = pci_dev;
+
+    dev_dbg(dev, "Going to reset DUT... (First time)\n");
+    reset_dut(data);
+
+    dev_dbg(dev, "Reseting interrupts\n");
+    odin_reset_int(data);
+    dev_dbg(dev, "Enabling interrupts\n");
+    odin_enable_int(data, INT_INTERRUPT_DUT0 | INT_INTERRUPT_PDP);
+
+    /*
+     * Reset FPGA DUT only after disabling clocks in
+     * vha_add_dev()-> get properties.
+     * This workaround is required to ensure that
+     * clocks (on daughter board) are enabled for test slave scripts to
+     * read FPGA build version register.
+     * NOTE: Asserting other bits like DDR reset bit cause problems
+     * with bus mastering feature, thus results in memory failures.
+     */
+    dev_dbg(dev, "Going to reset DUT... (Second time)\n");
+    reset_dut(data);
+
+    odin_set_mem_mode_lma(data);
+
+    /* Configure NN_SYS */
+    dev_info(dev, "Configuring NN_SYS\n");
+    nexef_nnsys_configure(data);
+
+    /* Install the ISR callback...*/
+    dev_dbg(dev, "Trying to insert IRQ handler\n");
+    ret = devm_request_irq(dev, data->irq, &pci_isr_cb, IRQF_SHARED, DEVICE_NAME,
+                           (void *)pci_dev);
+    if (ret) {
+        dev_err(dev, "failed to request irq!\n");
+        goto out_disable_int;
+    }
+    dev_dbg(dev, "registered irq %d\n", data->irq);
+
+    /* Fill in export infos */
+    data->plat_exports.dut_mem.base = dut_base_mem;
+    data->plat_exports.dut_mem.size = dut_mem_size;
+    /* Set NNPU parameters */
+    data->plat_exports.rogue_mem_mode = TC_MEMORY_LOCAL;
+    data->plat_exports.rogue_heap_mem.base = data->plat_exports.dut_mem.base;
+    data->plat_exports.rogue_heap_mem.size = NEXEF_NNPU_HEAP_SIZE;
+    data->plat_exports.rogue_pdp_heap_mem.base = data->plat_exports.rogue_heap_mem.base +
+            data->plat_exports.rogue_heap_mem.size;
+    data->plat_exports.rogue_pdp_heap_mem.size = (data->plat_exports.dut_mem.size / 2) -
+            data->plat_exports.rogue_heap_mem.size;
+
+    data->plat_exports.nna_heap_mem.base = data->plat_exports.rogue_pdp_heap_mem.base +
+            data->plat_exports.rogue_pdp_heap_mem.size;
+    data->plat_exports.nna_heap_mem.size = (data->plat_exports.dut_mem.size / 2);
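+
+    /* Resulting carve-up of the DUT memory bar:
+     *   |<-------------- first half -------------->|<-- second half -->|
+     *   | NNPU heap (128 MB) | NNPU PDP heap       | NNA heap          |
+     */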
+
+    dev_info(dev, "DUT Memory regions:\n");
+    dev_info(dev, "DUT Mem  : %08llx-%08llx (size: %08llx)\n",
+             data->plat_exports.dut_mem.base,
+             data->plat_exports.dut_mem.base + data->plat_exports.dut_mem.size,
+             data->plat_exports.dut_mem.size);
+    dev_info(dev, "NNPU heap: %08llx-%08llx (size: %08llx)\n",
+            data->plat_exports.rogue_heap_mem.base,
+            data->plat_exports.rogue_heap_mem.base + data->plat_exports.rogue_heap_mem.size,
+             data->plat_exports.rogue_heap_mem.size);
+    dev_info(dev, "NNPU pdp : %08llx-%08llx (size: %08llx)\n",
+             data->plat_exports.rogue_pdp_heap_mem.base,
+             data->plat_exports.rogue_pdp_heap_mem.base + data->plat_exports.rogue_pdp_heap_mem.size,
+             data->plat_exports.rogue_pdp_heap_mem.size);
+    dev_info(dev, "NNA      : %08llx-%08llx (size: %08llx)\n",
+             data->plat_exports.nna_heap_mem.base,
+             data->plat_exports.nna_heap_mem.base + data->plat_exports.nna_heap_mem.size,
+             data->plat_exports.nna_heap_mem.size);
+
+    /* We now are ready to create the platform drivers */
+    ret = nexef_register_rogue_plat_device(data);
+    if (ret) {
+        dev_err(dev, "cannot create NNPU platform device!\n");
+        goto out_disable_int;
+    }
+
+    ret = nexef_register_nna_plat_device(data);
+    if (ret) {
+        dev_err(dev, "cannot create NNA platform device!\n");
+        goto out_disable_int;
+    }
+
+    return ret;
+
+out_disable_int:
+    /* Make sure int are no longer enabled */
+    odin_disable_int(data, INT_INTERRUPT_DUT0);
+
+out_release:
+    pci_release_regions(pci_dev);
+
+out_disable:
+    pci_disable_device(pci_dev);
+
+out:
+    return ret;
+}
+
+static void nexef_plat_remove(struct pci_dev *pcidev)
+{
+    struct nexefdrv_prvdata *priv_data = dev_get_drvdata(&pcidev->dev);
+
+    dev_dbg(&pcidev->dev, "removing device\n");
+
+    if (priv_data == NULL) {
+        dev_err(&pcidev->dev, "PCI priv data missing!\n");
+    } else {
+        /*
+         * We need to disable interrupts for the
+         * embedded device via the fpga interrupt controller...
+         */
+        odin_disable_int(priv_data, INT_INTERRUPT_DUT0);
+
+        /* Unregister int */
+        devm_free_irq(&pcidev->dev, priv_data->irq, pcidev);
+
+        /* Unregister all platform devices potentially allocated */
+        nexef_unregister_rogue_plat_device(priv_data);
+        nexef_unregister_nna_plat_device(priv_data);
+    }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(&pcidev->dev);
+#endif
+    pci_release_regions(pcidev);
+    pci_disable_device(pcidev);
+
+    /* Just to make sure */
+    nexef_pci_drv.pci_dev = NULL;
+}
+
+#ifdef CONFIG_PM
+static int nexef_plat_suspend(struct device *dev)
+{
+    /* nothing, for now, to be done here */
+	return 0;
+}
+
+static int nexef_plat_resume(struct device *dev)
+{
+    /* nothing, for now, to be done here */
+	return 0;
+}
+#endif
+
+static int nexef_plat_init(void)
+{
+    int ret;
+
+    ret = pci_register_driver(&nexef_pci_drv.pci_driver);
+    if (ret) {
+        pr_err("failed to register PCI driver!\n");
+        return ret;
+    }
+
+    /* pci_dev should be set in probe */
+    if (!nexef_pci_drv.pci_dev) {
+        pr_err("failed to find compatible NeXeF PCI device!\n");
+        pci_unregister_driver(&nexef_pci_drv.pci_driver);
+        return -ENODEV;
+    }
+
+    return 0;
+}
+
+static void nexef_plat_exit(void)
+{
+    /* Not sure there is anything to be done here... */
+    if (nexef_pci_drv.pci_dev) {
+        pci_unregister_driver(&nexef_pci_drv.pci_driver);
+    }
+}
+
+module_init(nexef_plat_init);
+module_exit(nexef_plat_exit);
+MODULE_LICENSE("GPL");
+
+//endregion Kernel related functions
+
+//region NNPU needed exported functions
+
+int tc_enable(struct device *dev);
+void tc_disable(struct device *dev);
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+                             void (*handler_function)(void *), void *data);
+int tc_enable_interrupt(struct device *dev, int interrupt_id);
+int tc_disable_interrupt(struct device *dev, int interrupt_id);
+int tc_sys_info(struct device *dev, uint32_t *tmp, uint32_t *pll);
+int tc_sys_strings(struct device *dev,
+                   char *str_fpga_rev, size_t size_fpga_rev,
+                   char *str_tcf_core_rev, size_t size_tcf_core_rev,
+                   char *str_tcf_core_target_build_id,
+                   size_t size_tcf_core_target_build_id,
+                   char *str_pci_ver, size_t size_pci_ver,
+                   char *str_macro_ver, size_t size_macro_ver);
+int tc_core_clock_speed(struct device *dev);
+
+#define FUNC_IN() pr_debug(">>> %s():%d\n", __func__, __LINE__)
+
+int tc_enable(struct device *dev)
+{
+    //struct pci_dev *pdev;
+    FUNC_IN();
+    //pdev = to_pci_dev(dev);
+
+    return 0; //pci_enable_device(pdev);
+}
+EXPORT_SYMBOL(tc_enable);
+
+void tc_disable(struct device *dev)
+{
+
+    //struct pci_dev *pdev;
+    FUNC_IN();
+    //pdev = to_pci_dev(dev);
+
+    //pci_disable_device(pdev);
+}
+EXPORT_SYMBOL(tc_disable);
+
+static char *int_names[] = {
+    "PDP",
+    "NNPU",
+    "NNA"
+};
+
+int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
+                             void (*handler_function)(void *), void *data)
+{
+    int err = -1;
+
+    FUNC_IN();
+
+    if ( (interrupt_id >= 0) && (interrupt_id < TC_INTERRUPT_COUNT) ) {
+        dev_info(dev, "Registering interrupt handler (%p) for %s [data: %p]", handler_function, int_names[interrupt_id], data);
+        err = 0;
+
+        pdev_int_handlers[interrupt_id].handler = handler_function;
+        pdev_int_handlers[interrupt_id].data = data;
+        pdev_int_handlers[interrupt_id].enabled = 0;
+    }
+    else {
+        dev_warn(dev, "%s: Invalid interrupt id %d!", __func__, interrupt_id);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(tc_set_interrupt_handler);
+
+int tc_enable_interrupt(struct device *dev, int interrupt_id)
+{
+    int err = -1;
+
+    FUNC_IN();
+
+    if ( (interrupt_id >= 0) && (interrupt_id < TC_INTERRUPT_COUNT) ) {
+        dev_info(dev, "Enabling interrupt handler for %s\n", int_names[interrupt_id]);
+
+        err = 0;
+        pdev_int_handlers[interrupt_id].enabled = 1;
+    }
+    else {
+        dev_warn(dev, "%s: Invalid interrupt id %d!", __func__, interrupt_id);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(tc_enable_interrupt);
+
+int tc_disable_interrupt(struct device *dev, int interrupt_id)
+{
+    int err = -1;
+
+    FUNC_IN();
+
+    if ( (interrupt_id >= 0) && (interrupt_id < TC_INTERRUPT_COUNT) ) {
+        dev_info(dev, "Disabling interrupt handler for %s\n", int_names[interrupt_id]);
+
+        err = 0;
+        pdev_int_handlers[interrupt_id].enabled = 0;
+    }
+    else {
+        dev_warn(dev, "%s: Invalid interrupt id %d!", __func__, interrupt_id);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(tc_disable_interrupt);
+
+int tc_sys_info(struct device *dev, uint32_t *tmp, uint32_t *pll)
+{
+    *tmp = 0;
+    *pll = 0;
+    return 0;
+}
+EXPORT_SYMBOL(tc_sys_info);
+
+int tc_sys_strings(struct device *dev,
+                   char *str_fpga_rev, size_t size_fpga_rev,
+                   char *str_tcf_core_rev, size_t size_tcf_core_rev,
+                   char *str_tcf_core_target_build_id,
+                   size_t size_tcf_core_target_build_id,
+                   char *str_pci_ver, size_t size_pci_ver,
+                   char *str_macro_ver, size_t size_macro_ver)
+{
+    struct nexefdrv_prvdata *priv_data = dev_get_drvdata(dev);
+    uint32_t odin_rev, odin_cs;
+
+    FUNC_IN();
+
+    odin_rev = odin_core_readreg32(priv_data, PCI_ODIN_CORE_REVISION);
+    odin_cs  = odin_core_readreg32(priv_data, PCI_ODIN_CORE_CHANGE_SET);
+
+    snprintf(str_fpga_rev, size_fpga_rev, "3NX-F odin build\n");
+    snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d", (odin_rev >> 8) & 0xF, odin_rev & 0xF);
+    snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id, "%d", odin_cs);
+    snprintf(str_pci_ver, size_pci_ver, "??\n");
+    snprintf(str_macro_ver, size_macro_ver, "??\n");
+
+    return 0;
+}
+EXPORT_SYMBOL(tc_sys_strings);
+
+int tc_core_clock_speed(struct device *dev)
+{
+    FUNC_IN();
+    return 25000000L;
+}
+EXPORT_SYMBOL(tc_core_clock_speed);
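+
+/* Note: the fixed 25000000 above matches the 25 MHz default DUT clock used by
+ * this driver; it is not read back from the hardware.
+ */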
+
+//endregion NNPU needed exported functions
+
+//region Platform related functions
+
+static uint64_t nexef_get_rogue_dma_mask(struct platdev_export_info *export_info)
+{
+    /* Does not access system memory, so there is no DMA limitation */
+    if (export_info->rogue_mem_mode == TC_MEMORY_LOCAL)
+        return DMA_BIT_MASK(64);
+
+    return DMA_BIT_MASK(32);
+}
+
+static int nexef_register_rogue_plat_device(struct nexefdrv_prvdata *priv_data)
+{
+    int err = 0;
+	struct resource nexef_rogue_resources[] = {
+            DEFINE_RES_MEM_NAMED(NEXEF_ROGUE_REG_OFFSET +
+                                 pci_resource_start(priv_data->pci_dev,
+                                                    NEXEF_ROGUE_REG_BAR),
+                                 NEXEF_ROGUE_REG_SIZE, NEXEF_NNPU_PDEV_NAME),
+	};
+	struct tc_rogue_platform_data pdata = {
+		.mem_mode = priv_data->plat_exports.rogue_mem_mode,
+		.tc_memory_base = priv_data->plat_exports.dut_mem.base,
+		.rogue_heap_memory_base = priv_data->plat_exports.rogue_heap_mem.base,
+		.rogue_heap_memory_size = priv_data->plat_exports.rogue_heap_mem.size,
+		.pdp_heap_memory_base = priv_data->plat_exports.rogue_pdp_heap_mem.base,
+		.pdp_heap_memory_size = priv_data->plat_exports.rogue_pdp_heap_mem.size,
+	};
+	struct platform_device_info odin_rogue_dev_info = {
+		.parent = &priv_data->pci_dev->dev,
+		.name = TC_DEVICE_NAME_ROGUE,
+		.id = -2,
+		.res = nexef_rogue_resources,
+		.num_res = ARRAY_SIZE(nexef_rogue_resources),
+		.data = &pdata,
+		.size_data = sizeof(pdata),
+		.dma_mask = nexef_get_rogue_dma_mask(&priv_data->plat_exports),
+	};
+
+	priv_data->plat_exports.rogue_pdev = platform_device_register_full(&odin_rogue_dev_info);
+
+	if (IS_ERR(priv_data->plat_exports.rogue_pdev)) {
+		err = PTR_ERR(priv_data->plat_exports.rogue_pdev);
+		dev_err(&priv_data->pci_dev->dev,
+			"Failed to register `%s' device (%d)\n", TC_DEVICE_NAME_ROGUE, err);
+        priv_data->plat_exports.rogue_pdev = NULL;
+	}
+	return err;
+}
+
+static int nexef_register_nna_plat_device(struct nexefdrv_prvdata *priv_data)
+{
+    int err = 0;
+
+    struct resource nexef_nna_resources[] = {
+            DEFINE_RES_MEM_NAMED(NEXEF_NNA_REG_OFFSET +
+                                 pci_resource_start(priv_data->pci_dev,
+                                                    NEXEF_NNA_REG_BAR),
+                                 NEXEF_NNA_REG_SIZE, NEXEF_NNA_PDEV_NAME),
+    };
+    struct nexef_nna_platform_data pdata = {
+            // tc->dut2_mem_base - tc->tc_mem.base
+            .nna_memory_base = priv_data->plat_exports.nna_heap_mem.base,
+            .nna_memory_offset = priv_data->plat_exports.nna_heap_mem.base - priv_data->plat_exports.dut_mem.base,
+            .nna_memory_size = priv_data->plat_exports.nna_heap_mem.size,
+    };
+    struct platform_device_info nexef_nna_dev_info = {
+            .parent = &priv_data->pci_dev->dev,
+            .name = NEXEF_NNA_DEVICE_NAME,
+            .id = -2,
+            .res = nexef_nna_resources,
+            .num_res = ARRAY_SIZE(nexef_nna_resources),
+            .data = &pdata,
+            .size_data = sizeof(pdata),
+            //.dma_mask = nexef_get_rogue_dma_mask(tc),
+    };
+
+    priv_data->plat_exports.nna_pdev = platform_device_register_full(&nexef_nna_dev_info);
+
+    if (IS_ERR(priv_data->plat_exports.nna_pdev)) {
+        err = PTR_ERR(priv_data->plat_exports.nna_pdev);
+        dev_err(&priv_data->pci_dev->dev,
+                "Failed to register `%s' device (%d)\n", NEXEF_NNA_DEVICE_NAME, err);
+        priv_data->plat_exports.nna_pdev = NULL;
+    }
+
+    return err;
+}
+
+static void nexef_unregister_rogue_plat_device(struct nexefdrv_prvdata *priv_data)
+{
+    if (priv_data->plat_exports.rogue_pdev) {
+        dev_dbg(&priv_data->pci_dev->dev, "Unregistering NNPU platform device");
+        platform_device_unregister(priv_data->plat_exports.rogue_pdev);
+    }
+}
+
+static void nexef_unregister_nna_plat_device(struct nexefdrv_prvdata *priv_data)
+{
+    if (priv_data->plat_exports.nna_pdev) {
+        dev_dbg(&priv_data->pci_dev->dev, "Unregistering NNA platform device");
+        platform_device_unregister(priv_data->plat_exports.nna_pdev);
+    }
+}
+
+//endregion Platform related functions

+ 12 - 0
driver/nexef_platform/set_fpga_freq.py

@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+import sys
+print(sys.version)
+
+from dbg_py import *
+
+if __name__ == "__main__":
+    config_devices()
+    set_dut_core_clk(25)
+    set_dut_iface_clk(25)
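+
+# Note: requires the board debug tooling that provides the dbg_py module on
+# PYTHONPATH; the arguments are presumably MHz, matching the driver's 25 MHz
+# default DUT clock.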
+

+ 137 - 0
driver/vha/Makefile

@@ -0,0 +1,137 @@
+# Alias for backward compatibility
+CONFIG_VHA_APOLLO       := $(CONFIG_VHA_FPGA)
+CONFIG_HW_AX3_MC        := $(CONFIG_HW_MULTICORE)
+
+obj-$(CONFIG_VHA)       += vha.o
+CONFIG_VHA_INFO         := $(CONFIG_VHA)
+obj-$(CONFIG_VHA_INFO)  += vha_info.o
+
+# Common files
+vha-y                   := vha_api.o vha_common.o
+vha-y                   += vha_dbg.o vha_pdump.o
+
+ifeq ($(CONFIG_HW_AX3_MC), y)
+subdir-ccflags-y        += -I$(src)/multi
+vha-y                   += multi/vha_dev.o multi/vha_wm.o multi/vha_mmu.o multi/vha_mt19937.o
+else
+subdir-ccflags-y        += -I$(src)/single
+vha-y                   += single/vha_dev.o single/vha_cnn.o single/vha_mmu.o
+vha-$(CONFIG_HW_AX2)    += single/vha_dev_ax2.o
+vha-$(CONFIG_HW_AX3)    += single/vha_dev_ax3.o
+endif
+
+PLAT := platform
+vha-$(CONFIG_VHA_PCI)          += $(PLAT)/vha_plat_pci.o
+vha-$(CONFIG_VHA_DUMMY)        += $(PLAT)/vha_plat_dummy.o
+vha-$(CONFIG_VHA_DUMMY_HW_SIM) += $(PLAT)/vha_plat_dummy.o
+vha-$(CONFIG_VHA_APOLLO)       += $(PLAT)/vha_plat_apollo.o
+vha-$(CONFIG_VHA_ODIN)         += $(PLAT)/vha_plat_odin.o
+vha-$(CONFIG_VHA_EMU)          += $(PLAT)/vha_plat_emu.o
+vha-$(CONFIG_VHA_FROST)        += $(PLAT)/vha_plat_frost.o
+vha-$(CONFIG_VHA_ORION)        += $(PLAT)/vha_plat_orion.o
+vha-$(CONFIG_VHA_NEXEF)        += $(PLAT)/vha_plat_nexef.o
+vha-$(CONFIG_VHA_DT_EXAMPLE)   += $(PLAT)/vha_plat_dt_example.o $(PLAT)/vha_plat_dt.o
+
+vha-$(CONFIG_VHA_THEAD_LIGHT_FPGA_C910)   += $(PLAT)/vha_plat_thead.o $(PLAT)/vha_plat_thead_light_fpga_c910.o
+vha-$(CONFIG_VHA_THEAD_LIGHT)   += $(PLAT)/vha_plat_thead.o $(PLAT)/vha_plat_thead_light.o
+
+# System configuration
+# For AX2, Mirage is used by default.
+CONFIG_VHA_SYS_MIRAGE := $(CONFIG_HW_AX2)
+# For 3NX, Aura is used by default.
+CONFIG_VHA_SYS_AURA := $(CONFIG_HW_AX3)
+# For 4NX (3NX-MC), Magna is used by default.
+CONFIG_VHA_SYS_MAGNA := $(CONFIG_HW_AX3_MC)
+
+ccflags-$(CONFIG_VHA_SYS_MIRAGE) += -DCFG_SYS_MIRAGE
+ccflags-$(CONFIG_VHA_SYS_AURA)   += -DCFG_SYS_AURA
+ccflags-$(CONFIG_VHA_SYS_VAGUS)  += -DCFG_SYS_VAGUS
+ccflags-$(CONFIG_VHA_SYS_MAGNA)  += -DCFG_SYS_MAGNA
+
+ccflags-y        += -I$(src)/$(PLAT)/
+subdir-ccflags-y += -I$(src)
+
+ifdef CONFIG_NEXEF_NNPU_INCLUDE
+    subdir-ccflags-y += -I${CONFIG_NEXEF_NNPU_INCLUDE}
+endif
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=VHA_CORE
+
+ccflags-$(CONFIG_BUS_MASTERING) += -DFPGA_BUS_MASTERING
+
+ccflags-$(CONFIG_VHA_DUMMY)        += -DCONFIG_VHA_DUMMY
+ccflags-$(CONFIG_VHA_DUMMY_HW_SIM) += -DCONFIG_VHA_DUMMY -DCONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+ccflags-$(CONFIG_VHA_APOLLO)       += -DCONFIG_VHA_APOLLO
+ccflags-$(CONFIG_VHA_ORION)        += -DCONFIG_VHA_ORION
+ccflags-$(CONFIG_VHA_ODIN)         += -DCONFIG_VHA_ODIN
+ccflags-$(CONFIG_VHA_NEXEF)        += -DCONFIG_VHA_NEXEF
+
+ccflags-$(CONFIG_ION)    += -Idrivers/staging/android/ion
+
+ccflags-$(CONFIG_HW_AX2) += -DHW_AX2
+
+# Mirrored page tables enabled by default if not specified
+CONFIG_VHA_MMU_MIRRORED_CTX ?= y
+ccflags-$(CONFIG_VHA_MMU_MIRRORED_CTX) += -DVHA_MMU_MIRRORED_CTX_SUPPORT
+
+ccflags-$(CONFIG_HW_AX3) += -DHW_AX3
+
+# Default number of cores
+CONFIG_VHA_NCORES ?= 6
+ccflags-$(CONFIG_HW_AX3_MC) += -DCONFIG_HW_MULTICORE -DCONFIG_VHA_NCORES=$(CONFIG_VHA_NCORES)
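+# The default above is a build-time assumption; it can be overridden on the
+# make command line, e.g. 'make CONFIG_HW_MULTICORE=y CONFIG_VHA_NCORES=2'
+# for a hypothetical 2-core multicore build.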
+
+# Enhanced APM enabled by default if not specified
+CONFIG_VHA_ENHANCED_APM ?= y
+ccflags-$(CONFIG_VHA_ENHANCED_APM) += -DVHA_ENHANCED_APM
+
+# Default OSID = 0
+CONFIG_TARGET_OSID ?= 0
+# Magna does not use multiple OSes approach
+ifeq ($(CONFIG_HW_AX3), y)
+  ifeq ($(CONFIG_HW_AX3_MC),)
+      ccflags-y += -DOSID=$(CONFIG_TARGET_OSID)
+  endif
+endif
+
+# Enable Safety critical features by default
+ifeq ($(CONFIG_HW_AX3_MC)$(CONFIG_VHA_SYS_VAGUS), y)
+    CONFIG_VHA_SCF ?= y
+endif
+
+ifeq ($(CONFIG_HW_AX3_MC)$(CONFIG_VHA_SCF), yy)
+    vha-y                 += multi/vha_sc_dbg.o
+endif
+
+ccflags-$(CONFIG_VHA_SCF) += -DVHA_SCF
+
+# Enable support for the *_EVENT_INJECT registers only for MC hardware,
+# and only if the kernel FUNCTION_ERROR_INJECTION feature is enabled.
+ifeq ($(CONFIG_FUNCTION_ERROR_INJECTION), y)
+  ifeq ($(CONFIG_HW_AX3), y)
+    ccflags-y += -DVHA_EVENT_INJECT
+  endif
+endif
+
+ifeq ($(CONFIG_FUNCTONAL_TEST_CONTROL), y)
+    ccflags-y += -DVHA_FUNCT_CTRL
+endif
+
+ifeq ($(CONFIG_FORCE_IOREG_DEBUG), y)
+    ccflags-y += -DVHA_FORCE_IO_DEBUG
+endif
+
+# Low priority subsegments are only selectable on single-core AX3; forced off elsewhere.
+ifeq ($(CONFIG_HW_AX3), y)
+  ifeq ($(CONFIG_HW_AX3_MC),y)
+    CONFIG_VHA_LO_PRI_SUBSEGS = n
+  else
+    CONFIG_VHA_LO_PRI_SUBSEGS ?= n
+  endif
+else
+    CONFIG_VHA_LO_PRI_SUBSEGS = n
+endif
+ccflags-$(CONFIG_VHA_LO_PRI_SUBSEGS) += -DVHA_USE_LO_PRI_SUB_SEGMENTS
+ifeq ($(CONFIG_VHA_LO_PRI_SUBSEGS), y)
+    obj-$(CONFIG_VHA) += vha_monitor.o
+endif
+

+ 4213 - 0
driver/vha/multi/vha_dev.c

@@ -0,0 +1,4213 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/preempt.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include <uapi/vha.h>
+#include <uapi/vha_errors.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+#include "vha_mt19937.h"
+
+static uint32_t shared_onchipmem_size;
+module_param(shared_onchipmem_size, uint, 0444);
+MODULE_PARM_DESC(shared_onchipmem_size,
+	"Size of shared on-chip memory in bytes");
+
+/* WM debug statistics types */
+#define VHA_WM_DBG_MODE_PERF 0
+#define VHA_WM_DBG_MODE_BAND 1
+#define WM_DBG_MODE_ON(type) \
+	(wm_dbg_ctrl[VHA_WM_DBG_MODE_##type])
+
+static uint32_t wm_dbg_ctrl[2] = { 0, 0 };
+module_param_array(wm_dbg_ctrl, uint, NULL, 0444);
+MODULE_PARM_DESC(wm_dbg_ctrl,
+	"WM DEBUG CONTROL: switch for PERF and BAND: 0=disable 1=enable");
+
+static uint32_t slc_hash_mode;
+module_param(slc_hash_mode, uint, 0444);
+MODULE_PARM_DESC(slc_hash_mode,
+	"SLC_CTRL_HASH_MODE: Address decoding for SLC. 0-none; 1-pvr_v3; 2-linear; 3-in_page. See TRM");
+
+#ifdef VHA_SCF
+static uint32_t sys_ram_correction_threshold = 0;
+module_param(sys_ram_correction_threshold, uint, 0444);
+MODULE_PARM_DESC(sys_ram_correction_threshold,
+	"Threshold for system level ram correction");
+
+static uint32_t core_host_ram_correction_threshold = 0;
+module_param(core_host_ram_correction_threshold, uint, 0444);
+MODULE_PARM_DESC(core_host_ram_correction_threshold,
+	"Threshold for host core level ram correction");
+
+static uint32_t core_wm_ram_correction_threshold = 0;
+module_param(core_wm_ram_correction_threshold, uint, 0444);
+MODULE_PARM_DESC(core_wm_ram_correction_threshold,
+	"Threshold for wm core level ram correction");
+#endif
+
+#define CONF_WRITES_WAIT_TIMEOUT_MS 20
+
+#define SCHED_SEQ_CORES_MASK   0xff
+#define SCHED_SEQ_CORES_SHIFT  0
+#define SCHED_SEQ_WM_ID_MASK   0x7
+#define SCHED_SEQ_WM_ID_SHIFT  8
+#define SCHED_SEQ_GET_CORES(idx) \
+	((vha->scheduling_sequence[idx] >> SCHED_SEQ_CORES_SHIFT) & SCHED_SEQ_CORES_MASK)
+#define SCHED_SEQ_GET_WM(idx) \
+	((vha->scheduling_sequence[idx] >> SCHED_SEQ_WM_ID_SHIFT) & SCHED_SEQ_WM_ID_MASK)
+/*
+ * scheduling_sequence can be used to force execution on specific WMs/cores.
+ * It encodes the WM id (byte1) and the core mask (byte0), for example:
+ * scheduling_sequence=0x001 -> WM0/core0
+ * scheduling_sequence=0x204 -> WM2/core2
+ * scheduling_sequence=0x520 -> WM5/core5
+ * scheduling_sequence=0x310 -> WM3/core4
+ */
+static int32_t scheduling_sequence_len;
+static uint32_t scheduling_sequence[VHA_MC_SCHED_SEQ_LEN_MAX] = { 0 };
+module_param_array(scheduling_sequence, uint, &scheduling_sequence_len, 0444);
+MODULE_PARM_DESC(scheduling_sequence, "multicore scheduling sequence");
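+/* Example (hypothetical values): 'scheduling_sequence=0x001,0x204' forces the
+ * first scheduled workload onto WM0/core0 and the second onto WM2/core2,
+ * following the byte1=WM-id / byte0=core-mask encoding described above. */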
+
+#define MAX_STALLING_DATA_ENTRIES 2
+static int32_t stalling_data_len;
+static uint32_t stalling[MAX_STALLING_DATA_ENTRIES] = { 0 };
+module_param_array(stalling, uint, &stalling_data_len, 0444);
+MODULE_PARM_DESC(stalling, "stalling data");
+
+static bool test_direct_events;
+module_param(test_direct_events, bool, 0444);
+MODULE_PARM_DESC(test_direct_events,
+		"When set CORE&INTERCONNECT events are directly sent to host, to WM, otherwise");
+
+static int32_t pri_windows_list_len;
+static uint32_t pri_windows_list[VHA_MAX_PRIORITIES] = { 0 };
+module_param_array(pri_windows_list, uint, &pri_windows_list_len, 0444);
+MODULE_PARM_DESC(pri_windows_list,
+		"priority window size list starting from lowest; all 0s mean no starvation avoidance");
+
+/* Priority scheduler local data. */
+struct vha_sched_local_data {
+	void *rand_gen_handle;
+};
+/* Priority window array. */
+/* NOTE: Setting all to 0 implies strict priority scheduling (no starvation avoidance). */
+static uint32_t pri_windows[VHA_MAX_PRIORITIES] = {0};
+
+/* Parity related defines. */
+#ifdef VHA_SCF
+	#define VHA_PARITY_READ_COUNT_MAX  4
+#endif
+
+struct vha_errcode {
+	uint8_t e;
+	const char* s;
+	enum vha_reset_type reset_type;
+	uint64_t rsp_err;
+};
+
+/* SYS event errors. */
+#define ERR_SYS_EVENT_DESC(b) VHA_SYS_EVENT_TYPE(b), __stringify(b)
+static const struct vha_biterr sys_err_bits[] = {
+	{-EIO,       ERR_SYS_EVENT_DESC(AXI_ERROR       ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_AXI_ERROR)},
+	{-EFAULT,    ERR_SYS_EVENT_DESC(MMU_PAGE_FAULT  ), VHA_RESET_TYPE_MMU,  VHA_RSP_ERROR(HW_SYS_MMU_PAGE_FAULT)},
+	{-ETIMEDOUT, ERR_SYS_EVENT_DESC(SYS_MEM_WDT     ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_SYS_MEM_WDT)},
+#ifdef VHA_SCF
+	/*
+	 * Note: the hardware currently provides no way of identifying which
+	 * WM failed for these events, so a full reset is used until it does. */
+	{-EIO,       ERR_SYS_EVENT_DESC(AXI_MEMORY_PARITY_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_AXI_MEMORY_PARITY_ERROR)},
+	{-EIO,       ERR_SYS_EVENT_DESC(MMU_PARITY_ERROR       ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_MMU_PARITY_ERROR)}, /*VHA_RESET_TYPE_MMU},*/
+	{-EIO,       ERR_SYS_EVENT_DESC(RAM_CORRECTION         ), VHA_RESET_TYPE_NONE, VHA_RSP_ERROR(HW_SYS_RAM_CORRECTION)},
+	{-EIO,       ERR_SYS_EVENT_DESC(RAM_DETECTION          ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_RAM_DETECTION)},
+	{-EIO,       ERR_SYS_EVENT_DESC(LSYNC_INV_REQ          ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_LSYNC_INV_REQ)},
+	{-EIO,       ERR_SYS_EVENT_DESC(LOGIC_ERROR            ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SYS_LOGIC_ERROR)},
+	{-EIO,       VHA_REG_PARITY_ERROR_EN, __stringify(PARITY_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(SW_SYS_EVNT_PARITY_ERROR)},
+#endif
+	{0}
+};
+
+/* WM event errors. */
+#define ERR_WM_EVENT_DESC(b) VHA_WM_EVENT_TYPE(b), __stringify(b)
+static const struct vha_biterr wm_err_bits[] = {
+	{-ETIMEDOUT, ERR_WM_EVENT_DESC(WM_WL_WDT     ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_EVNT_WM_WL_WDT)},
+	{-ETIMEDOUT, ERR_WM_EVENT_DESC(WM_WL_IDLE_WDT), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_EVNT_WM_WL_IDLE_WDT)},
+	{-ETIMEDOUT, ERR_WM_EVENT_DESC(WM_SOCIF_WDT  ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_EVNT_WM_SOCIF_WDT)},
+	{-EFAULT,    ERR_WM_EVENT_DESC(LOGIC_FAULT   ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_EVNT_LOGIC_FAULT)},
+#ifdef VHA_SCF
+	{-EIO,       VHA_REG_PARITY_ERROR_EN, __stringify(PARITY_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(SW_EVNT_WM_PARITY_ERROR)},
+#endif
+	{0}
+};
+
+/* WM response FIFO status error codes. */
+#define ERR_WM_RSP_STATUS_DESC(v)  VHA_WM_RESPONSE_ERROR_CODE(v), __stringify(v)
+static const struct vha_errcode wm_rsp_err_codes[] = {
+	{ERR_WM_RSP_STATUS_DESC(CORE_IRQ_BEFORE_KICK   ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_IRQ_BEFORE_KICK)},
+	{ERR_WM_RSP_STATUS_DESC(INDIRECT_MASK_SET_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_INDIRECT_MASK_SET_ERROR)},
+	{ERR_WM_RSP_STATUS_DESC(KICK_CORE_ACCESS_ERROR ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_KICK_CORE_ACCESS_ERROR)},
+	{ERR_WM_RSP_STATUS_DESC(CNN_CONTROL_START_HIGH ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CNN_CONTROL_START_HIGH)},
+	{ERR_WM_RSP_STATUS_DESC(CNN_STATUS_ERROR       ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CNN_STATUS_ERROR)},
+	{ERR_WM_RSP_STATUS_DESC(INT_CORE_ACCESS_ERROR  ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_INT_CORE_ACCESS_ERROR)},
+	{ERR_WM_RSP_STATUS_DESC(CORE_EVENT_ERROR       ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_EVENT_ERROR)},
+	{ERR_WM_RSP_STATUS_DESC(CORE_EVENT_NOT_CLEARED ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_EVENT_NOT_CLEARED)},
+	{ERR_WM_RSP_STATUS_DESC(CORE_EVENT_IRQ_HIGH    ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_EVENT_IRQ_HIGH)},
+	{ERR_WM_RSP_STATUS_DESC(INTERCONNECT_ERROR     ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_INTERCONNECT_ERROR)},
+};
+
+/* CNN core status errors. */
+#define ERR_CORE_STATUS_DESC(b) VHA_CORE_STATUS(b), __stringify(b)
+static const struct vha_biterr core_err_bits[] = {
+	{-EIO,       ERR_CORE_STATUS_DESC(LOGIC_ERROR    ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_LOGIC_ERROR)},
+	{-EIO,       ERR_CORE_STATUS_DESC(RAM_CORRECTION ), VHA_RESET_TYPE_NONE, VHA_RSP_ERROR(HW_RAM_CORRECTION)},
+	{-EIO,       ERR_CORE_STATUS_DESC(RAM_DETECTION  ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_RAM_DETECTION)},
+	{-EIO,       ERR_CORE_STATUS_DESC(CORE_SYNC_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_CORE_SYNC_ERROR)},
+	{-ETIMEDOUT, ERR_CORE_STATUS_DESC(CORE_WDT       ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_WDT)},
+	{-ETIMEDOUT, ERR_CORE_STATUS_DESC(CORE_MEM_WDT   ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_MEM_WDT)},
+	{-EIO,       ERR_CORE_STATUS_DESC(CNN_ERROR      ), VHA_RESET_TYPE_WM,   VHA_RSP_ERROR(HW_CORE_CNN_ERROR)},
+	{0}
+};
+
+/* Interconnect status errors. */
+#define ERR_IC_STATUS_DESC(b) VHA_IC_STATUS(b), __stringify(b)
+static const struct vha_biterr ic_err_bits[] = {
+	{-EIO,       ERR_IC_STATUS_DESC(LOCKSTEP_ERROR         ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_LOCKSTEP_ERROR)},
+	{-EIO,       ERR_IC_STATUS_DESC(LOGIC_ERROR            ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_IC_LOGIC_ERROR)},
+	{-EIO,       ERR_IC_STATUS_DESC(SOCIF_READ_MISMATCH    ), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SOCIF_READ_MISMATCH)},
+	{-EIO,       ERR_IC_STATUS_DESC(SOCIF_READ_UNRESPONSIVE), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(HW_SOCIF_READ_UNRESPONSIVE)},
+#ifdef VHA_SCF
+	{-EIO,       VHA_REG_PARITY_ERROR_EN, __stringify(PARITY_ERROR), VHA_RESET_TYPE_FULL, VHA_RSP_ERROR(SW_IC_PARITY_ERROR)},
+#endif
+	{0}
+};
+
+bool vha_dev_dbg_params_check(struct vha_dev *vha)
+{
+	if (vha->scheduling_sequence_len > 0) {
+		uint32_t i;
+		for (i = 0; i < vha->scheduling_sequence_len; i++) {
+			uint8_t wm_id = SCHED_SEQ_GET_WM(i);
+			uint8_t core_mask = SCHED_SEQ_GET_CORES(i);
+			if ((wm_id >= vha->hw_props.num_cnn_core_devs) ||
+				(~vha->full_core_mask & core_mask)) {
+				dev_info(vha->dev,
+						"%u/0x%02x -> %u/0x%02x (0x%02x)",
+						wm_id, core_mask,
+						vha->hw_props.num_cnn_core_devs, vha->full_core_mask,
+						(~vha->full_core_mask & core_mask));
+				dev_err(vha->dev,
+						"'scheduling_sequence' contains cores that do not exist on this h/w.\n");
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
+bool vha_dev_dbg_params_init(struct vha_dev *vha)
+{
+	vha->scheduling_sequence_len = scheduling_sequence_len;
+	memcpy(vha->scheduling_sequence,
+		scheduling_sequence, sizeof(scheduling_sequence));
+	vha->scheduling_counter = 0;
+
+	vha->stalling_sysbus_host_stall_ratio = stalling[0];
+	vha->stalling_membus_sys_stall_ratio  = stalling[1];
+
+	return vha_dev_dbg_params_check(vha);
+}
+
+int vha_dev_scheduler_init(struct vha_dev *vha)
+{
+	int ret;
+	uint32_t seed, i;
+	bool use_default_pri_windows = true;
+
+	vha->hw_sched_status.num_cores_free = vha->hw_props.num_cnn_core_devs;
+	vha->hw_sched_status.num_wms_free   = vha->hw_props.num_cnn_core_devs;
+	vha->hw_sched_status.free_core_mask =
+				VHA_GET_CORE_MASK(vha->hw_props.num_cnn_core_devs);
+	vha->hw_sched_status.free_wm_mask =
+				VHA_GET_WM_MASK(vha->hw_props.num_cnn_core_devs);
+	vha->full_core_mask = vha->hw_sched_status.free_core_mask;
+	vha->wm_core_assignment = (uint64_t)(
+			VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED);
+	vha->active_core_mask = 0;
+	vha->apm_core_mask = 0;
+
+	/* Allocate priority scheduler data. */
+	vha->hw_sched_status.sched_data = kzalloc(sizeof(struct vha_sched_local_data), GFP_KERNEL);
+	if (vha->hw_sched_status.sched_data == NULL) {
+		dev_err(vha->dev, "%s: failed allocating scheduler data\n", __func__);
+		return -ENOMEM;
+	}
+	/* Initialise random number generator for priority scheduling. */
+	get_random_bytes(&seed, sizeof(seed));
+	ret = vha_mt19937_init(seed, &vha->hw_sched_status.sched_data->rand_gen_handle);
+	if (ret != 0) {
+		dev_err(vha->dev, "%s: failed initialising random generator\n", __func__);
+		kfree(vha->hw_sched_status.sched_data);
+		return ret;
+	}
+	/* Attempt to use the priority windows passed to the kernel module. */
+	if (pri_windows_list_len == VHA_MAX_PRIORITIES) {
+		uint32_t num_zeros = 0;
+		for (i = 0; i < VHA_MAX_PRIORITIES; i++)
+			if (pri_windows_list[i] == 0)
+				num_zeros++;
+		/* Accept the list only if it contains no zeros at all, or is all
+		 * zeros (strict priority scheduling); reject mixed lists. */
+		if ((num_zeros > 0) && (num_zeros < VHA_MAX_PRIORITIES)) {
+			dev_warn(vha->dev, "%s: some priority windows are set to 0; "
+					"using default settings\n", __func__);
+		} else {
+			memcpy(pri_windows, pri_windows_list, sizeof(pri_windows));
+			use_default_pri_windows = false;
+		}
+	} else if (pri_windows_list_len > 0) {
+		dev_warn(vha->dev, "%s: too few priority windows provided (needed %u); "
+				"using default settings\n", __func__, VHA_MAX_PRIORITIES);
+	}
+	/* Calculate priority windows. */
+	if (use_default_pri_windows) {
+#define BASE_PRI_WINDOW_WIDTH 30
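+		/* With e.g. VHA_MAX_PRIORITIES == 3 this yields windows of 30, 90
+		 * and 150 for the lowest, middle and highest priority respectively. */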
+		for (i = 0; i < VHA_MAX_PRIORITIES; i++)
+			pri_windows[i] = BASE_PRI_WINDOW_WIDTH + (i * 2 * BASE_PRI_WINDOW_WIDTH);
+#undef BASE_PRI_WINDOW_WIDTH
+	}
+
+	return 0;
+}
+
+int vha_dev_scheduler_deinit(struct vha_dev *vha)
+{
+	int ret;
+
+	if (vha->hw_sched_status.sched_data == NULL) {
+		dev_warn(vha->dev, "%s: scheduler not initialised\n", __func__);
+		return 0;
+	}
+	ret = vha_mt19937_deinit(vha->hw_sched_status.sched_data->rand_gen_handle);
+	if (ret != 0) {
+		dev_err(vha->dev, "%s: failed deinitialising random generator\n", __func__);
+	}
+	kfree(vha->hw_sched_status.sched_data);
+
+	return ret;
+}
+
+void vha_dev_mh_setup(struct vha_dev *vha, int ctx_id, struct vha_mh_config_regs *regs)
+{
+
+	regs->cnn_preload_control |= VHA_CR_SETBITS(OS0_CNN_PRELOAD_CONTROL,
+								CBUF_N_REQS, VHA_CR_CNN_PRELOAD_CTRL_N_64);
+	/* Setup preload for MMM */
+	regs->cnn_preload_control |= VHA_CR_SETBITS(OS0_CNN_PRELOAD_CONTROL,
+								MMM_RD_N_REQS, VHA_CR_CNN_PRELOAD_CTRL_N_256);
+	regs->cnn_preload_control |= VHA_CR_SETBITS(OS0_CNN_PRELOAD_CONTROL,
+								MMM_WR_N_REQS, VHA_CR_CNN_PRELOAD_CTRL_N_256);
+
+	img_pdump_printf("-- MH setup:%d\n", ctx_id);
+	IOWRITE64_CR_PDUMP(regs->cnn_preload_control, OS0_CNN_PRELOAD_CONTROL);
+
+	regs->req_ctxt_override = VHA_SET_FIELD_SIMPLE_VAL(REQ_CTXT_OVERRIDE, OVERRIDE_OS0, EN);
+	IOWRITE64_CR_PDUMP(regs->req_ctxt_override, REQ_CTXT_OVERRIDE);
+
+	if (slc_hash_mode) {
+		regs->slc_control = VHA_CR_SETBITS(SLC_CTRL, HASH_MODE,
+				slc_hash_mode);
+		IOWRITE64_CR_PDUMP(regs->slc_control, SLC_CTRL);
+	}
+}
+
+static int set_power_event(struct vha_dev *vha, uint64_t event)
+{
+	int ret = 0;
+	uint64_t val64;
+	/* Clear any pending power events */
+	IOWRITE64_CR_PDUMP(0, POWER_EVENT);
+	/* Confirm no power events are pending */
+	ret = IOPOLL64_CR_PDUMP(0, 100, 1000,
+			((uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, POWER_COMPLETE) |
+			 (uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, POWER_ABORT)),
+			SYS_EVENT_STATUS);
+	if (ret)
+		return ret;
+	/* Trigger power event */
+	IOWRITE64_CR_PDUMP(event, POWER_EVENT);
+	/* Wait for power complete */
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_STATUS, POWER_COMPLETE, EN);
+	ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+			(uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, POWER_COMPLETE),
+			SYS_EVENT_STATUS);
+	if (ret)
+		return ret;
+	/* Switch off power event */
+	IOWRITE64_CR_PDUMP(0, POWER_EVENT);
+	/* Clear power complete event status */
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_CLEAR, POWER_COMPLETE, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_EVENT_CLEAR);
+	/* Confirm power complete is cleared */
+	ret = IOPOLL64_CR_PDUMP(0, 100, 1000,
+			(uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, POWER_COMPLETE),
+			SYS_EVENT_STATUS);
+	return ret;
+}
+
+#ifdef VHA_SCF
+static void ecc_correction_setup(struct vha_dev *vha)
+{
+	uint64_t val64;
+
+	val64 = VHA_CR_SETBITS(SYS_EVENT_THRESHOLD, RAM_CORRECTION,
+		sys_ram_correction_threshold);
+	IOWRITE64_CR_PDUMP(val64, SYS_EVENT_THRESHOLD);
+
+	val64 = VHA_CR_SETBITS(CORE_EVENT_WM_THRESHOLD, RAM_CORRECTION,
+		core_wm_ram_correction_threshold);
+	IOWRITE64_CR_PDUMP(val64, CORE_EVENT_WM_THRESHOLD);
+
+	val64 = VHA_CR_SETBITS(CORE_EVENT_HOST_THRESHOLD, RAM_CORRECTION,
+		core_host_ram_correction_threshold);
+	IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_THRESHOLD);
+}
+#endif
+
+static int vha_dev_prepare_cores(struct vha_dev *vha, uint8_t core_mask)
+{
+	/* Enabling selected cores on the platform
+	 * Note: don't touch TLC, is an always ON domain */
+	uint64_t val64 = VHA_CR_SETBITS(POWER_EVENT, DOMAIN,
+				(core_mask << 1)) |
+				VHA_SET_FIELD_SIMPLE_VAL(POWER_EVENT, TYPE, POWER_UP) |
+				VHA_SET_FIELD_SIMPLE_VAL(POWER_EVENT, REQ, EN);
+
+	img_pdump_printf("-- Trigger POWER UP domain event\n");
+	return set_power_event(vha, val64);
+}
+
+static int vha_dev_flush_cores(struct vha_dev *vha, uint8_t core_mask)
+{
+	uint64_t val64;
+
+	img_pdump_printf("-- Deselect any cores\n");
+	IOWRITE64_CR_PDUMP(0, CORE_CTRL_INDIRECT);
+	/* Disabling selected cores on the platform
+	 * Note: don't touch TLC, is an always ON domain */
+	val64 = VHA_CR_SETBITS(POWER_EVENT, DOMAIN, (core_mask << 1)) |
+			VHA_SET_FIELD_SIMPLE_VAL(POWER_EVENT, TYPE, POWER_DOWN) |
+			VHA_SET_FIELD_SIMPLE_VAL(POWER_EVENT, REQ, EN);
+
+	img_pdump_printf("-- Trigger POWER DOWN domain event\n");
+	return set_power_event(vha, val64);
+}
+
+void vha_dev_setup(struct vha_dev *vha)
+{
+	uint64_t val64;
+
+	img_pdump_printf("-- MMU set virtual address range0:%#llx-%#llx\n",
+			IMG_MEM_VA_HEAP1_BASE, IMG_MEM_VA_HEAP1_SIZE);
+	val64 = (uint64_t)vha->mmu_page_size <<
+			VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT;
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_ONE,
+		BASE_ADDR, IMG_MEM_VA_HEAP1_BASE);
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_ONE,
+		END_ADDR, (IMG_MEM_VA_HEAP1_BASE + IMG_MEM_VA_HEAP1_SIZE));
+	IOWRITE64_PDUMP(val64, VHA_CR_MMU_PAGE_SIZE_RANGE_ONE);
+
+	img_pdump_printf("-- MMU set virtual address range1:%#llx-%#llx\n",
+			IMG_MEM_VA_HEAP2_BASE, IMG_MEM_VA_HEAP2_SIZE);
+	val64 = (uint64_t)vha->mmu_page_size <<
+			VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT ;
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_TWO,
+		BASE_ADDR, IMG_MEM_VA_HEAP2_BASE);
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_TWO,
+		END_ADDR, (IMG_MEM_VA_HEAP2_BASE + IMG_MEM_VA_HEAP2_SIZE));
+	IOWRITE64_PDUMP(val64, VHA_CR_MMU_PAGE_SIZE_RANGE_TWO);
+
+#ifdef VHA_SCF
+	ecc_correction_setup(vha);
+#endif
+}
+
+void vha_dev_wait(struct vha_dev *vha)
+{
+	/* Nothing to do */
+}
+
+static void vha_dev_disable_events(struct vha_dev *vha, uint8_t core_mask, bool sys_release)
+{
+	uint8_t id;
+
+	if (sys_release) {
+		img_pdump_printf("-- Disable SYS events\n");
+		IOWRITE64_CR_PDUMP(0, SYS_EVENT_ENABLE);
+
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			img_pdump_printf("-- Select WM%u\n", id);
+			VHA_LOCK_WM();
+			VHA_SELECT_WM(id);
+			img_pdump_printf("-- Clear WM%u events\n", id);
+			IOWRITE64_CR_PDUMP(VHA_WM_EVENTS_DEFAULT, WM_EVENT_CLEAR);
+			img_pdump_printf("-- Disable WM%u events\n", id);
+			IOWRITE64_CR_PDUMP(0, WM_EVENT_ENABLE);
+			VHA_UNLOCK_WM();
+		}
+	}
+
+	img_pdump_printf("-- Select cores\n");
+	IOWRITE64_CR_PDUMP((uint64_t)core_mask, CORE_CTRL_INDIRECT);
+
+	if (test_direct_events) {
+		img_pdump_printf("-- Disable CORE events to HOST\n");
+		IOWRITE64_CR_PDUMP(0, CORE_EVENT_HOST_ENABLE);
+
+		img_pdump_printf("-- Disable INTERCONNECT events to HOST\n");
+		IOWRITE64_CR_PDUMP(0, INTERCONNECT_EVENT_HOST_ENABLE);
+	} else {
+		img_pdump_printf("-- Disable CORE events to WM\n");
+		IOWRITE64_CR_PDUMP(0, CORE_EVENT_WM_ENABLE);
+
+		img_pdump_printf("-- Disable INTERCONNECT events to WM\n");
+		IOWRITE64_CR_PDUMP(0, INTERCONNECT_EVENT_WM_ENABLE);
+	}
+}
+
+static void vha_dev_ready(struct vha_dev *vha, uint8_t core_mask, bool sys_setup)
+{
+	uint8_t id;
+
+	if (sys_setup) {
+		img_pdump_printf("-- Enable SYS events\n");
+		IOWRITE64_CR_PDUMP(VHA_SYS_EVENTS_DEFAULT, SYS_EVENT_ENABLE);
+		img_pdump_printf("-- Clear SYS events\n");
+		IOWRITE64_CR_PDUMP(VHA_SYS_EVENTS_DEFAULT, SYS_EVENT_CLEAR);
+
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			img_pdump_printf("-- Select WM%u\n", id);
+			VHA_LOCK_WM();
+			VHA_SELECT_WM(id);
+			img_pdump_printf("-- Enable WM%u events\n", id);
+			IOWRITE64_CR_PDUMP(VHA_WM_EVENTS_DEFAULT, WM_EVENT_ENABLE);
+			img_pdump_printf("-- Clear WM%u events\n", id);
+			IOWRITE64_CR_PDUMP(VHA_WM_EVENTS_DEFAULT, WM_EVENT_CLEAR);
+			VHA_UNLOCK_WM();
+		}
+	}
+
+	img_pdump_printf("-- Select cores\n");
+	IOWRITE64_CR_PDUMP((uint64_t)core_mask, CORE_CTRL_INDIRECT);
+
+	if (test_direct_events) {
+		img_pdump_printf("-- Enable CORE events to HOST\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_ENABLE);
+		img_pdump_printf("-- Clear CORE events on HOST\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_CLEAR);
+
+		img_pdump_printf("-- Enable INTERCONNECT events to HOST\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_HOST_ENABLE);
+		img_pdump_printf("-- Clear INTERCONNECT events on HOST\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_HOST_CLEAR);
+	} else {
+		img_pdump_printf("-- Enable CORE events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_ENABLE);
+		img_pdump_printf("-- Clear CORE events on WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_CLEAR);
+
+		img_pdump_printf("-- Enable INTERCONNECT events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_ENABLE);
+		img_pdump_printf("-- Clear INTERCONNECT events on WM\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_CLEAR);
+	}
+}
+
+/* Global reset */
+static int vha_dev_reset(struct vha_dev *vha, uint8_t core_mask, bool sys_reset)
+{
+	uint64_t val64 = 0;
+	uint8_t mask = 0;
+	uint8_t id;
+	int ret = 0;
+
+	WARN_ON(!mutex_is_locked(&vha->lock));
+
+	dev_dbg(vha->dev, "%s core mask:%#x\n", __func__, core_mask);
+
+	img_pdump_printf("-- Top level RESET sequence BEGIN\n");
+	/* Perform reset procedure */
+
+	if (sys_reset) {
+		/* First reset all WMs with cores assigned. */
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			struct vha_hw_sched_info sched_info = {0};
+			sched_info.core_mask = vha_wm_get_cores(vha, id);
+			if (sched_info.core_mask) {
+				sched_info.wm_id = id;
+				vha_wm_reset(vha, &sched_info);
+				core_mask &= ~sched_info.core_mask;
+			}
+		}
+	}
+
+	/* Core reset procedure. */
+	img_pdump_printf("-- Resetting cores\n");
+
+	/* Proceed core by core, unassigned cores only */
+	for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+		if (!(core_mask & VHA_CORE_ID_TO_MASK(id)))
+			continue;
+
+		/* Reset Assertion */
+
+		/* 1.  Select current core. */
+		img_pdump_printf("-- Select core%u\n", id);
+		mask = VHA_CORE_ID_TO_MASK(id);
+		IOWRITE64_CR_PDUMP(mask, CORE_CTRL_INDIRECT);
+		/* 3. Disable page fault interrupts for core while resetting. */
+		img_pdump_printf("-- Disable page fault interrupts for core%u\n", id);
+		val64 = IOREAD64_CR_REGIO(SYS_EVENT_ENABLE);
+		val64 &= ~(VHA_CR_SETBITS(SYS_EVENT_ENABLE, MMU_PAGE_FAULT, mask));
+		IOWRITE64_CR_PDUMP(val64, SYS_EVENT_ENABLE);
+		/* 4. Force global clocks to ON on current core (others set to AUTO). */
+		img_pdump_printf("-- Force global clocks ON for core%u (others set to AUTO)\n", id);
+		val64 = VHA_SYS_CLOCK_MODE(INTERCONNECT, ON) |
+				VHA_SYS_CLOCK_MODE_MULTI(CORE, ON, mask) |
+				VHA_SYS_CLOCK_MODE_MULTI(CORE, AUTO, (uint8_t)~mask) |
+				VHA_SYS_CLOCK_MODE_MULTI(NOC, AUTO, ~0) |
+				VHA_SYS_CLOCK_MODE_MULTI(WM, AUTO, ~0) |
+				VHA_SYS_CLOCK_MODE(AXI, AUTO) |
+				VHA_SYS_CLOCK_MODE(SLC, AUTO) |
+				VHA_SYS_CLOCK_MODE(LSYNC, AUTO) |
+				VHA_SYS_CLOCK_MODE(SOCM, AUTO) |
+				VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
+		IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+		/* 5. Set all core level clocks to AUTO. */
+		img_pdump_printf("-- Set all core%u level clocks to AUTO\n", id);
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 6. Move core into soft reset. */
+		img_pdump_printf("-- Perform soft reset on core%u\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_SOFT_RESET, CORE_RESET, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_SOFT_RESET);
+		/*    Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(CORE_SOFT_RESET);
+		/*    Clear reset. */
+		IOWRITE64_CR_PDUMP(0, CORE_SOFT_RESET);
+		/* 7. Wait until core memory bus reset has completed. */
+		img_pdump_printf("-- Wait until core%u memory bus reset has completed\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 8. Clear core memory bus reset interrupt. */
+		img_pdump_printf("-- Clear core%u memory bus reset interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, MEMBUS_RESET_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/* 9. Clear the core indirect register. */
+		img_pdump_printf("-- Deselect core%u\n", id);
+		IOWRITE64_CR_PDUMP(0, CORE_CTRL_INDIRECT);
+		/* 10. Ensure no resets are pending. */
+		img_pdump_printf("-- Ensure no resets are pending\n");
+		IOWRITE64_CR_PDUMP(0, SYS_RESET_CTRL);
+		/* 11. Move current core into full reset state. */
+		img_pdump_printf("-- Move core%u into full reset state\n", id);
+		val64 = VHA_CR_SETBITS(SYS_RESET_CTRL, CORE, mask);
+		IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
+		/* 12. Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+
+		/* Reset Deassertion */
+
+		/* 1. Move current core out of reset state. */
+		img_pdump_printf("-- Move core%u out of reset state\n", id);
+		val64 &= ~(VHA_CR_SETBITS(SYS_RESET_CTRL, CORE, mask));
+		IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
+		/*    Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+		/* 2. Select current core again. */
+		img_pdump_printf("-- Select core%u again\n", id);
+		IOWRITE64_CR_PDUMP(mask, CORE_CTRL_INDIRECT);
+		/* 5. Force core clocks to ON for everything. */
+		img_pdump_printf("-- Force core clocks ON for everything\n");
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(ON);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 6. Perform core level RAM initialisation. */
+		img_pdump_printf("-- Perform core%u level RAM initialisation\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(FUSA_CONTROL, ECC_INIT_KICK, EN);
+		IOWRITE64_CR_PDUMP(val64, FUSA_CONTROL);
+		/* 7. Perform LOCM scrubbing. */
+		img_pdump_printf("-- Perform core%u LOCM scrubbing\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(LOCM_SCRUB_CTRL, KICK, EN);
+		IOWRITE64_CR_PDUMP(val64, LOCM_SCRUB_CTRL);
+		/* 8. Wait until the RAM initialisation sequence has completed. */
+		img_pdump_printf("-- Wait until the RAM initialisation sequence has completed\n");
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 9. Clear core RAM reset interrupt. */
+		img_pdump_printf("-- Clear core%u RAM reset interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, RAM_INIT_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/*    Confirm that 'RAM_INIT_DONE' field is cleared. */
+		img_pdump_printf("-- Confirm that core%u RAM reset interrupt is cleared\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(0ULL, 10, 100, val64, CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 10. Wait until the LOCM scrubbing sequence has completed. */
+		img_pdump_printf("-- Wait until the LOCM scrubbing sequence has completed.\n");
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 11. Deassert core LOCM scrubbing. */
+		img_pdump_printf("-- Deassert core%u LOCM scrubbing\n", id);
+		IOWRITE64_CR_PDUMP(0, LOCM_SCRUB_CTRL);
+		/* 12. Clear core LOCM scrub interrupt. */
+		img_pdump_printf("-- Clear core%u LOCM scrub interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, LOCM_SCRUB_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/*     Confirm that 'LOCM_SCRUB_DONE' field is cleared. */
+		img_pdump_printf("-- Confirm that core%u LOCM scrub interrupt is cleared\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(0ULL, 10, 100, val64, CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 13. Enable the interrupts from core to WM. */
+		img_pdump_printf("-- Enable CORE events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_ENABLE);
+		/* 14. Clear all status from CORE_EVENT_WM (clears the RAM_INIT_DONE). */
+		img_pdump_printf("-- Clear CORE events on WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, RAM_INIT_DONE, EN) |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, LOCM_SCRUB_DONE, EN) |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, MEMBUS_RESET_DONE, EN),
+				CORE_EVENT_WM_CLEAR);
+		/* 15. Enable the interrupts from interconnect to WM */
+		img_pdump_printf("-- Enable INTERCONNECT events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_ENABLE);
+		/* 16. Disable all interrupts from the CORE to the HOST */
+		img_pdump_printf("-- Disable CORE events on host\n");
+		IOWRITE64_CR_PDUMP(0, CORE_EVENT_HOST_ENABLE);
+		/* 17. Set all core level clocks back to AUTO. */
+		img_pdump_printf("-- Set all core%u level clocks back to AUTO\n", id);
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 18. Set core global clock back to AUTO. */
+		img_pdump_printf("-- Set core%u global clock back to AUTO (others set to AUTO)\n", id);
+		val64 = VHA_SYS_CLOCKS_DEFAULT(AUTO);
+		IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+
+		/* Setup stalling if requested. */
+		if (vha->stalling_membus_sys_stall_ratio != 0)
+			IOWRITE64_CR_REGIO(vha->stalling_membus_sys_stall_ratio,
+								NN_SYS2_MEMBUS_SYS_STALL_RATIO);
+	}
+
+	if (!sys_reset)
+		return 0;
+
+	dev_dbg(vha->dev, "%s handling system level reset\n", __func__);
+
+	/* Move the rest of modules into reset state. */
+	img_pdump_printf("-- Move other modules into reset state\n");
+	val64 = VHA_SET_FIELD_SIMPLE_FULL(SYS_RESET_CTRL, WM) |
+					VHA_SET_FIELD_SIMPLE_VAL(SYS_RESET_CTRL, INTERCONNECT, EN) |
+					VHA_SET_FIELD_SIMPLE_VAL(SYS_RESET_CTRL, SLC, EN) |
+					VHA_SET_FIELD_SIMPLE_VAL(SYS_RESET_CTRL, MH, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+	/* Move the rest of modules out of reset state. */
+	img_pdump_printf("-- Move other modules out of reset state\n");
+	IOWRITE64_CR_PDUMP(0ULL, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+	/* Wait until core memory bus reset has completed. */
+	img_pdump_printf("-- Wait until sys memory bus reset has completed\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_STATUS, MEMBUS_RESET_DONE, EN);
+	ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+			(uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, MEMBUS_RESET_DONE),
+			SYS_EVENT_STATUS);
+	if (ret)
+		return ret;
+	/* Clear memory bus reset status. */
+	img_pdump_printf("-- Clear memory bus reset status\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_CLEAR, MEMBUS_RESET_DONE, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_EVENT_CLEAR);
+	/* Force all system level clocks on. */
+	img_pdump_printf("-- Force all system level clocks ON (except core)\n");
+	val64 = IOREAD64_CR_REGIO(SYS_CLK_CTRL0);
+	val64 &= VHA_SYS_CLOCKS_CORE_FULL_MASK;
+	val64 |= VHA_SYS_CLOCKS_RESET(ON);
+	IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+	/* Initiate system RAM initialisation. */
+	img_pdump_printf("-- Initiate system RAM initialisation\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_RAM_INIT, KICK, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_RAM_INIT);
+	/* Initiate system SOCM scrubbing. */
+	img_pdump_printf("-- Initiate system SOCM scrubbing\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SOCM_SCRUB_CTRL, KICK, EN);
+	IOWRITE64_CR_PDUMP(val64, SOCM_SCRUB_CTRL);
+	/* Wait until the RAM initialisation sequence has completed. */
+	img_pdump_printf("-- Wait until the RAM initialisation sequence has completed\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_STATUS, RAM_INIT_DONE, EN);
+	ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+			(uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, RAM_INIT_DONE),
+			SYS_EVENT_STATUS);
+	if (ret)
+		return ret;
+	/* Wait until the SOCM scrubbing sequence has completed. */
+	img_pdump_printf("-- Wait until the SOCM scrubbing sequence has completed\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_STATUS, SOCM_SCRUB_DONE, EN);
+	ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+			(uint64_t)VHA_CR_BITMASK(SYS_EVENT_STATUS, SOCM_SCRUB_DONE),
+			SYS_EVENT_STATUS);
+	if (ret)
+		return ret;
+	/* Deassert system SOCM scrubbing */
+	img_pdump_printf("-- Deassert system SOCM scrubbing\n");
+	IOWRITE64_CR_PDUMP(0, SOCM_SCRUB_CTRL);
+	img_pdump_printf("-- Clear sys events\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_CLEAR, RAM_INIT_DONE, EN) |
+			VHA_SET_FIELD_SIMPLE_VAL(SYS_EVENT_CLEAR, SOCM_SCRUB_DONE, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_EVENT_CLEAR);
+	/* Set all clocks back to AUTO. */
+	img_pdump_printf("-- Set all sys clocks back to AUTO\n");
+	val64 = VHA_SYS_CLOCKS_DEFAULT(AUTO);
+	IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+	/* Reset the system level register banks. */
+	img_pdump_printf("-- Reset the system level register banks\n");
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_RESET_CTRL, REGBANK, EN);
+	IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+	/* Clear reset */
+	IOWRITE64_CR_PDUMP(0, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+	img_pdump_printf("-- Top level RESET sequence END\n");
+
+	vha->wm_core_assignment = (uint64_t)(
+			VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED);
+
+	/* Setup stalling if requested. */
+	if (vha->stalling_sysbus_host_stall_ratio != 0)
+		IOWRITE64_CR_REGIO(vha->stalling_sysbus_host_stall_ratio,
+							NN_SYS2_SYSBUS_HOST_STALL_RATIO);
+
+	return ret;
+}
+
+static void vha_dev_enable_clocks(struct vha_dev *vha, uint8_t core_mask)
+{
+	uint64_t sys_clks = 0;
+	uint64_t main_clks = 0;
+
+	/* Always AUTO gating when needed */
+	sys_clks = VHA_SYS_CLOCKS_DEFAULT(AUTO);
+	main_clks = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+	/* Enable sys clocks */
+	img_pdump_printf("-- Enable SYS clocks\n");
+	IOWRITE64_CR_PDUMP(sys_clks, SYS_CLK_CTRL0);
+	/* Dummy SYS clocks status read*/
+	sys_clks = IOREAD64_CR_PDUMP(SYS_CLK_STATUS0);
+	/* Enable main clocks on all cores */
+	img_pdump_printf("-- Enable MAIN clocks on cores\n");
+	IOWRITE64_CR_PDUMP((uint64_t)core_mask, CORE_CTRL_INDIRECT);
+	IOWRITE64_CR_PDUMP(main_clks, CLK_CTRL0);
+}
+
+static int vha_dev_disable_clocks(struct vha_dev *vha, uint8_t core_mask, bool sys_release)
+{
+	uint64_t sys_clks = 0;
+	uint8_t id;
+	int ret = 0;
+
+	if (sys_release) {
+		/* Number of WMs equal to number of cores */
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			VHA_LOCK_WM();
+			VHA_SELECT_WM(id);
+			/* Check WM is idle, handle parity */
+			img_pdump_printf("-- Wait for WM%d IDLE state\n", id);
+			ret = IOPOLL64_CR_PDUMP_PARITY(VHA_CR_WM_STATUS_STATE_IDLE, 100, 1000,
+					(uint64_t)VHA_CR_WM_STATUS_STATE_MASK,
+					WM_STATUS);
+			VHA_UNLOCK_WM();
+			if (ret) {
+				struct vha_hw_sched_info sched_info = {
+						.wm_id = id,
+						.core_mask = 0
+				};
+				dev_err(vha->dev, "Performing WM%d reset due to HW error detection\n", id);
+				vha_wm_reset(vha, &sched_info);
+				dev_err(vha->dev, "%s Waiting for WM%d IDLE state failed!\n",
+						__func__, id);
+				return ret;
+			}
+		}
+	}
+	vha_wm_release_cores(vha, core_mask, true);
+
+	img_pdump_printf("-- Address cores\n");
+	IOWRITE64_CR_PDUMP((uint64_t)core_mask, CORE_CTRL_INDIRECT);
+
+	/* If auto gating was turned on, wait for clocks GATED state on all cores */
+	img_pdump_printf("-- Wait for clocks IDLE state\n");
+	ret = IOPOLL64_CR_PDUMP(0, 100, 1000,
+			VHA_CR_CLK_STATUS0_MASKFULL,
+			CLK_STATUS0);
+	if (ret) {
+		dev_err(vha->dev, "%s Waiting for clocks IDLE state failed!\n",
+				__func__);
+		return ret;
+	}
+
+	if (sys_release) {
+		/* Wait for MMU,CCM,RDI,XBAR IDLE state */
+		img_pdump_printf("-- Wait for memory bus interface IDLE state\n");
+		ret = IOPOLL64_CR_PDUMP(VHA_CR_SLC_IDLE_MASKFULL, 1000, 1000,
+				VHA_CR_SLC_IDLE_MASKFULL,
+				SLC_IDLE);
+		if (ret) {
+			dev_err(vha->dev, "%s Waiting for memory bus interface IDLE state failed\n",
+					__func__);
+			return ret;
+		}
+	}
+	/* Finally disable core clocks */
+	img_pdump_printf("-- Disable MAIN clocks\n");
+	IOWRITE64_CR_PDUMP(0, CLK_CTRL0); /* main */
+
+	if (sys_release) {
+		/* Finally disable sys clocks */
+		img_pdump_printf("-- Disable SYS clocks (except REGBANK)\n");
+		sys_clks = VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
+		IOWRITE64_CR_PDUMP(sys_clks, SYS_CLK_CTRL0); /* sys */
+	}
+
+	return ret;
+}
+
+void vha_update_utilization(struct vha_dev *vha)
+{
+	uint8_t i;
+	uint64_t tmp;
+	uint64_t core_total_proc_us = 0ULL;
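+	/* Note: total_proc_us is in microseconds and uptime_ms in milliseconds,
+	 * so the utilization values computed below are effectively in units
+	 * of 0.1%. */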
+	for (i = 0; i < vha->hw_props.num_cnn_core_devs; i++) {
+		/* Calculate core utilization. */
+		tmp = vha->stats.core_stats[i].total_proc_us;
+		do_div(tmp, vha->stats.uptime_ms);
+		vha->stats.core_stats[i].utilization = tmp;
+		/* Calculate WM utilization. */
+		tmp = vha->stats.wm_stats[i].total_proc_us;
+		do_div(tmp, vha->stats.uptime_ms);
+		vha->stats.wm_stats[i].utilization = tmp;
+		/* Calculate cumulative core processing time. */
+		core_total_proc_us += vha->stats.core_stats[i].total_proc_us;
+	}
+	/* Calculate cluster utilization. */
+	tmp = core_total_proc_us;
+	do_div(tmp, (vha->stats.uptime_ms * vha->hw_props.num_cnn_core_devs));
+	vha->stats.cnn_utilization = tmp;
+}
+
+#ifdef VHA_EVENT_INJECT
+/*
+ * Inject EVENT_STATUS bits, requested by respective debugfs nodes, to
+ * the registers defined by the currently handled WM.
+ */
+static inline void __inject_event_regs(struct vha_dev* vha, struct vha_mc_irq_status* irq_status)
+{
+	int id, wm_id;
+	u32 mask, wm_mask;
+	uint64_t vha_cr_sys_event = vha->injection.vha_cr_sys_event & VHA_CR_SYS_EVENT_INJECT_MASKFULL;
+	uint64_t vha_cr_wm_event = vha->injection.vha_cr_wm_event & VHA_CR_WM_EVENT_INJECT_MASKFULL;
+	uint64_t vha_cr_core_event = vha->injection.vha_cr_core_event & VHA_CR_CORE_EVENT_INJECT_MASKFULL;
+	uint64_t vha_cr_interconnect_event = vha->injection.vha_cr_interconnect_event & VHA_CR_INTERCONNECT_EVENT_INJECT_MASKFULL;
+
+	if (!__EVENT_INJECT())
+		return;
+
+	if (vha_cr_sys_event) {
+		IOWRITE64_CR_REGIO(vha_cr_sys_event, SYS_EVENT_INJECT);
+	}
+
+	/* handle WM event injection */
+	wm_mask = VHA_CR_GETBITS(HOST_EVENT_SOURCE, WM, irq_status->event_source);
+	if (!wm_mask)
+		return;
+	spin_lock_irqsave(&vha->irq_lock, vha->irq_flags);
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		if(~wm_mask & (1 << wm_id))
+			continue; /* inject only to currently handled WM's */
+		if(vha_cr_wm_event) {
+			VHA_SELECT_WM(wm_id);
+			IOWRITE64_CR_REGIO(vha_cr_wm_event, WM_EVENT_INJECT);
+		}
+		/* Now handle this WM's core and IC injections. IC sources are the same as core sources. */
+		if (!vha_cr_core_event && !vha_cr_interconnect_event)
+			continue;
+		/* get cores handled by specific WM, inject errors only to those cores */
+		mask = vha_wm_get_cores(vha, wm_id);
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if(~mask & (1 << id))
+				continue; /* inject only to currently handled CORE's */
+			if(vha_cr_core_event) {
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT,
+																	MASK, (1 << id)),
+																	CORE_CTRL_INDIRECT);
+				IOWRITE64_CR_REGIO(vha_cr_core_event, CORE_EVENT_INJECT);
+			}
+			if(vha_cr_interconnect_event) {
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(IC_CORE_INDIRECT,
+																	MASK, (1 << id)),
+																	IC_CORE_INDIRECT);
+				IOWRITE64_CR_REGIO(vha_cr_interconnect_event, INTERCONNECT_EVENT_INJECT);
+			}
+		}
+	}
+	/* read new injected event sources */
+	irq_status->event_source |= IOREAD64_CR_REGIO(HOST_EVENT_SOURCE);
+	spin_unlock_irqrestore(&vha->irq_lock, vha->irq_flags);
+}
+
+static inline void __inject_parity_err(struct vha_dev* vha, struct vha_mc_irq_status* irq_status)
+{
+	int id, wm_id;
+	u32 mask, wm_mask;
+
+	if (!__EVENT_INJECT())
+		return;
+
+	if (VHA_REG_GET_PARITY_ERROR(vha->injection.vha_cr_sys_event)) {
+		VHA_REG_SET_PARITY_ERROR(irq_status->sys_events);
+		irq_status->event_source |= VHA_CR_HOST_EVENT_SOURCE_SYS_EN;
+	}
+
+	wm_mask = VHA_CR_GETBITS(HOST_EVENT_SOURCE, WM, irq_status->event_source);
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		if(~wm_mask & (1 << wm_id))
+			continue; /* inject only to currently handled WM's */
+		if (VHA_REG_GET_PARITY_ERROR(vha->injection.vha_cr_wm_event)) {
+			VHA_REG_SET_PARITY_ERROR(irq_status->wm_events[wm_id]);
+			irq_status->event_source |= 1 << (wm_id + VHA_CR_HOST_EVENT_SOURCE_WM_SHIFT);
+		}
+
+		mask = vha_wm_get_cores(vha, wm_id);
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if(~mask & (1 << id))
+				continue; /* inject only to currently handled CORE's */
+			if (VHA_REG_GET_PARITY_ERROR(vha->injection.vha_cr_interconnect_event)) {
+				VHA_REG_SET_PARITY_ERROR(irq_status->ic_events[id]);
+				irq_status->event_source |= 1 << (id + VHA_CR_HOST_EVENT_SOURCE_IC_SHIFT);
+			}
+		}
+	}
+}
+#endif
+
+/* Top half */
+irqreturn_t vha_handle_irq(struct device *dev)
+{
+	struct vha_dev *vha = vha_dev_get_drvdata(dev);
+	irqreturn_t ret = IRQ_NONE;
+	struct vha_mc_irq_status irq_status = {0};
+	uint32_t multi_src_mask = 0;
+	uint8_t id;
+	struct TIMESPEC hw_proc_end[VHA_NUM_CORES] = {{0}};
+	bool hw_proc_end_recorded[VHA_NUM_CORES] = {0};
+
+#define CHECK_FOR_DEAD_HW(r) \
+	if (r == VHA_DEAD_HW || r == ~0) { \
+		WARN_ONCE(1, "Hardware is dead!"); \
+		if (!in_interrupt()) \
+			mutex_unlock(&vha->lock); \
+		return IRQ_NONE; \
+	}
+
+	/* Note: the LOGIC_ERROR field is present in most of these EVENT regs,
+	 * but its definition varies between them, so its exact semantics are
+	 * unclear.
+	 */
+#define CHECK_FOR_LOGIC_ERROR(s, r) \
+	if (r & VHA_##s##_EVENT_TYPE(LOGIC_ERROR)) { \
+		WARN_ONCE(1, "Parity error detected!"); \
+		if (!in_interrupt()) \
+			mutex_unlock(&vha->lock); \
+		return IRQ_NONE; \
+	}
+
+	if (!vha)
+		return IRQ_NONE;
+
+	/* Note: Top half can be called from the platform worker thread */
+	if (!in_interrupt())
+		mutex_lock(&vha->lock);
+
+	irq_status.event_source = IOREAD64_CR_REGIO(HOST_EVENT_SOURCE);
+	/* On fpga platform it is possible to get a spurious interrupt when the hw died.
+	 * Do not proceed, just throw a warning. */
+	CHECK_FOR_DEAD_HW(irq_status.event_source);
+
+#ifdef VHA_EVENT_INJECT
+	__inject_event_regs(vha, &irq_status);
+#endif
+
+	if (VHA_CR_GETBITS(HOST_EVENT_SOURCE, SYS, irq_status.event_source)) {
+		/* Read events. */
+		irq_status.sys_events = IOREAD64_CR_REGIO(SYS_EVENT_STATUS);
+		/* Just in case check for dead hw. */
+		CHECK_FOR_DEAD_HW(irq_status.sys_events);
+#ifdef VHA_SCF
+		if (vha->hw_props.supported.parity && !vha->parity_disable) {
+			uint32_t i;
+			for (i = 0; i < VHA_PARITY_READ_COUNT_MAX; i++) {
+				/* Finish if bit parity is ok */
+				if (!img_mem_calc_parity(irq_status.sys_events))
+					break;
+				/* Otherwise re-read the reg. */
+				irq_status.sys_events = IOREAD64_CR_REGIO(SYS_EVENT_STATUS);
+			}
+			/* Raise an error if maximum re-read count is reached. */
+			if (i == VHA_PARITY_READ_COUNT_MAX) {
+				dev_err(dev, "SYS_EVENT_STATUS register parity error!\n");
+				/* Use the real event to indicate the error */
+				VHA_REG_SET_PARITY_ERROR(irq_status.sys_events);
+			}
+		}
+#endif
+		/* Check for hw logic error. */
+		/* Note: disabled until LOGIC_ERROR semantics are clarified (see above). */
+		//CHECK_FOR_LOGIC_ERROR(SYS, irq_status.sys_events);
+
+		/* Wake the thread even if only the parity error bit is set; an
+		 * erroneous event may raise the parity bit alone, without any
+		 * of the other bits. */
+		if (irq_status.sys_events & (VHA_SYS_EVENTS_DEFAULT | VHA_REG_PARITY_ERROR_EN)) {
+			/* Clear interrupts (best not to write pdump in ISR). */
+			IOWRITE64_CR_REGIO(irq_status.sys_events & VHA_SYS_EVENTS_DEFAULT,
+								SYS_EVENT_CLEAR);
+			ret = IRQ_WAKE_THREAD;
+		}
+	}
+	/* Read WM event source mask. */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, WM,
+												irq_status.event_source);
+	if (multi_src_mask) {
+		spin_lock_irqsave(&vha->irq_lock, vha->irq_flags);
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (multi_src_mask & (1 << id)) {
+				/* Select WM to read events from. */
+				VHA_SELECT_WM(id);
+				/* Read events. */
+				irq_status.wm_events[id] = IOREAD64_CR_REGIO(WM_EVENT_STATUS);
+				/* Just in case check for dead hw. */
+				CHECK_FOR_DEAD_HW(irq_status.wm_events[id]);
+
+				/* Record hw processing end timestamps */
+				GETNSTIMEOFDAY(&hw_proc_end[id]);
+				hw_proc_end_recorded[id] = true;
+#ifdef VHA_SCF
+				if (vha->hw_props.supported.parity && !vha->parity_disable) {
+					uint32_t i;
+					for (i = 0; i < VHA_PARITY_READ_COUNT_MAX; i++) {
+						/* Finish if parity is ok */
+						if (!img_mem_calc_parity(irq_status.wm_events[id]))
+							break;
+						/* Otherwise re-read the reg. */
+						irq_status.wm_events[id] = IOREAD64_CR_REGIO(WM_EVENT_STATUS);
+					}
+					/* Raise an error if maximum re-read count is reached. */
+					if (i == VHA_PARITY_READ_COUNT_MAX) {
+						dev_err(dev, "WM_EVENT_STATUS[%u] register parity error!\n", id);
+						/* Use the real event to indicate the error */
+						VHA_REG_SET_PARITY_ERROR(irq_status.wm_events[id]);
+					}
+				}
+#endif
+				{
+					/* Post check for AXI bus errors */
+					uint64_t ace_status = IOREAD64(vha->reg_base, VHA_CR_ACE_STATUS);
+					if (ace_status) {
+						dev_err(vha->dev, "AXI bus protocol error: %#llx\n",
+									ace_status);
+						/* Use AXI error event to indicate that */
+						irq_status.event_source |= VHA_CR_SETBITS(HOST_EVENT_SOURCE, SYS, 1);
+						irq_status.sys_events |= VHA_CR_SETBITS(SYS_EVENT_TYPE, AXI_ERROR, 1);
+					}
+				}
+
+				/* Wake the thread even if only the parity error bit is set; an
+				 * erroneous event may raise the parity bit alone, without any
+				 * of the other bits. */
+				if (irq_status.wm_events[id] & (VHA_WM_EVENTS_DEFAULT | VHA_REG_PARITY_ERROR_EN)) {
+					/* Events can't be cleared, disable to avoid interrupt storm */
+					IOWRITE64_CR_REGIO(0, WM_EVENT_ENABLE);
+					ret = IRQ_WAKE_THREAD;
+				}
+			}
+		}
+		spin_unlock_irqrestore(&vha->irq_lock, vha->irq_flags);
+	}
+	/* Read CORE event source mask. */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, CORE,
+												irq_status.event_source);
+	/* Note: Direct (Host) core event is only used for frequency measurement,
+	 * Indirect (WM) core events are read in bottom handler */
+	if (multi_src_mask) {
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (multi_src_mask & (1 << id)) {
+				/* Select core to read events from. */
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT,
+									MASK, (1 << id)),
+									CORE_CTRL_INDIRECT);
+				/* Read events. */
+				/* In normal operation CORE events are routed to WM,
+				 * therefore there's no need to handle parity here
+				 */
+				irq_status.core_events[id] = IOREAD64_CR_REGIO(CORE_EVENT_HOST_STATUS);
+				/* Just in case check for dead hw. */
+				CHECK_FOR_DEAD_HW(irq_status.core_events[id]);
+				/* Check for hw logic error. */
+				/* Note: disabled until LOGIC_ERROR semantics are clarified (see above). */
+				//CHECK_FOR_LOGIC_ERROR(CORE, irq_status.core_events[id]);
+				if (irq_status.core_events[id] & VHA_CORE_EVENTS_DEFAULT) {
+					/* Clear interrupts (best not to write pdump in ISR). */
+					IOWRITE64_CR_REGIO(irq_status.core_events[id] & VHA_CORE_EVENTS_DEFAULT,
+										CORE_EVENT_HOST_CLEAR);
+					/* Record hw processing end timestamps. This stat update
+					 * is used only for cluster clock measurement, so it is
+					 * executed only once after the module is loaded. */
+					GETNSTIMEOFDAY(&hw_proc_end[id]);
+					hw_proc_end_recorded[id] = true;
+					ret = IRQ_WAKE_THREAD;
+				}
+			}
+		}
+	}
+	/* Read IC event source mask. */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, IC,
+												irq_status.event_source);
+	/* Indirect (WM) interconnect events are read in bottom handler */
+	if (multi_src_mask) {
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (ret || (multi_src_mask & (1 << id))) {
+				/* Select IC to read events from. */
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(IC_CORE_INDIRECT,
+									MASK, (1 << id)),
+									IC_CORE_INDIRECT);
+				/* Read events. */
+				/* In normal operation IC events are routed to WM,
+				 * therefore there's no need to handle parity here
+				 */
+				irq_status.ic_events[id] = IOREAD64_CR_REGIO(INTERCONNECT_EVENT_HOST_STATUS);
+#ifdef VHA_SCF
+				if (vha->hw_props.supported.parity && !vha->parity_disable) {
+					uint32_t i;
+					for (i = 0; i < VHA_PARITY_READ_COUNT_MAX; i++) {
+						/* Finish if parity is ok */
+						if (!img_mem_calc_parity(irq_status.ic_events[id]))
+							break;
+						/* Otherwise re-read the reg. */
+						irq_status.ic_events[id] = IOREAD64_CR_REGIO(INTERCONNECT_EVENT_HOST_STATUS);
+					}
+					/* Raise an error if maximum re-read count is reached. */
+					if (i == VHA_PARITY_READ_COUNT_MAX) {
+						dev_err(dev, "INTERCONNECT_EVENT_HOST_STATUS[%u] register parity error!\n", id);
+						/* Use the real event to indicate the error */
+						VHA_REG_SET_PARITY_ERROR(irq_status.ic_events[id]);
+					}
+				}
+#endif
+				/* Just in case check for dead hw. */
+				CHECK_FOR_DEAD_HW(irq_status.ic_events[id]);
+				/* Check for hw logic error - currently disabled. */
+				//CHECK_FOR_LOGIC_ERROR(IC, irq_status.ic_events[id]);
+				if (irq_status.ic_events[id] & (VHA_IC_EVENTS_DEFAULT | VHA_REG_PARITY_ERROR_EN)) {
+					/* Clear interrupts (best not to write pdump in ISR). */
+					IOWRITE64_CR_REGIO(irq_status.ic_events[id] & VHA_IC_EVENTS_DEFAULT,
+										INTERCONNECT_EVENT_HOST_CLEAR);
+					ret = IRQ_WAKE_THREAD;
+				}
+			}
+		}
+	}
+
+#ifdef VHA_EVENT_INJECT
+	__inject_parity_err(vha, &irq_status);
+#endif
+
+	if (!in_interrupt())
+		mutex_unlock(&vha->lock);
+
+	if (ret == IRQ_WAKE_THREAD) {
+		spin_lock(&vha->irq_lock);
+		/* Store all the event info. */
+		vha->irq_status.event_source |= irq_status.event_source;
+		vha->irq_status.sys_events   |= irq_status.sys_events;
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			vha->irq_status.wm_events[id] |= irq_status.wm_events[id];
+			if (hw_proc_end_recorded[id]) {
+				/* Record hw processing end timestamps */
+				VHA_WM_STAT_SHIFT_PROC_END(vha, id);
+				VHA_SET_WM_STAT(vha, hw_proc_end, id, hw_proc_end[id]);
+			}
+		}
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			vha->irq_status.core_events[id] |= irq_status.core_events[id];
+			vha->irq_status.ic_events[id]   |= irq_status.ic_events[id];
+		}
+		spin_unlock(&vha->irq_lock);
+	}
+
+#undef CHECK_FOR_DEAD_HW
+#undef CHECK_FOR_LOGIC_ERROR
+	if (ret) {
+		dev_dbg(dev, "IRQ EVT:0x%08llx SYS:0x%08llx\n", irq_status.event_source, irq_status.sys_events);
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
+			if (irq_status.wm_events[id] || irq_status.core_events[id] || irq_status.ic_events[id])
+				dev_dbg(dev, "WM%d:0x%08llx CORE%d:0x%08llx IC%d:0x%08llx\n",
+						id, irq_status.wm_events[id], id, irq_status.core_events[id],
+				id, irq_status.ic_events[id]);
+	}
+
+	return ret;
+}
+
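+/*
+ * Kick the command queued behind the pending one on the given WM,
+ * preserving the pending slot across the kick. Skipped when low latency
+ * is disabled or in self-kick mode, or when nothing is actually queued.
+ */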
+static void vha_do_queued_cmd(struct vha_dev *vha, uint8_t wm_id)
+{
+	struct vha_cmd *cmd, *pend;
+
+	cmd = vha->queuedcmd[wm_id].cmd;
+
+#if defined(DEBUG)
+	{
+		char queued_txt[24] = "none";
+		char pending_txt[24] = "none";
+		if (cmd)
+			snprintf(queued_txt, 24, "0x%08x/%u",
+					cmd->user_cmd.cmd_id, cmd->session->id);
+		if (vha->pendcmd[wm_id].cmd)
+			snprintf(pending_txt, 24, "0x%08x/%u",
+					vha->pendcmd[wm_id].cmd->user_cmd.cmd_id,
+					vha->pendcmd[wm_id].cmd->session->id);
+		dev_dbg(vha->dev,
+				"%s: WM%u pending %s, queued %s\n",
+				__func__, wm_id, pending_txt, queued_txt);
+	}
+#endif
+
+	if (!cmd ||
+			vha->low_latency == VHA_LL_DISABLED ||
+			vha->low_latency == VHA_LL_SELF_KICK ||
+			!cmd->queued) {
+		dev_dbg(vha->dev, "%s: skipping!\n", __func__);
+		return;
+	}
+
+	/* store actual pending command as it will be modified */
+	pend = vha->pendcmd[wm_id].cmd;
+
+	/* at this point we should be able to process the cmd */
+	vha_do_cnn_cmd(cmd);
+
+	/* restore pending */
+	vha->pendcmd[wm_id].cmd = pend;
+}
+
+/*
+ * Roll back commands for a particular WM.
+ */
+static bool vha_rollback_wm_cmds(struct vha_dev *vha, uint8_t wm_id,
+		bool free_res)
+{
+	bool processing = false;
+#if defined(DEBUG)
+	char queued_txt[24] = "none";
+	char pending_txt[24] = "none";
+#endif
+	/* Not processed commands are still on the pending list
+	 * of each session, so just mark the hw pending lists as empty */
+	if (vha->pendcmd[wm_id].cmd) {
+#if defined(DEBUG)
+		snprintf(pending_txt, 24, "0x%08x/%u",
+				vha->pendcmd[wm_id].cmd->user_cmd.cmd_id,
+				vha->pendcmd[wm_id].cmd->session->id);
+#endif
+		if (free_res) {
+			/* Free command resources. */
+			vha_wm_release_cores(vha,
+					vha->pendcmd[wm_id].cmd->hw_sched_info.core_mask, false);
+			vha_dev_free_cmd_res(vha, vha->pendcmd[wm_id].cmd, false);
+			vha->pri_q_counters[vha->pendcmd[wm_id].cmd->user_cmd.priority]++;
+		}
+		VHA_INC_WL_STAT(vha, kicks_aborted, vha->pendcmd[wm_id].cmd);
+		vha->stats.cnn_kicks_aborted++;
+		vha->pendcmd[wm_id].cmd->in_hw = false;
+		vha->pendcmd[wm_id].cmd->queued = false;
+		vha->pendcmd[wm_id].cmd->rolled_back = true;
+		vha->pendcmd[wm_id].cmd = NULL;
+		processing = true;
+	}
+	/* Roll back the queued (low latency) command as well. */
+	if (vha->queuedcmd[wm_id].cmd) {
+#if defined(DEBUG)
+		snprintf(queued_txt, 24, "0x%08x/%u",
+				vha->queuedcmd[wm_id].cmd->user_cmd.cmd_id,
+				vha->queuedcmd[wm_id].cmd->session->id);
+#endif
+		/* Free command resources. */
+		vha_wm_release_cores(vha,
+				vha->queuedcmd[wm_id].cmd->hw_sched_info.core_mask, false);
+		vha_dev_free_cmd_res(vha, vha->queuedcmd[wm_id].cmd, false);
+		if (vha->low_latency == VHA_LL_SELF_KICK) {
+			VHA_INC_WL_STAT(vha, kicks_aborted, vha->queuedcmd[wm_id].cmd);
+			vha->stats.cnn_kicks_aborted++;
+			vha->pri_q_counters[vha->queuedcmd[wm_id].cmd->user_cmd.priority]++;
+		}
+		vha->queuedcmd[wm_id].cmd->in_hw = false;
+		vha->queuedcmd[wm_id].cmd->queued = false;
+		vha->queuedcmd[wm_id].cmd->rolled_back = true;
+		vha->queuedcmd[wm_id].cmd = NULL;
+	}
+#if defined(DEBUG)
+	dev_dbg(vha->dev, "%s: WM%u pending %s, queued %s\n",
+			__func__, wm_id, pending_txt, queued_txt);
+#endif
+
+	return processing;
+}
+
+bool vha_rollback_cmds(struct vha_dev *vha)
+{
+	uint32_t wm_id;
+	bool processing = false;
+
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		bool wm_processing = vha_rollback_wm_cmds(vha, wm_id, true);
+		processing = processing || wm_processing;
+	}
+
+	return processing;
+}
+
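+/*
+ * Reset every WM that has a pending command and clear any WM related
+ * interrupt state recorded for it, so that stale events are not acted
+ * upon later.
+ */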
+static void vha_stop_processing(struct vha_dev *vha)
+{
+	uint32_t wm_id;
+
+	VHA_LOCK_WM();
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++)
+		if (vha->pendcmd[wm_id].cmd != NULL) {
+			uint64_t wm_mask = VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM, VHA_WM_ID_TO_MASK(wm_id));
+			vha_wm_reset(vha, &vha->pendcmd[wm_id].cmd->hw_sched_info);
+			VHA_SELECT_WM(wm_id);
+			/* Remove WM related interrupt info if it happens to be set. */
+			if (vha->irq_status.event_source & wm_mask)
+			{
+				/* Unset the WM related source bit. */
+				vha->irq_status.event_source &= ~wm_mask;
+				/* Clear all WM related events. */
+				IOWRITE64_CR_REGIO(vha->irq_status.wm_events[wm_id] & VHA_WM_EVENTS_DEFAULT,
+									WM_EVENT_CLEAR);
+				vha->irq_status.wm_events[wm_id] = 0ULL;
+			}
+		}
+	VHA_UNLOCK_WM();
+}
+
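+/*
+ * Suspend sequence: stop in-flight processing, roll back the affected
+ * commands (they remain on the sessions' pending lists), then force the
+ * hardware off.
+ */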
+int vha_dev_suspend_work(struct vha_dev *vha)
+{
+	bool processing = false;
+	int ret;
+
+	/* Check if anything is being processed right now. */
+	vha_stop_processing(vha);
+	/* Rollback commands after hw is stopped. */
+	processing = vha_rollback_cmds(vha);
+	/* Forcing hardware disable. */
+	ret = vha_dev_stop(vha, processing);
+
+	return ret;
+}
+
+/*
+ * Handles the command (of given cmd_idx) already processed by the hw.
+ */
+static bool vha_handle_cmd(struct vha_dev *vha, uint8_t wm_id, uint64_t status,
+		int err, uint64_t rsp_err_flags)
+{
+	struct vha_cmd *cmd = NULL;
+
+	if (wm_id >= vha->hw_props.num_cnn_core_devs)
+		return false;
+
+	cmd = vha->pendcmd[wm_id].cmd;
+	if (unlikely(!cmd)) {
+		dev_dbg(vha->dev, "No command. Probably it has been aborted\n");
+		return false;
+	}
+
+	vha_cnn_cmd_completed(cmd, status, err, rsp_err_flags);
+
+	if (status) {
+		/* Rollback any queued command ... */
+		vha_rollback_wm_cmds(vha, wm_id, false);
+		/* Notify immediately current command */
+		vha_cmd_notify(cmd);
+
+		return false;
+	}
+
+	if (vha->queuedcmd[wm_id].cmd)
+		vha->pendcmd[wm_id].cmd = vha->queuedcmd[wm_id].cmd;
+	else
+		vha->pendcmd[wm_id].cmd = NULL;
+
+	vha->queuedcmd[wm_id].cmd = NULL;
+	if (vha->pendcmd[wm_id].cmd)
+		dev_dbg(vha->dev, "%s: WM%u 0x%08x/%u -> new pending 0x%08x/%u\n",
+				__func__, wm_id, cmd->user_cmd.cmd_id, cmd->session->id,
+				vha->pendcmd[wm_id].cmd->user_cmd.cmd_id,
+				vha->pendcmd[wm_id].cmd->session->id);
+	else
+		dev_dbg(vha->dev, "%s: WM%u 0x%08x/%u -> no new pending\n",
+				__func__, wm_id, cmd->user_cmd.cmd_id, cmd->session->id);
+
+	vha_cmd_notify(cmd);
+
+	return true;
+}
+
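+/*
+ * Increment the kick counter of every core set in core_mask,
+ * e.g. core_mask == 0x5 increments kicks_array[0] and kicks_array[2].
+ */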
+void vha_dev_update_per_core_kicks(uint8_t core_mask, uint32_t *kicks_array)
+{
+	while (core_mask != 0) {
+		uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);
+		core_mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
+		kicks_array[curr_core_id]++;
+	}
+}
+
+static int vha_report_wm_rsp_failure(struct vha_dev *vha, uint8_t wm_id,
+		uint64_t wm_rsp_status, uint64_t *core_status, uint64_t *ic_status,
+		enum vha_reset_type *reset_type, uint64_t *error_flags)
+{
+	uint8_t err_code = VHA_WM_RESPONSE_GET_ERROR_CODE(wm_rsp_status);
+	int cmdid = -1;
+	int sesid = -1;
+	uint32_t i = 0;
+	int err = -EIO;
+
+	if (vha->pendcmd[wm_id].cmd) {
+		cmdid = vha->pendcmd[wm_id].cmd->user_cmd.cmd_id;
+		sesid = vha->pendcmd[wm_id].cmd->session->id;
+	}
+	if (vha_observers.error)
+		vha_observers.error(vha->id, sesid, cmdid, wm_rsp_status);
+
+	if (VHA_REG_GET_PARITY_ERROR(wm_rsp_status)) {
+		dev_err(vha->dev, " WM%u response error: PARITY\n", wm_id);
+		*reset_type = VHA_RESET_TYPE_WM;
+		*error_flags |= VHA_RSP_ERROR(SW_WM_PARITY_ERROR);
+	} else if (VHA_REG_GET_WL_ID_MISMATCH_ERROR(wm_rsp_status)) {
+		dev_err(vha->dev, " WM%u response error: WL_ID_MISMATCH\n", wm_id);
+		*reset_type = VHA_RESET_TYPE_WM;
+		*error_flags |= VHA_RSP_ERROR(SW_WL_ID_MISMATCH_ERROR);
+	} else if (VHA_REG_GET_CONF_ERROR(wm_rsp_status)) {
+		dev_err(vha->dev, " WM%u response error: CONFIRMATION_WRITES\n", wm_id);
+		*reset_type = VHA_RESET_TYPE_WM;
+		*error_flags |= VHA_RSP_ERROR(SW_CONF_ERROR);
+	} else if (VHA_REG_GET_COMBINED_CRC_ERROR(wm_rsp_status)) {
+		dev_err(vha->dev, " WM%u response error: COMBINED_CRC\n", wm_id);
+		*reset_type = VHA_RESET_TYPE_WM;
+		*error_flags |= VHA_RSP_ERROR(SW_CRC_MISMATCH_ERROR);
+	} else {
+		while (i < ARRAY_SIZE(wm_rsp_err_codes)) {
+			if (wm_rsp_err_codes[i].e == err_code) {
+				uint8_t core_id = VHA_WM_RESPONSE_GET_FAILED_CORE_IDX(wm_rsp_status);
+				/* Store reset type. */
+				*reset_type = wm_rsp_err_codes[i].reset_type;
+						/* Error that caused the Workload Manager to halt. */
+				dev_err(vha->dev, " WM%u error code:%d -> %s, failure on core%u\n",
+						wm_id, err_code, wm_rsp_err_codes[i].s, core_id);
+				*error_flags |=  wm_rsp_err_codes[i].rsp_err;
+				if (core_id < vha->hw_props.num_cnn_core_devs) {
+					i = 0;
+					while (core_err_bits[i].e != 0) {
+						if (core_status[core_id] & core_err_bits[i].b) {
+							dev_err(vha->dev, "         %s\n", core_err_bits[i].s);
+							err = core_err_bits[i].e;
+						}
+						i++;
+					}
+					i = 0;
+					while (ic_err_bits[i].e != 0) {
+						if (ic_status[core_id] & ic_err_bits[i].b) {
+							dev_err(vha->dev, "         %s\n", ic_err_bits[i].s);
+							err = ic_err_bits[i].e;
+						}
+						i++;
+					}
+				} else
+					dev_err(vha->dev, "         invalid FAILED_CORE_ID, should be <%u\n",
+							vha->hw_props.num_cnn_core_devs);
+				goto exit;
+			}
+			i++;
+		}
+
+		dev_err(vha->dev, " invalid WM ERROR_CODE: %u\n", err_code);
+	}
+
+exit:
+	return err;
+}
+
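+/*
+ * Fail all pending commands after a system level error: notify observers,
+ * update stats, reset the WMs, release their cores and complete the
+ * commands with the given error status.
+ */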
+static void vha_handle_sys_failure(struct vha_dev *vha, uint64_t status, int err, uint64_t rsp_err_flags)
+{
+	int cmdid = -1;
+	int sesid = -1;
+	uint32_t wm_id;
+	struct vha_cmd *cmd = NULL;
+
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		cmd = vha->pendcmd[wm_id].cmd;
+		if (cmd) {
+			cmdid = cmd->user_cmd.cmd_id;
+			sesid = cmd->session->id;
+		}
+		if (vha_observers.error)
+			vha_observers.error(vha->id, sesid, cmdid, status);
+		cmdid = -1;
+
+		if (cmd) {
+			/* Update stats. */
+			vha->stats.total_failures++;
+			vha->stats.cnn_kicks_completed++;
+			VHA_INC_WL_STAT(vha, kicks_completed, cmd);
+			vha_wm_reset(vha, &cmd->hw_sched_info);
+			/* Free command resources. */
+			vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+			vha_dev_free_cmd_res(vha, cmd, true);
+		}
+		/* Move command queue. */
+		vha_do_queued_cmd(vha, wm_id);
+		/* Handle actual command */
+		vha_handle_cmd(vha, wm_id, status, err, rsp_err_flags);
+	}
+}
+
+static void vha_handle_wm_failure(struct vha_dev *vha, uint8_t wm_id,
+		uint64_t status, int err, uint64_t rsp_err_flags)
+{
+	int cmdid = -1;
+	int sesid = -1;
+	struct vha_cmd *cmd = NULL;
+
+	cmd = vha->pendcmd[wm_id].cmd;
+	if (cmd) {
+		cmdid = cmd->user_cmd.cmd_id;
+		sesid = cmd->session->id;
+	}
+	if (vha_observers.error)
+		vha_observers.error(vha->id, sesid, cmdid, status);
+
+	if (cmd) {
+		/* Update stats. */
+		vha->stats.total_failures++;
+		vha->stats.cnn_kicks_completed++;
+		VHA_INC_WL_STAT(vha, kicks_completed, cmd);
+		/* Free command resources. */
+		vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+		vha_dev_free_cmd_res(vha, cmd, true);
+	}
+	/* Move command queue. */
+	vha_do_queued_cmd(vha, wm_id);
+	/* Handle actual command */
+	vha_handle_cmd(vha, wm_id, status, err, rsp_err_flags);
+}
+
+static enum vha_reset_type vha_sys_get_reset_type(struct vha_dev *vha,
+		uint64_t event_mask) {
+	enum vha_reset_type sys_reset_type = VHA_RESET_TYPE_NONE;
+#ifdef VHA_SCF
+	uint64_t sys_err_events = VHA_SYS_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+	uint64_t sys_err_events = VHA_SYS_ERR_EVENTS;
+#endif
+	if (event_mask & sys_err_events) {
+		uint32_t i = 0;
+		while (sys_err_bits[i].e != 0) {
+			if (event_mask & sys_err_bits[i].b) {
+				/* Indicate the highest reset level of all errors. */
+				if (sys_err_bits[i].reset_type > sys_reset_type)
+					sys_reset_type = sys_err_bits[i].reset_type;
+			}
+			i++;
+		}
+	}
+
+	return sys_reset_type;
+}
+
+static void vha_sys_get_wm_reset_types(struct vha_dev *vha, uint64_t event_mask,
+		enum vha_reset_type *wm_reset_types) {
+	uint8_t wm_id;
+	uint8_t pf_errors;
+#ifdef VHA_SCF
+//	uint8_t parity_errors;
+#endif
+
+	/* Check MMU page fault errors. */
+	pf_errors = (uint8_t)VHA_CR_GETBITS(SYS_EVENT_STATUS, MMU_PAGE_FAULT,
+										event_mask);
+	if (pf_errors) {
+		wm_id = 0;
+		while(wm_id < vha->hw_props.num_cnn_core_devs) {
+			if (pf_errors & (1 << wm_id))
+				wm_reset_types[wm_id] = VHA_RESET_TYPE_MMU;
+			else
+				wm_reset_types[wm_id] = VHA_RESET_TYPE_NONE;
+			wm_id++;
+		}
+	}
+#ifdef VHA_SCF
+	/* Check MMU parity errors - currently disabled. */
+//	uint8_t parity_errors = (uint8_t)VHA_CR_GETBITS(SYS_EVENT_STATUS, MMU_PARITY_ERROR,
+//												event_mask);
+//	if (parity_errors) {
+//		wm_id = 0;
+//		while(wm_id < vha->hw_props.num_cnn_core_devs) {
+//			if (parity_errors & (1 << wm_id))
+//				wm_reset_types[wm_id] = VHA_RESET_TYPE_MMU;
+//			else
+//				wm_reset_types[wm_id] = VHA_RESET_TYPE_NONE;
+//			wm_id++;
+//		}
+//	}
+#endif
+}
+
+static enum vha_reset_type vha_wm_get_reset_type(struct vha_dev *vha,
+		uint64_t event_mask) {
+	enum vha_reset_type wm_reset_type = VHA_RESET_TYPE_NONE;
+#ifdef VHA_SCF
+	uint64_t wm_err_events = VHA_WM_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+	uint64_t wm_err_events = VHA_WM_ERR_EVENTS;
+#endif
+	if (event_mask & wm_err_events) {
+		uint32_t i = 0;
+		while (wm_err_bits[i].e != 0) {
+			if (event_mask & wm_err_bits[i].b) {
+				/* Indicate the highest reset level of all errors. */
+				if (wm_err_bits[i].reset_type > wm_reset_type)
+					wm_reset_type = wm_err_bits[i].reset_type;
+			}
+			i++;
+		}
+	}
+
+	return wm_reset_type;
+}
+
+static enum vha_reset_type vha_core_get_reset_type(struct vha_dev *vha,
+		uint64_t event_mask) {
+	enum vha_reset_type core_reset_type = VHA_RESET_TYPE_NONE;
+	uint64_t core_err_events = VHA_CORE_ERR_EVENTS;
+
+	if (event_mask & core_err_events) {
+		uint32_t i = 0;
+		while (core_err_bits[i].e != 0) {
+			if (event_mask & core_err_bits[i].b) {
+				/* Indicate the highest reset level of all errors. */
+				if (core_err_bits[i].reset_type > core_reset_type)
+					core_reset_type = core_err_bits[i].reset_type;
+			}
+			i++;
+		}
+	}
+
+	return core_reset_type;
+}
+
+static enum vha_reset_type vha_ic_get_reset_type(struct vha_dev *vha,
+		uint64_t event_mask) {
+	enum vha_reset_type ic_reset_type = VHA_RESET_TYPE_NONE;
+#ifdef VHA_SCF
+	uint64_t ic_err_events = VHA_IC_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+	uint64_t ic_err_events = VHA_IC_ERR_EVENTS;
+#endif
+
+	if (event_mask & ic_err_events) {
+		uint32_t i = 0;
+		while (ic_err_bits[i].e != 0) {
+			if (event_mask & ic_err_bits[i].b) {
+				/* Indicate the highest reset level of all errors. */
+				if (ic_err_bits[i].reset_type > ic_reset_type)
+					ic_reset_type = ic_err_bits[i].reset_type;
+			}
+			i++;
+		}
+	}
+
+	return ic_reset_type;
+}
+
+static int vha_report_sys_failures(struct vha_dev *vha, uint64_t event_mask, uint64_t *error_flags)
+{
+	int error = 0;
+	uint32_t i;
+	bool print_header = true;
+	uint8_t pf_status;
+
+	/* Print event status in human readable form. */
+	i = 0;
+	while (sys_err_bits[i].e != 0) {
+		if (event_mask & sys_err_bits[i].b) {
+			if (print_header) {
+				dev_err(vha->dev, " SYS event status:\n");
+				print_header = false;
+			}
+			dev_err(vha->dev, "     %s\n", sys_err_bits[i].s);
+			/* Convert from register bits into POSIX errno.
+			 * If multiple errors, then arbitrary errno choice. */
+			error = sys_err_bits[i].e;
+			*error_flags |=  sys_err_bits[i].rsp_err;
+		}
+		i++;
+	}
+
+	if (error) {
+		dev_err(vha->dev, " SYS failure:\n");
+		dev_err(vha->dev, "  SYS_CLK_STATUS0:   0x%016llx\n",
+				IOREAD64_CR_REGIO(SYS_CLK_STATUS0));
+		dev_err(vha->dev, "  SYS_EVENT_STATUS:  0x%016llx\n",
+				event_mask);
+		for (i = 0; i < vha->hw_props.num_cnn_core_devs; i++) {
+			if (vha->active_core_mask & (1 << i)) {
+				/* Select core to read clocks from. */
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT,
+													MASK, (1 << i)),
+													CORE_CTRL_INDIRECT);
+				dev_err(vha->dev, "  CORE%u CLK_STATUS0: 0x%016llx\n",
+						i, IOREAD64_CR_REGIO(CLK_STATUS0));
+			}
+		}
+	}
+
+	if (error == -ETIMEDOUT) {
+		dev_err(vha->dev, "  SLC_STATUS1:       0x%016llx\n",
+				IOREAD64_CR_REGIO(SLC_STATUS1));
+		dev_err(vha->dev, "  SLC_STATUS2:       0x%016llx\n",
+				IOREAD64_CR_REGIO(SLC_STATUS2));
+		dev_err(vha->dev, "  SLC_IDLE:          0x%016llx\n",
+				IOREAD64_CR_REGIO(SLC_IDLE));
+	}
+
+	/* Additionally report MMU PF failure if occurred. */
+	pf_status = (uint8_t)VHA_CR_GETBITS(SYS_EVENT_STATUS, MMU_PAGE_FAULT,
+										event_mask);
+	if (pf_status) {
+		/* dump mmu status */
+		vha_mmu_status(vha, pf_status);
+	}
+
+	return error;
+}
+
+static int vha_report_wm_failures(struct vha_dev *vha, uint8_t wm_id, uint64_t event_mask, uint64_t *error_flags)
+{
+	int error = 0;
+	uint32_t i;
+	bool print_header = true;
+
+	/* Print event status in human readable form. */
+	i = 0;
+	print_header = true;
+	while (wm_err_bits[i].e != 0) {
+		if (event_mask & wm_err_bits[i].b) {
+			if (print_header) {
+				dev_err(vha->dev, " WM%u event status:\n", wm_id);
+				print_header = false;
+			}
+			dev_err(vha->dev, "     %s\n", wm_err_bits[i].s);
+			/* Convert from register bits into POSIX errno.
+			 * If multiple errors, then arbitrary errno choice. */
+			error = wm_err_bits[i].e;
+			*error_flags |=  wm_err_bits[i].rsp_err;
+		}
+		i++;
+	}
+
+	if (error == -ETIMEDOUT) {
+		vha_wm_status(vha, wm_id, vha_wm_get_cores(vha, wm_id));
+	}
+	return error;
+}
+
+static int vha_report_core_failures(struct vha_dev *vha, uint8_t core_id, uint64_t event_mask, uint64_t *error_flags)
+{
+	int error = 0;
+	uint32_t i;
+	bool print_header = true;
+
+	/* Print event status in human readable form. */
+	i = 0;
+	print_header = true;
+	while (core_err_bits[i].e != 0) {
+		if (event_mask & core_err_bits[i].b) {
+			if (print_header) {
+				dev_err(vha->dev, " Core %u event status:\n", core_id);
+				print_header = false;
+			}
+			dev_err(vha->dev, "     %s\n", core_err_bits[i].s);
+			/* Convert from register bits into POSIX errno.
+			 * If multiple errors, then arbitrary errno choice. */
+			error = core_err_bits[i].e;
+			*error_flags |=  core_err_bits[i].rsp_err;
+		}
+		i++;
+	}
+
+	return error;
+}
+
+static int vha_report_ic_failures(struct vha_dev *vha, uint8_t core_id, uint64_t event_mask, uint64_t *error_flags)
+{
+	int error = 0;
+	uint32_t i;
+	bool print_header = true;
+
+	/* Print event status in human readable form. */
+	i = 0;
+	print_header = true;
+	while (ic_err_bits[i].e != 0) {
+		if (event_mask & ic_err_bits[i].b) {
+			if (print_header) {
+				dev_err(vha->dev, " IC %u event status:\n", core_id);
+				print_header = false;
+			}
+			dev_err(vha->dev, "     %s\n", ic_err_bits[i].s);
+			/* Convert from register bits into POSIX errno.
+			 * If multiple errors, then arbitrary errno choice. */
+			error = ic_err_bits[i].e;
+			*error_flags |=  ic_err_bits[i].rsp_err;
+		}
+		i++;
+	}
+
+	return error;
+}
+
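+/*
+ * Examine the error events gathered in irq_status, determine the reset
+ * type required for each WM (or a full system reset), perform selective
+ * WM/MMU resets where possible, and return the mask of WMs whose
+ * non-error events should still be processed normally.
+ */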
+static uint8_t vha_events_process_errors(struct vha_dev *vha,
+		struct vha_mc_irq_status *irq_status, bool *full_reset,
+		bool *process_sys_events, uint64_t *error_flags) {
+
+	int error = 0;
+	int wm_error = 0;
+	int core_error = 0;
+	int ic_error = 0;
+	uint8_t wm_process_mask = 0;
+	uint8_t wm_source_mask = 0;
+	uint8_t wm_id;
+	enum vha_reset_type reset_type = VHA_RESET_TYPE_NONE;
+	enum vha_reset_type wm_reset_type = VHA_RESET_TYPE_NONE;
+	enum vha_reset_type wm_reset_types[VHA_NUM_CORES] = {0};
+	uint64_t sys_err_status = 0;
+	uint64_t wm_err_status_full_reset = 0;
+	uint64_t wm_err_statuses[VHA_NUM_CORES] = {0};
+	enum vha_reset_type core_reset_type = VHA_RESET_TYPE_NONE;
+	enum vha_reset_type ic_reset_type = VHA_RESET_TYPE_NONE;
+	uint8_t core_id;
+
+#define COMBINE_SYS_WM_STATUS(s, w) \
+	(((w & ~((uint64_t)VHA_WM_ERR_EVENTS)) | s) | \
+	 ((w & ((uint64_t)VHA_WM_ERR_EVENTS)) << 32))
+#define INSERT_WM_ERROR(s, e) \
+	((s | ((uint64_t)e)) << 32)
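+/* The combined 64-bit status keeps SYS error bits (and non-error WM bits)
+ * in the lower word and WM error bits in the upper word. INSERT_WM_ERROR
+ * ORs an extra error flag into a status and moves the result into the
+ * upper (WM) word. */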
+
+	/* Assume no full reset. */
+	*full_reset = false;
+	/* Assume no SYS events. */
+	*process_sys_events = false;
+
+	/* Process SYS events. */
+	if (VHA_CR_GETBITS(HOST_EVENT_SOURCE, SYS, irq_status->event_source)) {
+#ifdef VHA_SCF
+		uint64_t sys_err_events = VHA_SYS_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+		uint64_t sys_err_events = VHA_SYS_ERR_EVENTS;
+#endif
+		sys_err_status = irq_status->sys_events & sys_err_events;
+		if (sys_err_status) {
+			/* Determine reset types. */
+			reset_type = vha_sys_get_reset_type(vha, irq_status->sys_events);
+			if (reset_type < VHA_RESET_TYPE_FULL) {
+				vha_sys_get_wm_reset_types(vha, irq_status->sys_events, wm_reset_types);
+				wm_id = 0;
+				while(wm_id < vha->hw_props.num_cnn_core_devs) {
+					if (wm_reset_types[wm_id] > VHA_RESET_TYPE_NONE)
+						wm_err_statuses[wm_id] = sys_err_status;
+					wm_id++;
+				}
+			}
+			/* Report SYS errors. */
+			error = vha_report_sys_failures(vha, irq_status->sys_events, error_flags);
+		}
+		/* If no full reset is requested at this stage
+		 * and there are non-error SYS events raised,
+		 * signal them to be processed too. */
+		if ((reset_type < VHA_RESET_TYPE_FULL) &&
+			(irq_status->sys_events & ~sys_err_events))
+			*process_sys_events = true;
+	}
+
+	/* Process WM events. */
+	/* Read WM event source mask. */
+	wm_source_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, WM,
+												irq_status->event_source);
+	if (wm_source_mask)
+		for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++)
+			if (wm_source_mask & (1 << wm_id)) {
+#ifdef VHA_SCF
+				uint64_t wm_err_events = VHA_WM_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+				uint64_t wm_err_events = VHA_WM_ERR_EVENTS;
+#endif
+				uint64_t wm_err_status = irq_status->wm_events[wm_id] & wm_err_events;
+				if (wm_err_status) {
+					/* If no full reset is requested... */
+					if (reset_type < VHA_RESET_TYPE_FULL) {
+						/* Determine reset type for this WM. */
+						wm_reset_type = vha_wm_get_reset_type(
+								vha, irq_status->wm_events[wm_id]);
+						/* If full reset is requested for this WM, just skip
+						 * checking other ones. Otherwise update reset type
+						 * for this WM if needed. */
+						if (wm_reset_type == VHA_RESET_TYPE_FULL) {
+							reset_type = VHA_RESET_TYPE_FULL;
+							wm_err_status_full_reset = wm_err_status;
+						} else if (wm_reset_type > wm_reset_types[wm_id])
+							wm_reset_types[wm_id] = wm_reset_type;
+					}
+					/* Compose accumulated error status. */
+					wm_err_statuses[wm_id] =
+							COMBINE_SYS_WM_STATUS(sys_err_status, wm_err_status);
+					/* Report WM errors. */
+					wm_error = vha_report_wm_failures(vha, wm_id,
+												irq_status->wm_events[wm_id], error_flags);
+					/* If no SYS error reported, get the first WM one. */
+					if (error == 0)
+						error = wm_error;
+				}
+			}
+
+	/* Process core events */
+	for (core_id = 0; core_id < vha->hw_props.num_cnn_core_devs; core_id++)
+		if (irq_status->core_events[core_id] & VHA_CORE_ERR_EVENTS) {
+			/* Determine reset type for this Core. */
+			core_reset_type = vha_core_get_reset_type(vha, irq_status->core_events[core_id]);
+
+			/* We do not reset the core itself, instead, we need to reset
+			   the WM that used it, so let's find it */
+			for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++)
+				if (vha->pendcmd[wm_id].cmd != NULL) {
+					uint8_t  core_mask = vha->pendcmd[wm_id].cmd->hw_sched_info.core_mask;
+					if (core_mask & (1 << core_id)) {
+						/* Override wm reset type */
+						if (core_reset_type == VHA_RESET_TYPE_FULL)
+							reset_type = VHA_RESET_TYPE_FULL;
+						else if (core_reset_type > wm_reset_types[wm_id])
+							wm_reset_types[wm_id] = core_reset_type;
+
+						core_error = vha_report_core_failures(vha, core_id,
+											irq_status->core_events[core_id], error_flags);
+
+						/* If no SYS or WM error reported, get the first Core one. */
+						if (error == 0)
+							error = core_error;
+						/* Add core error to this WM's status. */
+						wm_err_statuses[wm_id] =
+								INSERT_WM_ERROR(wm_err_statuses[wm_id], VHA_REG_WM_CORE_ERROR_EN);
+					}
+				}
+		}
+
+	/* Process IC events */
+	for (core_id = 0; core_id < vha->hw_props.num_cnn_core_devs; core_id++) {
+#ifdef VHA_SCF
+		uint64_t ic_err_events = VHA_IC_ERR_EVENTS | VHA_REG_PARITY_ERROR_EN;
+#else
+		uint64_t ic_err_events = VHA_IC_ERR_EVENTS;
+#endif
+		if (irq_status->ic_events[core_id] & ic_err_events) {
+			/* Determine reset type for this Core. */
+			ic_reset_type = vha_ic_get_reset_type(vha, irq_status->ic_events[core_id]);
+
+			/* We do not reset the core itself, instead, we need to reset
+			   the WM that used it, so let's find it */
+			for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++)
+				if (vha->pendcmd[wm_id].cmd != NULL) {
+					uint8_t  core_mask = vha->pendcmd[wm_id].cmd->hw_sched_info.core_mask;
+					if (core_mask & (1 << core_id)) {
+						/* Override wm reset type */
+						if (ic_reset_type == VHA_RESET_TYPE_FULL)
+							reset_type = VHA_RESET_TYPE_FULL;
+						else if (ic_reset_type > wm_reset_types[wm_id])
+							wm_reset_types[wm_id] = ic_reset_type;
+
+						ic_error = vha_report_ic_failures(vha, core_id,
+											irq_status->ic_events[core_id], error_flags);
+
+						/* If no SYS or WM error reported, get the first Core one. */
+						if (error == 0)
+							error = ic_error;
+						/* Add IC error to this WM's status. */
+						wm_err_statuses[wm_id] =
+								INSERT_WM_ERROR(wm_err_statuses[wm_id], VHA_REG_WM_IC_ERROR_EN);
+					}
+				}
+		}
+	}
+
+
+	/* Perform selective resets. */
+	if (reset_type < VHA_RESET_TYPE_FULL) {
+		int ret;
+		for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+			struct vha_cmd *cmd = vha->pendcmd[wm_id].cmd;
+
+			switch (wm_reset_types[wm_id]) {
+			case VHA_RESET_TYPE_MMU:
+				if (cmd) {
+					/* Invalidate MMU. */
+					ret = vha_mmu_flush_ctx(vha, cmd->session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].hw_id);
+					if(ret) {
+						dev_err(vha->dev, "Error during MMU flush, doing full reset\n");
+						wm_err_status_full_reset = wm_err_statuses[wm_id];
+						reset_type = VHA_RESET_TYPE_FULL;
+						break;
+					}
+				}
+				// fall through
+			case VHA_RESET_TYPE_WM:
+				dev_err(vha->dev, "Performing wm%d reset due to HW error detection.", wm_id);
+				if (cmd)
+					/* Reset WM and assigned cores. */
+					ret = vha_wm_reset(vha, &cmd->hw_sched_info);
+				else {
+					/* Just reset WM. */
+					struct vha_hw_sched_info sched_info = {
+							.wm_id = wm_id,
+							.core_mask = 0
+					};
+					ret = vha_wm_reset(vha, &sched_info);
+				}
+				if(ret) {
+					dev_err(vha->dev, "Error during WM%d reset, doing full reset\n", wm_id);
+					wm_err_status_full_reset = wm_err_statuses[wm_id];
+					reset_type = VHA_RESET_TYPE_FULL;
+					break;
+				}
+				VHA_LOCK_WM();
+				VHA_SELECT_WM(wm_id);
+				/* Clear all WM related events. */
+				IOWRITE64_CR_REGIO(VHA_WM_EVENTS_DEFAULT, WM_EVENT_CLEAR);
+				/* Re-enable WM events here as this WM will not be handled further. */
+				IOWRITE64_CR_REGIO(VHA_WM_EVENTS_DEFAULT, WM_EVENT_ENABLE);
+				VHA_UNLOCK_WM();
+				/* Handle pending command. */
+				vha_handle_wm_failure(vha, wm_id, wm_err_statuses[wm_id], error, *error_flags);
+				break;
+			case VHA_RESET_TYPE_NONE:
+				/* Mark WM source for normal processing if it was signalled. */
+				if (cmd)
+					wm_process_mask |= wm_source_mask & (1 << wm_id);
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	/* check once again, reset_type may have been updated due to failure during reset procedure */
+	if(reset_type == VHA_RESET_TYPE_FULL){
+		/* Handle all pending commands. */
+		vha_handle_sys_failure(vha,
+			COMBINE_SYS_WM_STATUS(sys_err_status, wm_err_status_full_reset), error, *error_flags);
+		/* Full reset is requested anyway, so skip processing further SYS events. */
+		*process_sys_events = false;
+		/* Full reset will be executed outside. Just indicate here
+		 * that it's required.*/
+		*full_reset = true;
+	}
+
+	return wm_process_mask;
+}
+
+/* If the vha event register reports WM events, handle them here. */
+static void vha_handle_wm_response(struct vha_dev *vha, uint8_t wm_id,
+		uint64_t response_status, uint64_t *core_status_array,
+		uint64_t *ic_status_array, bool *full_reset, uint64_t *error_flags)
+{
+	enum vha_reset_type reset_type = VHA_RESET_TYPE_NONE;
+	int err = *error_flags ? -EIO : 0;
+
+	if (response_status &
+			(VHA_WM_RESPONSE_STATUS(WL_FAILURE) |
+			 VHA_REG_PARITY_ERROR_EN |
+			 VHA_REG_WL_ID_MISMATCH_ERROR_EN |
+			 VHA_REG_CONF_ERROR_EN |
+			 VHA_REG_COMBINED_CRC_ERROR_EN)) {
+		err = vha_report_wm_rsp_failure(vha, wm_id, response_status,
+							core_status_array, ic_status_array, &reset_type, error_flags);
+	}
+
+	/* Move command queue. */
+	switch (reset_type) {
+	case VHA_RESET_TYPE_NONE:
+		vha_do_queued_cmd(vha, wm_id);
+		break;
+	case VHA_RESET_TYPE_WM:
+		if (!*full_reset && vha->pendcmd[wm_id].cmd) {
+			dev_err(vha->dev, "Performing wm%d reset due to HW error detection.", wm_id);
+			if (vha_wm_reset(vha, &vha->pendcmd[wm_id].cmd->hw_sched_info)) {
+				dev_err(vha->dev, "%s: Error during WM%u reset, forcing full reset upon finish",
+						__func__, wm_id);
+				*full_reset = true;
+			}
+		}
+		break;
+	case VHA_RESET_TYPE_FULL:
+		*full_reset = true;
+		break;
+	default:
+		break;
+	}
+	/* Handle actual command */
+	if (vha_handle_cmd(vha, wm_id, response_status, err, *error_flags) == false)
+		reset_type = VHA_RESET_TYPE_NONE;
+}
+
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+/* Simulating hw execution time by scheduling this delayed work. */
+void vha_dummy_worker(struct work_struct *work)
+{
+	struct vha_dummy_work *dummy_work =
+					container_of(work, struct vha_dummy_work, dummy_dwork.work);
+	struct vha_dev *vha = dummy_work->vha;
+	struct vha_cmd *cmd;
+
+	mutex_lock(&vha->lock);
+
+	cmd = vha->pendcmd[dummy_work->wm_id].cmd;
+	if (cmd) {
+		uint64_t error_flags = 0;
+		bool full_reset = false;
+		/* Record hw processing end timestamps */
+		VHA_WM_STAT_SHIFT_PROC_END(vha, cmd->hw_sched_info.wm_id);
+		GETNSTIMEOFDAY(&vha->stats.wm_stats[cmd->hw_sched_info.wm_id].hw_proc_end);
+		/* Update per core/WM stats. */
+		VHA_INC_WL_STAT(vha, kicks_completed, cmd);
+		vha->stats.cnn_kicks_completed++;
+		/* Free command resources. */
+		if (!vha->hw_sched_status.assignments[cmd->hw_sched_info.assignment_id].queued)
+			vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+		vha_dev_free_cmd_res(vha, cmd, true);
+		/* Handle current pending command */
+		vha_handle_wm_response(vha, dummy_work->wm_id, 0, NULL, NULL, &full_reset, &error_flags);
+		/* Schedule following commands */
+		vha_chk_cmd_queues(vha, true);
+	}
+
+	mutex_unlock(&vha->lock);
+}
+#endif
+
+#ifdef VHA_SCF
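+/*
+ * Wait for the confirmation writes procedure to complete: a TOP level
+ * confirmation error forces a full reset, while a core level error or a
+ * timeout only flags the command status with a confirmation error.
+ */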
+static void vha_handle_conf_status(struct vha_dev *vha, struct vha_cmd *cmd, bool *full_reset, uint64_t *status)
+{
+	if (wait_for_completion_timeout(&cmd->conf_done, msecs_to_jiffies(CONF_WRITES_WAIT_TIMEOUT_MS))) {
+		if (cmd->conf_top_error) {
+			dev_err(vha->dev, "CONF_ERR_TOP\n");
+			*full_reset = true;
+			VHA_REG_SET_CONF_ERROR(*status);
+			return;
+		}
+		if (cmd->conf_core_error) {
+			dev_err(vha->dev, "CONF_ERR_BOTTOM\n");
+			VHA_REG_SET_CONF_ERROR(*status);
+			return;
+		}
+	} else {
+		dev_err(vha->dev, "Confirmation writes procedure failed!\n");
+		VHA_REG_SET_CONF_ERROR(*status);
+	}
+}
+
+
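+/*
+ * Read the per-core combined CRCs from the session's combined CRC buffer
+ * and, if CRC checking was requested for this workload, compare them
+ * against the golden CRCs supplied in the user submit command.
+ */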
+static void vha_check_crc(struct vha_dev *vha, struct vha_cmd *cmd, uint64_t *status)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	uint32_t core_id = 0;
+	uint32_t idx = 0;
+	uint8_t num_cores = VHA_CORE_MASK_TO_NUM(sched_info->core_mask);
+	uint32_t crcs[VHA_MAX_CORES];
+	uint32_t *golden_crcs = NULL;
+	struct vha_buffer *buf = session->cnn_dbg.cnn_combined_crc;
+	bool crc_enabled = !!(cmd->user_cmd.flags & VHA_CHECK_CRC);
+
+	if (!buf || !buf->kptr) {
+		dev_err(vha->dev, "%s: Invalid crc buf\n", __func__);
+		return;
+	}
+
+	img_mem_sync_device_to_cpu(session->mem_ctx, buf->id);
+	for (core_id = 0; core_id < VHA_MAX_CORES; core_id++)
+		if (sched_info->core_mask & (1 << core_id)) {
+			memcpy(&crcs[idx], (uint8_t*)buf->kptr + core_id * VHA_COMBINED_CRC_CORE_OFFSET, sizeof(crcs[0]));
+			idx++;
+		}
+
+	vha_update_crcs(vha, crcs, num_cores);
+
+	if (crc_enabled) {
+		struct vha_user_cnn_submit_multi_cmd *cnn_user_cmd =
+			(struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd;
+		golden_crcs = cnn_user_cmd->crcs;
+
+		for (idx = 0; idx < num_cores; idx++)
+			if (crcs[idx] != golden_crcs[idx]) {
+				VHA_REG_SET_COMBINED_CRC_ERROR(*status);
+				dev_err(vha->dev, "%s: combined CRC mismatch !!!\n"
+								  "\tcrc %x\n"
+								  "\tgolden_crc %x\n", __func__, crcs[idx], golden_crcs[idx]);
+			} else {
+				dev_info(vha->dev, "%s: combined CRC ok, crc %x\n", __func__, crcs[idx]);
+			}
+	}
+}
+#endif
+
+/* Bottom half */
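+/*
+ * Reads back core/interconnect events, processes error events (performing
+ * selective or full resets as needed), pops the WM response FIFOs for
+ * completed workloads and reschedules the command queues.
+ */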
+irqreturn_t vha_handle_thread_irq(struct device *dev)
+{
+	struct vha_dev *vha = vha_dev_get_drvdata(dev);
+	irqreturn_t ret = IRQ_HANDLED;
+	struct vha_mc_irq_status irq_status;
+	uint64_t multi_src_mask = 0;
+	uint8_t id;
+	uint8_t wm_id;
+	uint8_t wm_process_mask = 0;
+	bool full_reset = false;
+	bool process_sys_events = false;
+	uint64_t error_flags = 0;
+
+	if (!vha)
+		return IRQ_NONE;
+
+	mutex_lock(&vha->lock);
+
+#ifdef CONFIG_FAULT_INJECTION
+	if (!vha->irq_bh_pid)
+		vha->irq_bh_pid = task_pid_nr(current);
+
+	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
+		current->make_it_fail = true;
+	else
+		current->make_it_fail = false;
+#endif
+
+	spin_lock_irq(&vha->irq_lock);
+	irq_status = vha->irq_status;
+	memset(&vha->irq_status, 0, sizeof(vha->irq_status));
+	if (irq_status.sys_events || vha->do_calibration) {
+		uint64_t proc_time = 0;
+
+		if (get_timespan_us(&vha->stats.wm_stats[VHA_CALIBRATION_WM_ID].hw_proc_start,
+							&vha->stats.wm_stats[VHA_CALIBRATION_WM_ID].hw_proc_end,
+							&proc_time)) {
+			vha->stats.last_proc_us = proc_time;
+		} else {
+			vha->stats.last_proc_us = 0;
+		}
+	}
+	spin_unlock_irq(&vha->irq_lock);
+
+	/* Read CORE event source mask. */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, CORE,
+												irq_status.event_source);
+	/* Check for clock calibration first. */
+	if ((multi_src_mask == VHA_CALIBRATION_CORE_MASK) &&
+			(irq_status.core_events[VHA_CALIBRATION_CORE_ID] &
+									VHA_CORE_EVENT_TYPE(CORE_WDT))) {
+		if (vha_check_calibration(vha)) {
+			goto calibration_end;
+		}
+	}
+
+	/* Read core/interconnect events if System or WM event occurred */
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		uint8_t mask = 0;
+
+		if (irq_status.wm_events[wm_id])
+			mask = vha_wm_get_cores(vha, wm_id);
+
+		if (irq_status.sys_events)
+			mask |= (1 << wm_id);
+
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if(mask & (1 << id)) {
+				/* Select core to read events from. */
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT,
+													MASK, (1 << id)),
+													CORE_CTRL_INDIRECT);
+
+				irq_status.core_events[id] |= IOREAD64_CR_REGIO(CORE_EVENT_WM_STATUS);
+				if (irq_status.core_events[id] & VHA_CORE_ERR_EVENTS) {
+					IOWRITE64_CR_REGIO(irq_status.core_events[id] & VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_CLEAR);
+					irq_status.core_events[id] |= IOREAD64_CR_REGIO(CORE_EVENT_WM_STATUS);
+				}
+
+				/* Select IC to read events from. */
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(IC_CORE_INDIRECT,
+													MASK, (1 << id)),
+													IC_CORE_INDIRECT);
+
+				irq_status.ic_events[id] |= IOREAD64_CR_REGIO(INTERCONNECT_EVENT_WM_STATUS);
+				if (irq_status.ic_events[id] & VHA_IC_ERR_EVENTS) {
+					IOWRITE64_CR_REGIO(irq_status.ic_events[id] & VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_CLEAR);
+					irq_status.ic_events[id] |= IOREAD64_CR_REGIO(INTERCONNECT_EVENT_WM_STATUS);
+				}
+			}
+		}
+	}
+
+	/* Process errors first. */
+	wm_process_mask = vha_events_process_errors(vha, &irq_status,
+											&full_reset, &process_sys_events, &error_flags);
+
+	/* Process non-error system events. */
+	if (process_sys_events) {
+		/* Handle normal system events (currently nothing to do here). */
+	}
+
+	/* Process non-failed WM events. */
+	if (wm_process_mask) {
+		uint64_t rsp_err_status = 0ULL;
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
+			if (wm_process_mask & (1 << id)) {
+				if (irq_status.wm_events[id] & VHA_WM_EVENTS) {
+					uint16_t wm_cmd_id;
+					uint64_t status;
+#ifdef VHA_SCF
+					uint64_t wm_rsp_err_events =
+							((VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL |
+								VHA_REG_PARITY_ERROR_EN | VHA_REG_WL_ID_MISMATCH_ERROR_EN) &
+								~((uint64_t)VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN |
+										(uint64_t)VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN));
+#else
+					uint64_t wm_rsp_err_events =
+							((VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL |
+								VHA_REG_WL_ID_MISMATCH_ERROR_EN) &
+								~(VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN |
+									VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN));
+#endif
+					struct vha_cmd *cmd = vha->pendcmd[id].cmd;
+
+					if (cmd != NULL) {
+						/* Select WM to read response from. */
+						VHA_LOCK_WM();
+						VHA_SELECT_WM(id);
+						/* Handle RESPONSE_FIFO. */
+						/* Read RESPONSE_FIFO_WL_STATUS. */
+						status = IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_STATUS);
+#ifdef VHA_SCF
+						if (vha->hw_props.supported.parity && !vha->parity_disable) {
+							uint32_t i;
+							for (i = 0; i < VHA_PARITY_READ_COUNT_MAX; i++) {
+								/* Finish if parity is ok */
+								if (!img_mem_calc_parity(status))
+									break;
+								/* Otherwise re-read the reg. */
+								status = IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_STATUS);
+							}
+							/* Raise an error if maximum re-read count is reached. */
+							if (i == VHA_PARITY_READ_COUNT_MAX) {
+								dev_err(dev, "WM_RESPONSE_FIFO_WL_STATUS register parity error!\n");
+								/* Use the real event to indicate the error */
+								VHA_REG_SET_PARITY_ERROR(status);
+								dev_info(dev, "status: 0x%016llx!\n", status);
+							}
+						}
+#endif
+						/* Read RESPONSE_FIFO_WL_ID. */
+						wm_cmd_id = IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_ID);
+						/* Gather and process perf/stats data. */
+						if (WM_DBG_MODE_ON(PERF))
+							vha->stats.cnn_last_cycles = IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_PERF);
+						if (WM_DBG_MODE_ON(BAND)) {
+#define GET_MEM_STAT_TRANS(stat, reg) \
+		vha->stats.last_mem_stats.stat##_transactions = \
+			IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_BW_##reg)
+#define GET_MEM_STAT_WORDS(stat, reg) \
+		vha->stats.last_mem_stats.stat##_words = \
+			IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_BW_##reg##_WORD)
+
+							GET_MEM_STAT_TRANS(locm_rd,  LOCM_RD);
+							GET_MEM_STAT_TRANS(locm_wr,  LOCM_WR);
+							GET_MEM_STAT_TRANS(locm_mwr, LOCM_MWR);
+							GET_MEM_STAT_TRANS(socm_rd,  SOCM_RD);
+							GET_MEM_STAT_TRANS(socm_wr,  SOCM_WR);
+							GET_MEM_STAT_TRANS(socm_mwr, SOCM_MWR);
+							GET_MEM_STAT_TRANS(ddr_rd,   DDR_RD);
+							GET_MEM_STAT_TRANS(ddr_wr,   DDR_WR);
+							GET_MEM_STAT_TRANS(ddr_mwr,  DDR_MWR);
+
+							GET_MEM_STAT_WORDS(locm_rd, LOCM_RD);
+							GET_MEM_STAT_WORDS(locm_wr, LOCM_WR);
+							GET_MEM_STAT_WORDS(socm_rd, SOCM_RD);
+							GET_MEM_STAT_WORDS(socm_wr, SOCM_WR);
+							GET_MEM_STAT_WORDS(ddr_rd,  DDR_RD);
+							GET_MEM_STAT_WORDS(ddr_wr,  DDR_WR);
+#undef GET_MEM_STAT_TRANS
+#undef GET_MEM_STAT_WORDS
+						}
+						/* Pop response from RESPONSE_FIFO. */
+						IOWRITE64_CR_REGIO(VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_EN,
+											WM_RESPONSE_FIFO_READ);
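+						/* Re-enable WM events that the top half disabled
+						 * to avoid an interrupt storm. */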
+						IOWRITE64_CR_REGIO(VHA_WM_EVENTS_DEFAULT, WM_EVENT_ENABLE);
+						VHA_UNLOCK_WM();
+						/* Check if id matches the command. */
+						if (VHA_CR_GETBITS(WM_RESPONSE_FIFO_WL_ID, WL_ID, wm_cmd_id) !=
+															cmd->wm_cmd_id) {
+							dev_err(vha->dev, "%s: WM%u WL id mismatch for cmd 0x%08x/%u: "
+									"0x%04x vs. 0x%04x\n", __func__, id,
+									cmd->user_cmd.cmd_id, cmd->session->id, cmd->wm_cmd_id,
+									(uint16_t)VHA_CR_GETBITS(
+											WM_RESPONSE_FIFO_WL_ID, WL_ID,
+											wm_cmd_id));
+							/* Indicate WL id mismatch. */
+							VHA_REG_SET_WL_ID_MISMATCH_ERROR(status);
+						}
+						/* Leave only potential errors. */
+						status &= wm_rsp_err_events;
+						/* Store the latest error status for potential full_reset. */
+						if (status)
+							rsp_err_status = status;
+#ifdef VHA_SCF
+						if (vha->confirm_config_reg)
+							vha_handle_conf_status(vha, cmd, &full_reset, &status);
+
+						if (vha->cnn_combined_crc_enable)
+							vha_check_crc(vha, cmd, &status);
+#endif
+						/* Update per core/WM stats. */
+						VHA_INC_WL_STAT(vha, kicks_completed, cmd);
+
+						/* Free command resources. */
+						if (!vha->hw_sched_status.assignments[cmd->hw_sched_info.assignment_id].queued)
+							vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+						vha_dev_free_cmd_res(vha, cmd, true);
+
+						/* Finally handle the response. */
+						vha_handle_wm_response(vha, id, status, irq_status.core_events,
+												irq_status.ic_events, &full_reset, &error_flags);
+
+						if (status)
+							vha->stats.total_failures++;
+						vha->stats.cnn_kicks_completed++;
+					} else {
+						WARN_ON(1);
+					}
+				} else {
+					/* WM source flagged but no WM event bits set - currently ignored. */
+				}
+			}
+		/* If any of processed WLs required full reset, all the WLs being
+		 * currently processed need to be failed and rolled back.
+		 * The reset itself will be executed at the end of the handler. */
+		if (full_reset)
+			vha_handle_sys_failure(vha, rsp_err_status, -EIO, error_flags);
+	}
+
+	/* Read core event source mask. */
+	/* Debug purpose only ... */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, CORE,
+												irq_status.event_source);
+	if (multi_src_mask)
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
+			if (multi_src_mask & (1 << id)) {
+				if (irq_status.core_events[id] & VHA_CORE_ERR_EVENTS)
+					dev_err(vha->dev, "%s: Core%d error event has been detected: %llx\n",
+							__func__, id, irq_status.core_events[id]);
+			}
+	/* Read IC event source mask. */
+	/* Debug purpose only ... */
+	multi_src_mask = (uint32_t)VHA_CR_GETBITS(HOST_EVENT_SOURCE, IC,
+												irq_status.event_source);
+	if (multi_src_mask)
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
+			if (multi_src_mask & (1 << id)) {
+				if (irq_status.ic_events[id] & VHA_IC_ERR_EVENTS)
+					dev_err(vha->dev, "%s: Interconnect%d error event has been detected: %llx\n",
+							__func__, id, irq_status.ic_events[id]);
+			}
+
+calibration_end:
+	if (full_reset) {
+		dev_err(vha->dev, "Performing full system reset due to HW error detection.");
+		/* Stop cores and execute the actual full reset finally. */
+		ret = vha_dev_stop(vha, true);
+		/* Check queues ... */
+		vha_chk_cmd_queues(vha, true);
+	} else {
+		/* Run in BH context! */
+		vha_chk_cmd_queues(vha, false);
+	}
+
+#ifdef CONFIG_FAULT_INJECTION
+	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
+		current->make_it_fail = false;
+#endif
+	mutex_unlock(&vha->lock);
+
+	return ret;
+}
+
+#ifdef CONFIG_VHA_DUMMY
+static int vha_dummy_dev_start(struct vha_dev *vha)
+{
+	if (vha->state == VHA_STATE_ON)
+		return 0; /* not an error */
+
+	vha->state = VHA_STATE_ON;
+	/* Remember the time hw is powered on */
+	GETNSTIMEOFDAY(&vha->stats.hw_start);
+	return 0;
+}
+
+static int vha_dummy_dev_stop(struct vha_dev *vha)
+{
+	uint64_t tmp = 0;
+	struct TIMESPEC now;
+
+	if (vha->state == VHA_STATE_OFF)
+		return -1;
+
+	vha->state = VHA_STATE_OFF;
+	/* Update the up time of the core */
+	GETNSTIMEOFDAY(&now);
+	if (get_timespan_us(&vha->stats.hw_start, &now, &tmp)) {
+		do_div(tmp, 1000UL);
+		vha->stats.uptime_ms += tmp;
+		if (vha->stats.uptime_ms)
+			vha_update_utilization(vha);
+		else
+			dev_dbg(vha->dev,
+				"%s Too short execution time to calculate utilization!\n",
+				__func__);
+	} else
+		WARN_ON(1);
+
+	return 0;
+}
+#endif
+
+int vha_dev_start(struct vha_dev *vha)
+{
+	int ret = 0;
+	uint8_t core_mask;
+	uint8_t active_core_mask = vha->full_core_mask;
+	int id;
+
+#if defined(VHA_ENHANCED_APM) && !defined(CONFIG_VHA_DUMMY)
+	active_core_mask &= ~vha->hw_sched_status.free_core_mask;
+#endif
+
+	if (vha->do_calibration)
+		active_core_mask |= VHA_CALIBRATION_CORE_MASK;
+
+	/* If device disabled & no core active */
+	if (vha->state == VHA_STATE_OFF && !vha->active_core_mask) {
+		pm_runtime_get_sync(vha->dev);
+		dev_dbg(vha->dev, "%s system power up\n", __func__);
+	}
+
+	/* Cancel any APM request for active cores that are busy at this point */
+	{
+		/* Find active cores that are busy and under APM */
+		uint8_t apm_core_mask = active_core_mask & vha->apm_core_mask;
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (apm_core_mask & (1 << id))
+				cancel_delayed_work(&vha->apm_dworks[id].dwork);
+		}
+		vha->apm_core_mask &= ~(apm_core_mask);
+	}
+
+	/* Find cores that have to be powered on */
+	core_mask = (vha->active_core_mask ^ active_core_mask) &
+			~vha->active_core_mask;
+	if (core_mask) {
+		dev_dbg(vha->dev, "%s core mask:%#x  (%#x -> %#x)\n",
+				__func__, core_mask, vha->active_core_mask, active_core_mask);
+
+		/////////////// POWER ON //////////////////////////
+		img_pdump_printf("-- POWER_ON_BEGIN\n");
+		/* Prepare device cores ...  */
+		ret = vha_dev_prepare_cores(vha, core_mask);
+		if (ret) {
+			dev_err(vha->dev, "%s: Error preparing device cores!\n", __func__);
+			goto error;
+		}
+		/* Enable device cores clocks */
+		vha_dev_enable_clocks(vha, core_mask);
+		/* Reset device cores & system for the very first time */
+		ret = vha_dev_reset(vha, core_mask,
+				vha->active_core_mask ? false : true);
+		if (ret) {
+			dev_err(vha->dev, "%s: Error resetting device cores!\n", __func__);
+			goto error;
+		}
+		/* Re-enable device cores clocks after reset */
+		vha_dev_enable_clocks(vha, core_mask);
+		img_pdump_printf("-- POWER_ON_END\n");
+		/////////////////////////////////////////////////////
+
+		vha_dev_ready(vha, core_mask,
+				vha->active_core_mask ? false : true);
+
+		/* Store actual status about active cores */
+		vha->active_core_mask = active_core_mask;
+	}
+
+	if (vha->state == VHA_STATE_OFF) {
+		/* Call device specific setup */
+		vha_dev_setup(vha);
+		/* Remember the time device is powered on */
+		GETNSTIMEOFDAY(&vha->stats.hw_start);
+
+		vha->state = VHA_STATE_ON;
+#ifdef VHA_SCF
+		/* Start the SW watchdog */
+		vha_start_swd(vha, 0);
+#endif
+	}
+
+	return 0;
+error:
+	pm_runtime_put_sync(vha->dev);
+	vha->state = VHA_STATE_OFF;
+	vha->active_core_mask = 0;
+	return ret;
+}
+
+static int vha_dev_stop_cores(struct vha_dev *vha, uint8_t core_mask, bool reset)
+{
+	int ret = 0;
+
+	if (core_mask) {
+		/* Store actual status about active cores */
+		vha->active_core_mask &= ~core_mask;
+
+		/* Disable events at first */
+		vha_dev_disable_events(vha, core_mask,
+				vha->active_core_mask ? false : true);
+
+		/////////////// POWER_OFF //////////////////////////
+		img_pdump_printf("-- POWER_OFF_BEGIN\n");
+		/* Reset core in case of error or pending inference */
+		if (reset) {
+			ret = vha_dev_reset(vha, core_mask,
+					vha->active_core_mask ? false : true);
+			if(ret)
+				dev_warn(vha->dev,
+						"%s: Problem with resetting device cores!\n",
+						__func__);
+		}
+
+		/* Disable device clocks */
+		ret = vha_dev_disable_clocks(vha, core_mask,
+				vha->active_core_mask ? false : true);
+		if(ret)
+			dev_warn(vha->dev,
+					"%s: Problem with disabling clocks for cores!\n",
+					__func__);
+
+		/* Execute any outstanding routines to flush the device cores */
+		ret = vha_dev_flush_cores(vha, core_mask);
+		if(ret)
+			dev_warn(vha->dev,
+					"%s: Problem with flushing device cores!\n",
+					__func__);
+		img_pdump_printf("-- POWER_OFF_END\n");
+		/////////////////////////////////////////////////////
+	}
+
+	/* If device enabled & no core active */
+	if (vha->state == VHA_STATE_ON && !vha->active_core_mask) {
+		int id;
+
+		/* Cancel APM requests if we are about to power off the device */
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
+			cancel_delayed_work(&vha->apm_dworks[id].dwork);
+		vha->apm_core_mask = 0;
+
+		dev_dbg(vha->dev, "%s system power down\n", __func__);
+
+		vha->state = VHA_STATE_OFF;
+
+		/* Update the up time of the device */
+		if (!vha->do_calibration) {
+			uint64_t tmp = 0;
+			struct TIMESPEC now;
+			GETNSTIMEOFDAY(&now);
+			if (get_timespan_us(&vha->stats.hw_start, &now, &tmp)) {
+				do_div(tmp, 1000UL);
+				vha->stats.uptime_ms += tmp;
+				if (vha->stats.uptime_ms)
+					vha_update_utilization(vha);
+				else
+					dev_dbg(vha->dev,
+							"%s Too short execution time to calculate utilization!\n",
+							__func__);
+			} else
+				WARN_ON(1);
+		}
+
+		vha->active_mmu_ctx = VHA_INVALID_ID;
+
+		spin_lock_irq(&vha->irq_lock);
+		memset(&vha->irq_status, 0, sizeof(vha->irq_status));
+		spin_unlock_irq(&vha->irq_lock);
+
+		if (reset) {
+			pm_runtime_mark_last_busy(vha->dev);
+			pm_runtime_put_sync_autosuspend(vha->dev);
+		} else {
+			pm_runtime_put_sync(vha->dev);
+		}
+	}
+
+	return ret;
+}
+
+int vha_dev_stop(struct vha_dev *vha, bool reset)
+{
+	int ret = 0;
+	uint8_t active_core_mask = 0;
+	uint8_t core_mask;
+
+#if defined(VHA_ENHANCED_APM) && !defined(CONFIG_VHA_DUMMY)
+	active_core_mask = vha->full_core_mask &
+			~vha->hw_sched_status.free_core_mask;
+#endif
+
+	if (vha->do_calibration)
+		active_core_mask &= ~VHA_CALIBRATION_CORE_MASK;
+
+	/* Find cores that have to be powered off */
+	core_mask = (vha->active_core_mask ^ active_core_mask) &
+			vha->active_core_mask;
+
+	if (core_mask)
+		dev_dbg(vha->dev, "%s core mask:%#x  (%#x -> %#x)\n",
+				__func__, core_mask, vha->active_core_mask, active_core_mask);
+
+	ret = vha_dev_stop_cores(vha, core_mask, reset);
+
+	return ret;
+}
+
+static bool vha_is_mmu_ctx_shared(struct vha_cmd *cmd)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+
+	/* If the session of the command we are trying to execute shares
+	 * the hw mmu ctx with different session */
+	if (vha->mmu_ctxs[session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1) {
+		uint8_t id;
+
+		/* Check currently processed commands */
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			/* Check if the mmu hw context is same as current command */
+			if (vha->pendcmd[id].cmd != NULL &&
+					vha->pendcmd[id].cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id ==
+							session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id)
+				return true;
+		}
+	}
+
+	return false;
+}
+
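+/*
+ * Schedule a CNN_SUBMIT_MULTI command onto a free WM and the requested
+ * number of free cores. Called with cmd == NULL it only checks whether
+ * anything could potentially be scheduled. An externally supplied
+ * scheduling sequence takes precedence; otherwise the first free WM and
+ * cores are picked, falling back to queueing behind a matching assignment
+ * when low latency mode allows it.
+ */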
+int vha_dev_schedule_cmd(struct vha_dev *vha, struct vha_cmd *cmd)
+{
+	struct vha_hw_sched_status *status = &vha->hw_sched_status;
+	struct vha_hw_sched_info *info;
+	struct vha_user_cnn_submit_multi_cmd* user_cmd =
+			(struct vha_user_cnn_submit_multi_cmd*)&cmd->user_cmd;
+	uint8_t wm_id;
+	uint8_t core_id = 0;
+	uint8_t core_mask = 0;
+	uint8_t assignment_id;
+	uint8_t i;
+
+	/* If no command provided, just check if anything can potentially
+	 * be scheduled. */
+	if (cmd == NULL) {
+		/* Calculate the number of cores in use. */
+		uint8_t num_used_cores = 0;
+		uint8_t assignment_id;
+		for (assignment_id = 0; assignment_id < VHA_NUM_CORES; assignment_id++)
+			if (status->assignments[assignment_id].core_mask) {
+				uint8_t num_cores = VHA_CORE_MASK_TO_NUM(status->assignments[assignment_id].core_mask);
+				num_used_cores += num_cores;
+				if (vha->low_latency != VHA_LL_DISABLED)
+					if (status->assignments[assignment_id].queued)
+						num_used_cores += num_cores;
+			}
+		/* If all the cores are in use, nothing can be scheduled. */
+		if (num_used_cores ==
+				(vha->hw_props.num_cnn_core_devs * ((vha->low_latency != VHA_LL_DISABLED) ? 2 : 1)))
+			return -1;
+		return 0;
+	}
+
+	if (cmd->user_cmd.cmd_type != VHA_CMD_CNN_SUBMIT_MULTI)
+		return 0;
+
+#define VHA_LL_BRANCH(l) \
+		{ \
+			if (vha->low_latency == VHA_LL_DISABLED) \
+				return -1; \
+			else \
+				goto l; \
+		}
+
+	/* Check for shared mmu hardware context, as we can't schedule command
+	 * on free cores, while other currently processing cores use the same
+	 * mmu hw context, because data would be overwritten */
+	if (vha_is_mmu_ctx_shared(cmd)) {
+		dev_dbg(vha->dev, "%s: Postpone command due to shared mmu context!\n",
+				__func__);
+		return -1;
+	}
+
+	info = &cmd->hw_sched_info;
+	/* If external scheduling is requested... */
+	if (vha->scheduling_sequence_len > 0) {
+		uint8_t wm_mask;
+		/* Queueing is not supported for external scheduling. */
+		if (status->num_cores_free < user_cmd->num_cores)
+			return -1;
+		/* Read scheduling data for this workload from scheduling sequence. */
+		wm_id = SCHED_SEQ_GET_WM(vha->scheduling_counter);
+		wm_mask = VHA_WM_ID_TO_MASK(wm_id);
+		core_mask = SCHED_SEQ_GET_CORES(vha->scheduling_counter);
+		/* Sanity check the data. */
+		if (((status->free_wm_mask & wm_mask) == 0) ||
+			((status->free_core_mask & core_mask) != core_mask))
+			return -1;
+		status->free_core_mask &= ~(core_mask);
+		/* Increment scheduling counter. */
+		vha->scheduling_counter =
+				(vha->scheduling_counter + 1) %
+					vha->scheduling_sequence_len;
+	} else {
+		/* Check if there are cores available. */
+		if (status->num_cores_free < user_cmd->num_cores)
+			VHA_LL_BRANCH(attempt_to_queue_multi);
+		/* Check if there is a WM available. */
+		if (status->num_wms_free == 0)
+			VHA_LL_BRANCH(attempt_to_queue_multi);
+		/* Find a free WM. */
+		wm_id = ffs(status->free_wm_mask) - 1;
+		/* Find the required number of free cores. */
+		for (i = 0; i < user_cmd->num_cores; i++) {
+			core_id = ffs(status->free_core_mask) - 1;
+			core_mask |= VHA_CORE_ID_TO_MASK(core_id);
+			status->free_core_mask &= ~(core_mask);
+		}
+	}
+
+	/* Update resource status. */
+	for (assignment_id = 0; assignment_id < VHA_NUM_CORES; assignment_id++)
+		if (status->assignments[assignment_id].core_mask == 0)
+			break;
+	if (assignment_id == VHA_NUM_CORES) {
+		dev_info(vha->dev, "%s: Scheduling data inconsistency detected!\n", __func__);
+		return -1;
+	}
+	status->assignments[assignment_id].assignment_id = assignment_id;
+	status->assignments[assignment_id].wm_id = wm_id;
+	status->assignments[assignment_id].core_mask = core_mask;
+	status->num_cores_free -= user_cmd->num_cores;
+	status->num_wms_free--;
+	status->free_wm_mask &= ~(VHA_WM_ID_TO_MASK(wm_id));
+
+	/* Store command scheduling info. */
+	*info = status->assignments[assignment_id];
+
+	goto skip_label_attempt_to_queue_multi;
+
+attempt_to_queue_multi:
+	/* Check if there is an assignment matching this scheduling request
+	 * available in the list of assignments. */
+	for (assignment_id = 0; assignment_id < VHA_NUM_CORES; assignment_id++)
+		/* If this assignment is not queued already and it has the same
+		 * number of cores as this scheduling request... */
+		if (!status->assignments[assignment_id].queued &&
+				status->assignments[assignment_id].core_mask &&
+				(VHA_CORE_MASK_TO_NUM(status->assignments[assignment_id].core_mask) ==
+																user_cmd->num_cores)) {
+			wm_id = status->assignments[assignment_id].wm_id;
+			if (vha->low_latency == VHA_LL_SELF_KICK
+					/* If the current command we are trying to queue belongs
+					 * to a different session than the pending one. */
+					&& (vha->pendcmd[wm_id].cmd != NULL &&
+						vha->pendcmd[wm_id].cmd->session != cmd->session)
+					/* If the session of the command we are trying to queue shares
+					 * the hw mmu ctx with the session of pending cmd */
+					&& (cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id ==
+							vha->pendcmd[wm_id].cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id)
+					/* Sanity if hw mmu ctx is really shared at this point. */
+					&& (vha->mmu_ctxs[cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1)
+				) {
+				/* Skip this assignment. */
+				continue;
+			}
+			/* Make the assignment queued. */
+			status->assignments[assignment_id].queued = true;
+			/* Store command scheduling info. */
+			*info = status->assignments[assignment_id];
+			break;
+		}
+	/* Fail if no matching assignments found. */
+	if (assignment_id == VHA_NUM_CORES)
+		return -1;
+
+skip_label_attempt_to_queue_multi:
+
+	/* For hw commands... */
+	if (CMD_EXEC_ON_HW(cmd)) {
+		if (!VHA_IS_DUMMY(vha)) {
+			int tries = 3; /* magic number, just try harder to start the device */
+			/* Start device. */
+			while (tries--) {
+				if (vha_dev_start(vha))
+					dev_warn(vha->dev, "%s: Error starting device cores, retrying.", __func__);
+				else
+					break;
+			}
+		}
+#ifdef CONFIG_VHA_DUMMY
+		else
+			vha_dummy_dev_start(vha);
+#endif
+	}
+
+#undef VHA_LL_BRANCH
+
+	dev_dbg(vha->dev, "%s: cmd 0x%08x/%u scheduled on WM%u/core(s) 0x%02x\n",
+			__func__, cmd->user_cmd.cmd_id, cmd->session->id,
+			info->wm_id, info->core_mask);
+	return 0;
+}
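+
+/* Bookkeeping summary for vha_dev_schedule_cmd(): a direct schedule consumes
+ * one WM plus user_cmd->num_cores cores from hw_sched_status and records the
+ * pairing in a free assignments[] slot; the low-latency path instead marks an
+ * existing assignment with a matching core count as queued, which
+ * vha_dev_free_cmd_res() uses to tell the two cases apart when releasing. */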
+
+void vha_dev_free_cmd_res(struct vha_dev *vha, struct vha_cmd *cmd, bool update_stats)
+{
+	struct vha_hw_sched_status *status = &vha->hw_sched_status;
+	struct vha_hw_sched_info *info = &cmd->hw_sched_info;
+	struct vha_user_cnn_submit_multi_cmd* user_cmd =
+		(struct vha_user_cnn_submit_multi_cmd*)&cmd->user_cmd;
+
+	if (update_stats) {
+		uint64_t proc_time = 0;
+		struct TIMESPEC *from = &cmd->hw_proc_start;
+		struct TIMESPEC *to = &vha->stats.wm_stats[info->wm_id].hw_proc_end;
+
+		if (TIMESPEC_COMPARE(&vha->stats.wm_stats[info->wm_id].hw_proc_end_prev,
+								&cmd->hw_proc_start) >= 0)
+			from = &vha->stats.wm_stats[info->wm_id].hw_proc_end_prev;
+
+		if (get_timespan_us(from, to, &proc_time)) {
+			vha->stats.last_proc_us = proc_time;
+		} else {
+			vha->stats.last_proc_us = 0;
+		}
+		/* Update WL stats. */
+		VHA_UPDATE_WL_STAT(vha, total_proc_us, cmd, vha->stats.last_proc_us);
+		/* Update common stats. */
+		vha_cnn_update_stats(vha);
+	}
+
+	/* If assignment for this workload is queued... */
+	if (status->assignments[info->assignment_id].queued) {
+		/* Just mark it as not queued again. */
+		status->assignments[info->assignment_id].queued = false;
+		/* Clear scheduling info for this workload. */
+		info->freed = true;
+		/* Do not update the scheduling status. */
+		return;
+	}
+
+	/* Update the scheduling status. */
+	status->num_cores_free += user_cmd->num_cores;
+	status->free_core_mask |= info->core_mask;
+	status->num_wms_free++;
+	status->free_wm_mask |= VHA_WM_ID_TO_MASK(info->wm_id);
+	/* Clear the assignment and scheduling info for this workload. */
+	memset(&status->assignments[info->assignment_id], 0,
+			sizeof(struct vha_hw_sched_info));
+	info->freed = true;
+}
+
+static void sched_apm_multi(struct vha_dev *vha)
+{
+	struct vha_apm_work *apm_work = NULL;
+	/* Find active cores that are free and not under APM */
+	uint8_t apm_core_mask = vha->active_core_mask &
+			vha->hw_sched_status.free_core_mask &
+			~vha->apm_core_mask;
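+	/* Example (illustrative values): active = 0b0111, free = 0b0101,
+	 * apm = 0b0001 -> 0b0111 & 0b0101 & ~0b0001 = 0b0100, so only core 2
+	 * needs a new APM work; core 0 is already under APM and core 1 is
+	 * still busy. */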
+	int id;
+
+	/* Skip if nothing has changed */
+	if (!apm_core_mask)
+		return;
+
+	dev_dbg(vha->dev, "%s core mask:%#x\n", __func__, apm_core_mask);
+
+	/* Schedule for all cores, separately */
+	for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+		if (apm_core_mask & (1 << id)) {
+			apm_work = &vha->apm_dworks[id];
+			apm_work->core_mask = 1 << id;
+			apm_work->delay_ms = vha->pm_delay;
+			vha_sched_apm(vha, apm_work);
+		}
+	}
+
+	/* Record actual status */
+	vha->apm_core_mask |= apm_core_mask;
+}
+//#define MTSTK_MEASURE_MULTI_PRI
+#ifdef MTSTK_MEASURE_MULTI_PRI
+static uint8_t get_active_pri_count(struct vha_dev *vha)
+{
+	uint8_t pris;
+	uint8_t pri_count = 0;
+
+	/* Calculate the number of priority levels with anything to schedule. */
+	for (pris = 0; pris < VHA_MAX_PRIORITIES; pris++)
+		if (vha->pri_q_counters[pris] > 0) {
+			pri_count++;
+		}
+
+	return pri_count;
+}
+#endif
+
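+/* Worked example for vha_get_time_span(): start = {2, 900000000},
+ * end = {4, 100000000}: tv_sec span = 2, so one second is borrowed:
+ * tv_sec = 1, end->tv_nsec = 1100000000; tv_nsec span = 200000000,
+ * giving {1, 200000000}, i.e. the expected 1.2 s. Note that end->tv_nsec
+ * is adjusted in place. */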
+static void vha_get_time_span(struct TIMESPEC *start, struct TIMESPEC *end,
+	struct TIMESPEC *span)
+{
+	if (start == NULL || end == NULL || span == NULL)
+		return;
+
+	/* Calculate the seconds span. */
+	span->tv_sec = end->tv_sec - start->tv_sec;
+	/* If the span is more than a second, borrow one second into the
+	 * nanosecond term to avoid a potentially negative nanosecond value. */
+	if (span->tv_sec > 0) {
+		span->tv_sec--;
+		end->tv_nsec += 1000000000;
+	}
+	/* Calculate the nanoseconds span. */
+	span->tv_nsec = end->tv_nsec - start->tv_nsec;
+	/* If the nanoseconds add up to a full second, move it to seconds. */
+	if (span->tv_nsec >= 1000000000) {
+		span->tv_sec++;
+		span->tv_nsec -= 1000000000;
+	}
+}
+
+static uint8_t vha_scheduler_get_priority(struct vha_dev *vha)
+{
+	uint8_t ret_pri = 0;
+	uint8_t pris;
+	uint8_t pri_count = 0;
+	uint32_t curr_window = 0;
+	uint32_t curr_limit = 0;
+	uint32_t rand_val;
+
+	/* Calculate current total window width. */
+	for (pris = 0; pris < VHA_MAX_PRIORITIES; pris++)
+		if (vha->pri_q_counters[pris] > 0) {
+			curr_window += pri_windows[pris];
+			ret_pri = pris;
+			pri_count++;
+		}
+
+	/* If there's no priority with WLs to schedule, return VHA_INVALID_PRI. */
+	if (pri_count == 0)
+		return VHA_INVALID_PRI;
+
+	/* If there's only one priority with WLs to schedule, just return it. */
+	if (pri_count == 1)
+		return ret_pri;
+
+	/* If starvation avoidance is disabled, just return the highest priority
+	 * with WLs to schedule. */
+	if (curr_window == 0)
+		return ret_pri;
+
+	/* If starvation avoidance is enabled, use 'lottery' based approach. */
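+	/* Example (illustrative window widths): with pri_windows[] =
+	 * {10, 30, 60} and only priorities 0 and 2 having workloads,
+	 * curr_window = 70; a draw in [0, 10] picks priority 0 and anything
+	 * above picks priority 2, so wider windows win proportionally more
+	 * draws and no priority starves. */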
+
+	/* Generate random value within the current window. */
+	vha_mt19937_gen_range(vha->hw_sched_status.sched_data->rand_gen_handle,
+							0, curr_window, &rand_val);
+
+	/* Choose priority based on the value generated and available priorities. */
+	for (pris = 0; pris < VHA_MAX_PRIORITIES; pris++)
+		if (vha->pri_q_counters[pris] > 0) {
+			curr_limit += pri_windows[pris];
+			if (rand_val <= curr_limit) {
+				ret_pri = pris;
+				break;
+			}
+		}
+
+	return ret_pri;
+}
+
+static void vha_scheduler_set_starting_session(struct vha_dev *vha,
+	uint8_t priority, struct vha_session *session)
+{
+	/* Set a starting point session for next scheduling round. */
+	if (session != list_entry(&vha->sched_sessions[priority],
+								struct vha_session, sched_list[priority]))
+		while(list_first_entry(&vha->sched_sessions[priority],
+								struct vha_session, sched_list[priority]) != session)
+			list_rotate_left(&vha->sched_sessions[priority]);
+}
+
+void vha_scheduler_loop(struct vha_dev *vha)
+{
+	struct vha_cmd *cmd, *tmp;
+	struct vha_session *session = NULL;
+	bool scheduled = false;
+	enum do_cmd_status cmd_status = CMD_OK;
+	uint8_t current_pri = VHA_DEFAULT_PRI;
+
+	bool log_pri_sched_info = true;
+
+	if (vha_dev_schedule_cmd(vha, NULL) != 0) {
+		/* Postpone worker task if nothing can be scheduled. */
+		dev_dbg(vha->dev, "%s Nothing can be scheduled at the moment. "
+				"Postpone worker task!\n", __func__);
+		return;
+	}
+
+#ifdef MTSTK_MEASURE_MULTI_PRI
+	log_pri_sched_info = (get_active_pri_count(vha) > 1);
+#endif
+
+	/* Main scheduling loop. */
+	do {
+		scheduled = false;
+		current_pri = vha_scheduler_get_priority(vha);
+		if (current_pri == VHA_INVALID_PRI)
+			break;
+		list_for_each_entry(session, &vha->sched_sessions[current_pri], sched_list[current_pri]) {
+			list_for_each_entry_safe(cmd, tmp, &session->cmds[current_pri], list[current_pri]) {
+#if defined(VHA_ENHANCED_APM)
+				/* Schedule APM/power down cores in the middle if possible */
+				if (!VHA_IS_DUMMY(vha)) {
+					if (!vha->no_clock_disable) {
+						if (!vha->pm_delay) {
+							if (vha_dev_stop(vha, false)) {
+								dev_warn(vha->dev, "%s: Failed to soft stop device. Trying harder with reset",
+											__func__);
+								if (vha_dev_stop(vha, true))
+									dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
+							}
+						} else
+							sched_apm_multi(vha);
+					}
+				}
+#endif
+				/* Skip this workload as it's already scheduled. */
+				if (cmd->hw_sched_info.core_mask && !cmd->hw_sched_info.freed)
+					continue;
+
+				/* Attempt to schedule command for execution. */
+				cmd_status = vha_do_cmd(cmd);
+
+				if ((cmd_status == CMD_OK) || (cmd_status == CMD_HW_BUSY)) {
+					if (cmd_status == CMD_OK) {
+						scheduled = true;
+						if (log_pri_sched_info && !cmd->rolled_back) {
+							struct TIMESPEC sched_ts, sched_span = {0};
+							GETNSTIMEOFDAY(&sched_ts);
+							vha_get_time_span(&cmd->submit_ts, &sched_ts, &sched_span);
+
+#ifdef LOG_PRI_SCHEDULING_INFO
+							dev_info(vha->dev, "@@@ scheduled 0x%08x/%u/%u, span: %llu\n",
+									cmd->user_cmd.cmd_id, session->id, cmd->user_cmd.priority,
+									(uint64_t)sched_span.tv_sec * 1000000000ULL +
+																(uint64_t)sched_span.tv_nsec);
+#endif
+
+							VHA_UPDATE_SCHED_STAT_MTSTK(vha, cmd, &sched_span);
+						}
+						session = list_next_entry(session, sched_list[current_pri]);
+					}
+					vha_scheduler_set_starting_session(vha, current_pri, session);
+					goto exit_session_loop;
+				}
+			}
+		}
+exit_session_loop:;
+	/* Keep iterating while a workload was scheduled in the last pass and
+	 * something else can still be scheduled. */
+	} while (vha_dev_schedule_cmd(vha, NULL) == 0 && scheduled);
+
+	/* Schedule APM/power down cores if possible at end */
+	if (!VHA_IS_DUMMY(vha)) {
+		bool skip = vha->no_clock_disable;
+#if !defined(VHA_ENHANCED_APM)
+		skip |= vha_is_busy(vha);
+#endif
+		if (!skip) {
+			if (!vha->pm_delay) {
+				if (vha_dev_stop(vha, false)) {
+					dev_warn(vha->dev, "%s: Failed to soft stop device. Trying harder with reset",
+							__func__);
+					if (vha_dev_stop(vha, true))
+						dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
+				}
+			} else
+				sched_apm_multi(vha);
+		}
+	}
+#ifdef CONFIG_VHA_DUMMY
+	else if (!vha_is_busy(vha))
+		vha_dummy_dev_stop(vha);
+#endif
+}
+
+bool vha_rm_session_cmds(struct vha_session *session)
+{
+	struct vha_dev *vha = session->vha;
+	bool reschedule = false;
+	uint32_t wm_id;
+	struct vha_hw_sched_info sched_info = {0};
+	struct vha_cmd *cur_cmd, *tmp_cmd;
+	uint8_t pri;
+
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		bool pend_removed = false;
+		bool queued_removed = false;
+
+		/* Check if pend/queued WLs will be removed. */
+		if (vha->pendcmd[wm_id].cmd &&
+				vha->pendcmd[wm_id].cmd->session == session) {
+			dev_warn(vha->dev,
+					"Removing a session while cnn cmd is still pending\n");
+			pend_removed = true;
+			sched_info = vha->pendcmd[wm_id].cmd->hw_sched_info;
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+			cancel_delayed_work(&vha->dummy_dworks[wm_id].dummy_dwork);
+#endif
+		}
+		if (vha->queuedcmd[wm_id].cmd &&
+				vha->queuedcmd[wm_id].cmd->session == session) {
+			dev_warn(vha->dev,
+					"Removing a session while cnn cmd is still queued\n");
+			queued_removed = true;
+			sched_info = vha->queuedcmd[wm_id].cmd->hw_sched_info;
+		}
+
+		/* Update session scheduling. */
+		if (vha->queuedcmd[wm_id].cmd &&
+				(pend_removed && !queued_removed)) {
+			if (vha->queuedcmd[wm_id].cmd->session !=
+					list_entry(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority],
+							struct vha_session, sched_list[vha->queuedcmd[wm_id].cmd->user_cmd.priority]))
+				while(list_first_entry(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority],
+						struct vha_session, sched_list[vha->queuedcmd[wm_id].cmd->user_cmd.priority]) !=
+											vha->queuedcmd[wm_id].cmd->session)
+					list_rotate_left(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority]);
+		}
+
+		/* Remove pend/queued WLs if needed. */
+		if (pend_removed || queued_removed) {
+			uint64_t wm_mask = VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM, VHA_WM_ID_TO_MASK(wm_id));
+			/* Reset WM/cores. */
+			vha_wm_reset(vha, &sched_info);
+			VHA_LOCK_WM();
+			VHA_SELECT_WM(wm_id);
+			/* Remove WM related interrupt info if it happens to be set. */
+			if (vha->irq_status.event_source & wm_mask)
+			{
+				/* Unset the WM related source bit. */
+				vha->irq_status.event_source &= ~wm_mask;
+				/* Clear all WM related events. */
+				IOWRITE64_CR_REGIO(vha->irq_status.wm_events[wm_id] & VHA_WM_EVENTS_DEFAULT,
+									WM_EVENT_CLEAR);
+				vha->irq_status.wm_events[wm_id] = 0ULL;
+			}
+			/* Re-enable WM events here as this WM will not be handled further. */
+			IOWRITE64_CR_REGIO(VHA_WM_EVENTS_DEFAULT, WM_EVENT_ENABLE);
+			VHA_UNLOCK_WM();
+			/* Rollback all WLs from this WM. */
+			vha_rollback_wm_cmds(vha, wm_id, true);
+			/* Need to reschedule too. */
+			reschedule = true;
+		}
+	}
+
+	/* Remove session related commands. */
+	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
+		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
+			/* rsp didn't make it to rsps list, free it now */
+			kfree(cur_cmd->rsp);
+
+			list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
+			vha->pri_q_counters[cur_cmd->user_cmd.priority]--;
+			kfree(cur_cmd);
+		}
+	}
+
+	return reschedule;
+}
+
+bool vha_rm_session_cmds_masked(struct vha_session *session, uint32_t cmd_id,
+		uint32_t cmd_id_mask)
+{
+	struct vha_dev *vha = session->vha;
+	bool reschedule = false;
+	uint32_t wm_id;
+	struct vha_hw_sched_info sched_info = {0};
+
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		bool pend_removed = false;
+		bool queued_removed = false;
+
+		/* Check if pend/queued WLs will be removed. */
+		if (vha->pendcmd[wm_id].cmd &&
+				(vha->pendcmd[wm_id].cmd->session == session) &&
+				(vha->pendcmd[wm_id].cmd->user_cmd.cmd_id & cmd_id_mask)
+																	== cmd_id) {
+			pend_removed = true;
+			sched_info = vha->pendcmd[wm_id].cmd->hw_sched_info;
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+			cancel_delayed_work(&vha->dummy_dworks[wm_id].dummy_dwork);
+#endif
+			VHA_INC_WL_STAT(vha, kicks_cancelled, vha->pendcmd[wm_id].cmd);
+			vha->stats.cnn_kicks_cancelled++;
+		}
+		if (vha->queuedcmd[wm_id].cmd &&
+				(vha->queuedcmd[wm_id].cmd->session == session) &&
+				(vha->queuedcmd[wm_id].cmd->user_cmd.cmd_id & cmd_id_mask)
+																	== cmd_id) {
+			sched_info = vha->queuedcmd[wm_id].cmd->hw_sched_info;
+			queued_removed = true;
+			if (vha->low_latency == VHA_LL_SELF_KICK) {
+				VHA_INC_WL_STAT(vha, kicks_cancelled, vha->queuedcmd[wm_id].cmd);
+				vha->stats.cnn_kicks_cancelled++;
+			}
+		}
+
+		/* Update session scheduling. */
+		if (vha->queuedcmd[wm_id].cmd &&
+				(pend_removed && !queued_removed)) {
+			if (vha->queuedcmd[wm_id].cmd->session !=
+						list_entry(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority],
+								struct vha_session, sched_list[vha->queuedcmd[wm_id].cmd->user_cmd.priority]))
+				while(list_first_entry(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority],
+						struct vha_session, sched_list[vha->queuedcmd[wm_id].cmd->user_cmd.priority]) !=
+								vha->queuedcmd[wm_id].cmd->session)
+					list_rotate_left(&vha->sched_sessions[vha->queuedcmd[wm_id].cmd->user_cmd.priority]);
+		}
+
+		/* Remove pend/queued WLs if needed. */
+		if (pend_removed || queued_removed) {
+			uint64_t wm_mask = VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM, VHA_WM_ID_TO_MASK(wm_id));
+			/* Reset WM/cores. */
+			vha_wm_reset(vha, &sched_info);
+			VHA_LOCK_WM();
+			VHA_SELECT_WM(wm_id);
+			/* Remove WM related interrupt info if it happens to be set. */
+			if (vha->irq_status.event_source & wm_mask)
+			{
+				/* Unset the WM related source bit. */
+				vha->irq_status.event_source &= ~wm_mask;
+				/* Clear all WM related events. */
+				IOWRITE64_CR_REGIO(vha->irq_status.wm_events[wm_id] & VHA_WM_EVENTS_DEFAULT,
+									WM_EVENT_CLEAR);
+				vha->irq_status.wm_events[wm_id] = 0ULL;
+			}
+			/* Re-enable WM events here as this WM will not be handled further. */
+			IOWRITE64_CR_REGIO(VHA_WM_EVENTS_DEFAULT, WM_EVENT_ENABLE);
+			VHA_UNLOCK_WM();
+			/* Rollback all WLs from this WM. */
+			vha_rollback_wm_cmds(vha, wm_id, true);
+			/* Correct aborted stats. */
+			if (queued_removed) {
+				VHA_UPDATE_WM_STAT(vha, kicks_aborted, sched_info.wm_id, -1);
+				VHA_UPDATE_CORE_GROUP_STAT(vha, kicks_aborted, sched_info.core_mask, -1);
+				vha->stats.cnn_kicks_aborted--;
+			}
+			if (pend_removed) {
+				VHA_UPDATE_WM_STAT(vha, kicks_aborted, sched_info.wm_id, -1);
+				VHA_UPDATE_CORE_GROUP_STAT(vha, kicks_aborted, sched_info.core_mask, -1);
+				vha->stats.cnn_kicks_aborted--;
+			}
+			reschedule = true;
+		}
+	}
+
+	return reschedule;
+}
+
+int vha_rm_cmds(struct vha_session *session, uint32_t cmd_id,
+		uint32_t cmd_id_mask, bool respond)
+{
+	struct vha_dev *vha = session->vha;
+	struct vha_cmd *cur_cmd, *tmp_cmd;
+	struct vha_rsp *cur_rsp, *tmp_rsp;
+	bool reschedule = false;
+	bool respond_aux = false;
+	int ret = 0;
+	uint8_t pri;
+
+	mutex_lock(&vha->lock);
+
+	/* Remove pend/queued session commands that match the cmd_id. */
+	reschedule = vha_rm_session_cmds_masked(session, cmd_id, cmd_id_mask);
+
+	/* Remove session related commands matching command id template. */
+	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
+		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
+			if ((cur_cmd->user_cmd.cmd_id & cmd_id_mask) == cmd_id) {
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+				switch (cur_cmd->user_cmd.cmd_type)
+				{
+				case VHA_CMD_CNN_SUBMIT_MULTI:
+				{
+					struct vha_user_cnn_submit_multi_cmd *cnn_cmd =
+							(struct vha_user_cnn_submit_multi_cmd *)&cur_cmd->user_cmd;
+					int j;
+					for (j = 0; j < (cnn_cmd->msg.num_bufs - cnn_cmd->num_cores); j++) {
+						struct vha_buffer *buf = vha_find_bufid(session, cnn_cmd->bufs[j]);
+						if (buf == NULL) {
+							dev_warn(vha->dev, "%s: could not find buf %x\n", __func__,
+											cnn_cmd->bufs[j]);
+						} else {
+							vha_rm_buf_fence(session, buf);
+						}
+					}
+					break;
+				}
+				default:
+					dev_warn(vha->dev, "%s: invalid cmd type %x\n", __func__,
+								cur_cmd->user_cmd.cmd_type);
+					break;
+				}
+#endif
+
+				/* rsp didn't make it to rsps list; free it now. */
+				kfree(cur_cmd->rsp);
+
+				list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
+				vha->pri_q_counters[cur_cmd->user_cmd.priority]--;
+				kfree(cur_cmd);
+
+				/* There were commands matching command id template in the list,
+				 * so respond to wake user space. */
+				respond_aux = true;
+			}
+		}
+	}
+
+	/* Remove responses for session related commands
+	 * matching command id template. */
+	list_for_each_entry_safe(cur_rsp, tmp_rsp, &session->rsps, list) {
+		if ((cur_rsp->user_rsp.cmd_id & cmd_id_mask) == cmd_id) {
+			list_del(&cur_rsp->list);
+			kfree(cur_rsp);
+			respond_aux = true;
+		}
+	}
+
+	/* Reset hardware if required. */
+	if (reschedule)
+		ret = vha_dev_stop(vha, reschedule);
+
+	/* Generate "cancel" response if any commands matching command id template
+	 * were removed. */
+	if (respond_aux && respond) {
+		/* Calculate space for the response. */
+		size_t sz = sizeof(struct vha_rsp)
+			+ sizeof(struct vha_user_cnn_submit_rsp)
+			- sizeof(struct vha_user_rsp);
+		/* Allocate space for standard response. */
+		struct vha_rsp *rsp = kzalloc(sz, GFP_KERNEL);
+		if (rsp == NULL) {
+			dev_crit(session->vha->dev,
+					"Failed to allocate memory to notify cancel for cmds 0x%08x\n", cmd_id);
+			session->oom = true;
+		} else {
+			rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
+			rsp->user_rsp.cmd_id = cmd_id;
+			list_add_tail(&rsp->list, &session->rsps);
+		}
+		wake_up(&session->wq);
+	}
+
+	mutex_unlock(&vha->lock);
+
+	/* Just return in case of oom. */
+	if (session->oom)
+		return -ENOMEM;
+
+	/* Reschedule once all commands matching command id template are removed. */
+	if (reschedule)
+		vha_chk_cmd_queues(vha, true);
+
+	return ret;
+}
+
+bool vha_is_busy(struct vha_dev *vha)
+{
+	return (vha->hw_sched_status.num_cores_free < vha->hw_props.num_cnn_core_devs);
+}
+
+/* check all input buffers are filled and ready to go */
+bool vha_is_waiting_for_inputs(struct vha_session *session,
+	struct vha_cmd *cmd)
+{
+	if (!cmd->inbufs_ready) {
+		const struct vha_user_cnn_submit_multi_cmd *user_cmd =
+			(struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd;
+		int i;
+
+		for (i = 0; i < cmd->user_cmd.num_inbufs - user_cmd->num_cores; i++) {
+			struct vha_buffer *buf = vha_find_bufid(session, user_cmd->bufs[i]);
+
+			if (buf && buf->status == VHA_BUF_UNFILLED) {
+				dev_dbg(session->vha->dev,
+					"%s: cmd %u waiting for input "
+					"buf %d to be ready\n",
+					__func__,
+					cmd->user_cmd.cmd_id,
+					buf->id);
+				return true;
+			}
+		}
+	}
+
+	cmd->inbufs_ready = true;
+	return false;
+}
+
+void vha_dev_apm_stop(struct vha_dev *vha, struct vha_apm_work *apm_work)
+{
+	/* Find active cores that are not busy and under APM for this apm request */
+	uint8_t apm_core_mask = vha->active_core_mask &
+			vha->hw_sched_status.free_core_mask &
+			vha->apm_core_mask &
+			apm_work->core_mask;
+
+	vha->apm_core_mask &= ~apm_core_mask;
+
+	if (vha->do_calibration)
+		return;
+
+	if (apm_core_mask) {
+		dev_dbg(vha->dev, "%s core mask:%#x\n", __func__, apm_core_mask);
+		if (vha_dev_stop_cores(vha, apm_core_mask, false)) {
+			dev_warn(vha->dev, "%s: Failed to soft stop cores. Trying harder with reset",
+				__func__);
+			if (vha_dev_stop_cores(vha, apm_core_mask, true))
+				dev_err(vha->dev, "%s: Failed to stop cores with reset!", __func__);
+		}
+	}
+}
+
+int vha_dev_get_props(struct vha_dev *vha, uint32_t onchipmem_size)
+{
+	struct vha_hw_props *props = &vha->hw_props;
+	uint64_t ip_config;
+	uint32_t locm_size_kb = 0;
+	uint32_t socm_size_kb = 0;
+	uint8_t socm_num_sb, socm_num_ba, socm_num_bg;
+	uint8_t ext_mem_bus_width;
+
+	memset(props, 0, sizeof(*props));
+
+#ifdef CONFIG_VHA_DUMMY
+	/* Note: dummy dev always reads zeroes from registers */
+	props->product_id = 0x8070605040302010ULL;
+	props->core_id = (long)HW_SERIES << (int)VHA_CR_CORE_ID_BRANCH_ID_SHIFT;
+	props->core_id += 0x010203040506ULL;   // provide a dummy core id
+	props->dummy_dev = true;
+	props->num_cnn_core_devs = VHA_NUM_CORES;
+#else
+	props->product_id = IOREAD64_CR_REGIO(PRODUCT_ID);
+	props->core_id = IOREAD64_CR_REGIO(CORE_ID);
+#endif
+	props->skip_bvnc_check = false;
+	/*
+	 * MMU version 3 onwards operates on 40-bit physical & virtual addresses.
+	 */
+	props->mmu_width = 40;
+
+	/* HW from 1.1 onwards */
+	ip_config = IOREAD64_CR_REGIO(CORE_IP_CONFIG);
+#ifdef HW_AX3
+	props->mmu_ver = VHA_CR_GETBITS(CORE_IP_CONFIG, MMU_VERSION, ip_config);
+#endif
+	/* Mirage uses MMU version 3 hardware */
+	if (!props->mmu_ver)
+		props->mmu_ver = 3;
+	/* Read num cores supported (number of WMs must be the same). */
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, CNN_SUPPORTED, ip_config)) {
+		uint64_t ip_config1 = IOREAD64_CR_REGIO(CORE_IP_CONFIG1);
+		props->num_cnn_core_devs =
+				1 + VHA_CR_GETBITS(CORE_IP_CONFIG1, NUM_CORES_MIN1, ip_config1);
+	}
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, RTM_SUPPORTED, ip_config))
+		props->supported.rtm = 1;
+#ifdef HW_AX3
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, PARITY_REGISTERS, ip_config))
+		props->supported.parity = 1;
+
+#if defined(CONFIG_VHA_DUMMY) && defined(VHA_SCF)
+	/* Force parity for pdump generation */
+	props->supported.parity = 1;
+#endif
+#endif
+
+	if ((props->num_cnn_core_devs == 0)
+		|| VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id) != HW_SERIES) {
+		dev_err(vha->dev, "%s: Wrong core configuration detected. "
+			"Expected BVNC %d.x.x.x, got %llu.x.x.x. "
+			"Maybe kernel module was built with wrong params.\n",
+			__func__, HW_SERIES,
+			VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id));
+		return -ENODEV;
+	}
+
+	dev_info(vha->dev, "%s: Product id: %#llx\n",
+			__func__, props->product_id);
+	dev_info(vha->dev, "%s: Core id: %#llx\n",
+			__func__, props->core_id);
+	dev_info(vha->dev, "%s: MMU version:%d (%dbit)\n",
+			__func__, props->mmu_ver, props->mmu_width);
+	dev_dbg(vha->dev, "%s: supported: %#x\n",
+			__func__, props->features);
+	{
+		uint64_t tmp = IOREAD64_CR_REGIO(CORE_IP_INTEGRATOR_ID);
+		dev_dbg(vha->dev, "%s: ip integrator id: %#llx\n",
+				__func__, tmp);
+		tmp = IOREAD64_CR_REGIO(CORE_IP_CHANGELIST);
+		dev_dbg(vha->dev, "%s: ip change list: %llu\n", __func__, tmp);
+	}
+
+	/* Read OCM info */
+	{
+		uint64_t ip_config1 = IOREAD64_CR_REGIO(CORE_IP_CONFIG1);
+		/* Power of 2 Look-up table */
+		uint8_t pow_2_lut[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
+
+		/* LOCM per core size */
+		locm_size_kb = VHA_CR_GETBITS(CORE_IP_CONFIG1, CORE_OCM_RAM_SIZE_4KB, ip_config1) * 4;
+		/* SOCM total size */
+		socm_size_kb = VHA_CR_GETBITS(CORE_IP_CONFIG1, SYS_OCM_RAM_SIZE_4KB, ip_config1) * 4;
+		/* SOCM number of subbanks per bank array which is stored in hw reg as Log2  */
+		socm_num_sb = pow_2_lut[VHA_CR_GETBITS(CORE_IP_CONFIG1, SYS_OCM_NUM_SUBBANKS_LOG2, ip_config1)];
+		/* SOCM number of arrays per bank group */
+		socm_num_ba = 1 + VHA_CR_GETBITS(CORE_IP_CONFIG1, SYS_OCM_NUM_BANK_ARRAYS_MIN1, ip_config1);
+		/* SOCM number of bank groups */
+		socm_num_bg = 1 + VHA_CR_GETBITS(CORE_IP_CONFIG1, SYS_OCM_NUM_BANK_GROUPS_MIN1, ip_config1);
+		/* External memory interface width which is stored in hw reg as 8 * Log2 */
+		ext_mem_bus_width = 8 * pow_2_lut[VHA_CR_GETBITS(CORE_IP_CONFIG1, EXT_MEM_BUS_WIDTH, ip_config1)];
+	}
+
+	if (locm_size_kb) {
+		props->locm_size_bytes = locm_size_kb * 1024;
+		/* The user may want to limit local OCM ... */
+		if (onchipmem_size) {
+			if (onchipmem_size < props->locm_size_bytes) {
+				dev_warn(vha->dev, "%s:Limiting local onchip memory to %u bytes (available:%u)\n",
+						__func__, onchipmem_size, props->locm_size_bytes);
+				props->locm_size_bytes = onchipmem_size;
+			} else if (onchipmem_size > props->locm_size_bytes) {
+				dev_warn(vha->dev, "%s: User defined local onchip memory size exceeded (%u > %u))\n",
+						__func__, onchipmem_size, props->locm_size_bytes);
+			}
+		}
+	} else {
+		props->locm_size_bytes = onchipmem_size;
+	}
+
+	if (socm_size_kb) {
+		props->socm_size_bytes = socm_size_kb * 1024;
+		/* The user may want to limit shared OCM ... */
+		if (shared_onchipmem_size) {
+			if (shared_onchipmem_size < props->socm_size_bytes) {
+				dev_warn(vha->dev, "%s:Limiting shared onchip memory to %u bytes (available:%u)\n",
+						__func__, shared_onchipmem_size, props->socm_size_bytes);
+				props->socm_size_bytes = shared_onchipmem_size;
+			} else if (shared_onchipmem_size > props->socm_size_bytes) {
+				dev_warn(vha->dev, "%s: User defined shared onchip memory size exceeded (%u > %u))\n",
+						__func__, shared_onchipmem_size, props->socm_size_bytes);
+			}
+		}
+		{
+			/* SOCM per core must be a multiple of socm_total_sb & ext_mem_bus_width */
+			uint16_t socm_total_sb = socm_num_sb * socm_num_ba * socm_num_bg;
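+			/* Worked example of the rounding below (illustrative sizes):
+			 * 2048 KB of SOCM, 2 cores, socm_total_sb = 8, 32-byte bus ->
+			 * quantum = 2 * 8 * 32 = 512; 2097152 / 512 = 4096, scaled
+			 * back by 8 * 32 = 256 gives 1048576 bytes (1024 KB) per
+			 * core, a multiple of both constraints. */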
+			if (socm_total_sb && ext_mem_bus_width) {
+				/* The below division will round down */
+				props->socm_core_size_bytes = props->socm_size_bytes /
+						(props->num_cnn_core_devs * socm_total_sb * ext_mem_bus_width);
+				/* Scale it back */
+				props->socm_core_size_bytes *= socm_total_sb * ext_mem_bus_width;
+			} else {
+				/* Divide by number of cores as for dummy driver */
+				props->socm_core_size_bytes = shared_onchipmem_size / props->num_cnn_core_devs;
+				dev_warn(vha->dev, "%s: Shared onchip memory size per core can't be rounded"
+						" based on SB:%d BA:%d BG:%d BW:%d!\n", __func__,
+						socm_num_sb, socm_num_ba, socm_num_bg, ext_mem_bus_width);
+			}
+		}
+	} else {
+		props->socm_size_bytes = shared_onchipmem_size;
+		/* Just divide by number of cores (dummy driver) */
+		props->socm_core_size_bytes = shared_onchipmem_size / props->num_cnn_core_devs;
+	}
+
+	dev_info(vha->dev, "%s: Total onchip memory, Local: %u [kB], Shared total: %u [kB]"
+			" per core: %u [kB]\n", __func__, props->locm_size_bytes / 1024,
+			props->socm_size_bytes / 1024, props->socm_core_size_bytes / 1024);
+
+	dev_info(vha->dev, "%s: Devices: DUMMY:%u CNN:%u\n", __func__,
+			props->dummy_dev ? props->num_cnn_core_devs : 0,
+			props->dummy_dev ? 0 : props->num_cnn_core_devs);
+
+	return 0;
+}
+
+/* prepare CRC and DEBUG data buffers */
+void vha_dbg_prepare_hwbufs(struct vha_session *session, struct vha_cmd *cmd,
+		struct vha_crc_config_regs *regs)
+{
+	struct vha_dev *vha = session->vha;
+	uint8_t mask = cmd->hw_sched_info.core_mask;
+
+	if (session->cnn_dbg.cnn_crc_buf[0] || vha->cnn_combined_crc_enable) {
+		uint8_t id;
+
+		/* Note: all buffers have the same size */
+		img_pdump_printf("-- Select cores\n");
+		IOWRITE64_CR_PDUMP((uint64_t)mask, CORE_CTRL_INDIRECT);
+
+		/* enable CRC: address + mode */
+		if (session->cnn_dbg.cnn_crc_buf[0])
+			regs->crc_control |= VHA_CR_SETBITS(OS0_CNN_CRC_CONTROL, CNN_CRC_ENABLE,
+								 session->cnn_dbg.cnn_crc_mode);
+		if (vha->cnn_combined_crc_enable)
+			regs->crc_control |= VHA_CR_SETBITS(OS0_CNN_CRC_CONTROL, COMBINED_CNN_CRC_ENABLE, 1);
+		img_pdump_printf("-- CRC_CONTROL=%llx buf 'CRC' size=%zx\n",
+				regs->crc_control,
+				session->cnn_dbg.cnn_crc_buf[0] ? session->cnn_dbg.cnn_crc_buf[0]->size : 0);
+		IOWRITE64_CR_PDUMP(regs->crc_control, OS0_CNN_CRC_CONTROL);
+		img_pdump_printf("-- CRC_MASK=%#x\n", session->cnn_dbg.cnn_crc_mask);
+		IOWRITE64_CR_PDUMP(session->cnn_dbg.cnn_crc_mask, OS0_CNN_CRC_MASK_CTRL);
+		regs->crc_mask_ctrl = session->cnn_dbg.cnn_crc_mask;
+
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (mask & (1 << id)) {
+				/* Select core to be set */
+				IOWRITE64_CR_PDUMP(VHA_CR_SETBITS(CORE_CTRL_INDIRECT, MASK, (1 << id)),
+										 CORE_CTRL_INDIRECT);
+				if (session->cnn_dbg.cnn_crc_buf[0]) {
+					struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[id];
+					IOWRITE_PDUMP_BUFADDR(session, buf, 0, VHA_CR_OS0_CNN_CRC_ADDRESS);
+					SET_BUFADDR(session, buf, 0, &regs->crc_address[id]);
+				}
+
+				if (vha->cnn_combined_crc_enable) {
+					struct vha_buffer *buf = session->cnn_dbg.cnn_combined_crc;
+					IOWRITE_PDUMP_BUFADDR(session, buf, id * VHA_COMBINED_CRC_CORE_OFFSET,
+						VHA_CR_OS0_COMBINED_CNN_CRC_ADDRESS);
+					SET_BUFADDR(session, buf, id * VHA_COMBINED_CRC_CORE_OFFSET, &regs->crc_combined_address[id]);
+				}
+			}
+		}
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
+		uint64_t val64;
+		uint8_t id;
+
+		/* Note: all buffers have the same size */
+		img_pdump_printf("-- Select cores\n");
+		IOWRITE64_CR_PDUMP((uint64_t)mask, CORE_CTRL_INDIRECT);
+
+		/* enable DEBUG: address, perf mode, band mode */
+		img_pdump_printf("-- DEBUG_CONTROL=%u,%u buf 'DBG' size=%zx\n",
+				GET_CNN_DBG_MODE(PERF, session), GET_CNN_DBG_MODE(BAND, session),
+				session->cnn_dbg.cnn_dbg_buf[0]->size);
+
+		val64 = VHA_CR_ALIGN_SETBITS(OS0_CNN_DEBUG_SIZE_LEGACY,
+						CNN_DEBUG_SIZE, session->cnn_dbg.cnn_dbg_buf[0]->size);
+		IOWRITE64_CR_PDUMP(val64, OS0_CNN_DEBUG_SIZE_LEGACY);
+
+		/* Set the CONTROL register only if requested */
+		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
+
+			val64 = VHA_CR_SETBITS(OS0_CNN_DEBUG_CONTROL, CNN_PERF_ENABLE,
+									GET_CNN_DBG_MODE(PERF, session));
+			val64 |= VHA_CR_SETBITS(OS0_CNN_DEBUG_CONTROL, CNN_BAND_ENABLE,
+									GET_CNN_DBG_MODE(BAND, session));
+			img_pdump_printf("IF DUMP_DBG\n");
+			IOWRITE64_CR_PDUMP(val64, OS0_CNN_DEBUG_CONTROL);
+			img_pdump_printf("FI DUMP_DBG\n");
+		}
+
+		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
+			if (mask & (1 << id)) {
+				struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[id];
+				/* Select core to be set */
+				IOWRITE64_CR_PDUMP(VHA_CR_SETBITS(CORE_CTRL_INDIRECT, MASK, (1 << id)),
+												 CORE_CTRL_INDIRECT);
+				IOWRITE_PDUMP_BUFADDR(session, buf, 0, VHA_CR_OS0_CNN_DEBUG_ADDRESS);
+			}
+		}
+	}
+
+	/* WM Performance & Bandwidth measurement */
+	if (WM_DBG_MODE_ON(PERF) || WM_DBG_MODE_ON(BAND)) {
+		uint64_t dbg_ctrl = 0;
+
+		img_pdump_printf("IF CHECK_PERF_BW\n");
+		if (WM_DBG_MODE_ON(PERF)) /* PERF */
+			dbg_ctrl = VHA_SET_FIELD_SIMPLE_VAL(WM_DEBUG_CONTROL, PERF_ENABLE, EN);
+		if (WM_DBG_MODE_ON(BAND)) { /* BW */
+			uint64_t hw_brns = cmd->user_cmd.cmd_type == VHA_CMD_CNN_SUBMIT_MULTI ?
+					((struct vha_user_cnn_submit_multi_cmd*)&cmd->user_cmd)->hw_brns :
+					((struct vha_user_cnn_submit_cmd*)&cmd->user_cmd)->hw_brns;
+
+			IOWRITE64_CR_PDUMP(VHA_CR_NOC_BWM_CONTROL_MASKFULL, NOC_BWM_CONTROL);
+			dbg_ctrl |= VHA_SET_FIELD_SIMPLE_VAL(WM_DEBUG_CONTROL, BW_ENABLE, EN);
+			if (VHA_IS_BRN(hw_brns, 71649)) {
+				img_pdump_printf("-- BRN71649_START\n");
+				IOWRITE64_CR_PDUMP(16, IDLE_HYSTERESIS_COUNT);
+				IOWRITE64_CR_PDUMP(16, PWR_MAN_HYSTERESIS);
+				img_pdump_printf("-- BRN71649_END\n");
+			}
+		}
+
+		IOWRITE64_CR_PDUMP(dbg_ctrl, WM_DEBUG_CONTROL);
+		img_pdump_printf("FI CHECK_PERF_BW\n");
+	}
+}
+
+/* flush CRC and DEBUG data buffers */
+void vha_dbg_flush_hwbufs(struct vha_session *session, char checkpoint, uint8_t mask)
+{
+	struct vha_dev *vha = session->vha;
+
+	if (session->cnn_dbg.cnn_dbg_flush != checkpoint)
+		return;
+
+	if (session->cnn_dbg.cnn_crc_buf[0] || vha->cnn_combined_crc_enable) {
+		int id;
+		/* Note: all buffers have the same size */
+		/*
+		 * TOBEDONE: calculate CRC buffer size based
+		 * on num passes, num layers, etc
+		 */
+		img_pdump_printf("-- Save signatures\n");
+		img_pdump_printf("IF SKIP_CHECK_CRCS\n");
+		img_pdump_printf("COM Not checking CRCs!\n");
+		img_pdump_printf("ELSE SKIP_CHECK_CRCS\n");
+		img_pdump_printf("COM Checking CRCs ...\n");
+		if (session->cnn_dbg.cnn_crc_buf[0]) {
+			for (id = 0; id < session->vha->hw_props.num_cnn_core_devs; id++) {
+				if (mask & (1 << id)) {
+					struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[id];
+					vha_pdump_sab_buf(session, PDUMP_CRC, buf, 0, buf->size);
+				}
+			}
+		}
+		if (vha->cnn_combined_crc_enable) {
+			struct vha_buffer *buf = session->cnn_dbg.cnn_combined_crc;
+			vha_pdump_sab_buf(session, PDUMP_CRC_CMB, buf, 0, buf->size);
+		}
+		img_pdump_printf("FI SKIP_CHECK_CRCS\n");
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
+		int id;
+
+		img_pdump_printf("-- Save DEBUG info\n");
+		img_pdump_printf("IF DUMP_DBG\n");
+		img_pdump_printf("COM Dumping debug data ...\n");
+		for (id = 0; id < session->vha->hw_props.num_cnn_core_devs; id++) {
+			if (mask & (1 << id)) {
+				struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[id];
+				vha_pdump_sab_buf(session, PDUMP_DBG, buf, 0, buf->size);
+			}
+		}
+		img_pdump_printf("ELSE DUMP_DBG\n");
+		img_pdump_printf("COM Not dumping debug data!\n");
+		img_pdump_printf("FI DUMP_DBG\n");
+	}
+}
+
+/* stop capturing CRC and DEBUG data */
+void vha_dbg_stop_hwbufs(struct vha_session *session, uint8_t mask)
+{
+	struct vha_dev *vha = session->vha;
+
+	/* Flush hw debug buffers */
+	vha_dbg_flush_hwbufs(session, 0, mask);
+
+	if (session->cnn_dbg.cnn_crc_buf[0]) {
+		img_pdump_printf("-- Select cores\n");
+		IOWRITE64_CR_PDUMP((uint64_t)mask, CORE_CTRL_INDIRECT);
+		IOWRITE64_CR_PDUMP(0, OS0_CNN_CRC_CONTROL);
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0]) {
+		uint64_t size = 0;
+		int id;
+
+		for (id = 0; id < session->vha->hw_props.num_cnn_core_devs; id++) {
+			uint64_t val;
+			if (mask & (1 << id)) {
+				val = IOREAD64_CR_REGIO(OS0_CNN_DEBUG_STATUS);
+				if (val > size)
+					size = val;
+			}
+		}
+
+		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
+			img_pdump_printf("IF DUMP_DBG\n");
+			img_pdump_printf("-- Select cores\n");
+			IOWRITE64_CR_PDUMP((uint64_t)mask, CORE_CTRL_INDIRECT);
+			IOWRITE64_CR_PDUMP(0, OS0_CNN_DEBUG_CONTROL);
+			/* just give a hint in the pdump:
+			 * dummy device returns 0 */
+			img_pdump_printf(
+				"-- POL64 :REG:%#x 0 0 0 1 1 -- DEBUG_STATUS=%llx\n",
+				VHA_CR_OS0_CNN_DEBUG_STATUS,
+				size);
+			img_pdump_printf("FI DUMP_DBG\n");
+		}
+	}
+}
+
+uint64_t vha_dbg_rtm_read(struct vha_dev *vha, uint64_t addr)
+{
+	/* RTM reads are not implemented here; always return 0. */
+	return 0ULL;
+}
+
+const struct vha_reg vha_regs[] = {
+#define REG_DESC(reg) VHA_CR_##reg, VHA_CR_##reg##_MASKFULL
+	{"product_id           ", REG_DESC(PRODUCT_ID)},
+	{"core_id              ", REG_DESC(CORE_ID)},
+	{"integrator_id        ", REG_DESC(CORE_IP_INTEGRATOR_ID)},
+	{"ip_changelist        ", REG_DESC(CORE_IP_CHANGELIST)},
+	{"core_ip_config       ", REG_DESC(CORE_IP_CONFIG)},
+#undef REG_DESC
+	{NULL                   , 0},
+};
+
+#ifdef VHA_SCF
+void wd_timer_callback(struct work_struct *work)
+{
+	struct vha_dev *vha =
+			container_of(work, struct vha_dev, swd_dwork.work);
+	struct vha_cmd *cmd = NULL;
+	unsigned int wm_id;
+
+	mutex_lock(&vha->lock);
+
+	for (wm_id = 0; wm_id < vha->hw_props.num_cnn_core_devs; wm_id++) {
+		cmd = vha->pendcmd[wm_id].cmd;
+		if (cmd) {
+			uint8_t core_mask = vha_wm_get_cores(vha, wm_id);
+			uint8_t layer_count;
+			uint8_t pass_count;
+			bool lockup = false;
+			uint64_t exec_time_us;
+			uint64_t cmd_time_us;
+			struct TIMESPEC now;
+
+			GETNSTIMEOFDAY(&now);
+
+			if (cmd->user_cmd.flags & VHA_EXEC_TIME_SET) {
+				struct vha_user_cnn_submit_multi_cmd *cnn_user_cmd =
+					(struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd;
+				cmd_time_us = cnn_user_cmd->exec_time;
+			} else if (vha->swd_timeout_default)
+				cmd_time_us = vha->swd_timeout_default;
+			else /* SW WDT disabled for this cmd */
+				continue;
+
+			cmd_time_us *= vha->swd_timeout_m0;
+
+			if (get_timespan_us(&cmd->hw_proc_start, &now, &exec_time_us)) {
+				uint64_t expected_exec_time;
+
+				/* do_div() divides cmd_time_us in place and returns the
+				 * remainder, so use the quotient it leaves behind. */
+				do_div(cmd_time_us, 100);
+				expected_exec_time = cmd_time_us + vha->swd_timeout_m1;
+
+				if (exec_time_us > expected_exec_time) {
+					lockup = true;
+					dev_err(vha->dev, "SW WDT lockup detected\n"
+									  "    measured time: %llu\n"
+									  "    cmd time: %llu\n"
+									  "    cmd expected_exec_time: %llu\n",
+									  exec_time_us, cmd_time_us, expected_exec_time);
+				}
+			}
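+
+			/* Numeric sketch of the threshold above (illustrative module
+			 * params): exec_time = 10000 us, swd_timeout_m0 = 150,
+			 * swd_timeout_m1 = 500 -> threshold = 10000 * 150 / 100 + 500
+			 * = 15500 us; a measured span beyond that flags a lockup. */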
+
+			while (core_mask != 0 && !lockup) {
+				uint32_t core_id = VHA_CORE_MASK_TO_ID(core_mask);
+				uint64_t cnn_status;
+				uint64_t cnn_status2;
+
+				core_mask &= ~(VHA_CORE_ID_TO_MASK(core_id));
+
+				IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT, MASK, (1 << core_id)),
+										 CORE_CTRL_INDIRECT);
+
+				cnn_status = IOREAD64_CR_REGIO(OS0_CNN_STATUS);
+				cnn_status2 = IOREAD64_CR_REGIO(OS0_CNN_STATUS2);
+
+				layer_count = VHA_CR_GETBITS_OS(CNN_STATUS, LAYER_COUNT, cnn_status);
+				pass_count = VHA_CR_GETBITS_OS(CNN_STATUS2, PASS_COUNT, cnn_status2);
+
+				if (cmd->layer_count[core_id] == layer_count &&
+					cmd->pass_count[core_id] == pass_count) {
+					lockup = true;
+					dev_err(vha->dev, "SW WDT lockup detected\n"
+									  "    layer_count:  %d\n"
+									  "    pass_count: %d\n", layer_count, pass_count);
+				}
+
+				cmd->layer_count[core_id] = layer_count;
+				cmd->pass_count[core_id] = pass_count;
+			}
+
+			if (lockup) {
+				if (vha_observers.error)
+					vha_observers.error(vha->id, cmd->session->id, cmd->user_cmd.cmd_id,  -EIO);
+
+				/* Update stats. */
+				vha->stats.total_failures++;
+				vha->stats.cnn_kicks_completed++;
+				VHA_INC_WL_STAT(vha, kicks_completed, cmd);
+				vha_wm_reset(vha, &cmd->hw_sched_info);
+				/* Free command resources. */
+				vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+				vha_dev_free_cmd_res(vha, cmd, true);
+
+				/* Move command queue. */
+				vha_do_queued_cmd(vha, wm_id);
+				/* Handle actual command */
+				vha_handle_cmd(vha, wm_id,  -EIO, -EIO, VHA_RSP_ERROR(SW_WDT_EXPIRED));
+			}
+		}
+	}
+
+	if (vha->state == VHA_STATE_ON)
+		schedule_delayed_work(&vha->swd_dwork, msecs_to_jiffies(vha->swd_period));
+
+	mutex_unlock(&vha->lock);
+}
+#endif

+ 261 - 0
driver/vha/multi/vha_mmu.c

@@ -0,0 +1,261 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include <uapi/vha.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include <vha_regs.h>
+
+int vha_mmu_flush_ctx(struct vha_dev *vha, int ctx_id)
+{
+	int ret;
+	uint64_t inval =
+			VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL, PC, EN) |
+			VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL, PD, EN) |
+			VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL, PT, EN);
+	uint64_t pend = VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL_STATUS, PENDING, EN);
+
+	/* No need to handle mmu cache, when core is already offline */
+	if (vha->state == VHA_STATE_OFF)
+		return 0;
+
+#ifdef VHA_SCF
+	if (vha->hw_props.supported.parity && !vha->parity_disable) {
+		/* If pending bit is set then parity bit must be set as well ! */
+		pend |= VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL_STATUS, PARITY, EN);
+	}
+#endif
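+	/* Flush sequence: wait for any in-flight invalidation to drain
+	 * (PENDING clear; parity is also checked on SCF builds), then request
+	 * a PC/PD/PT invalidate for this context, or for all contexts when
+	 * ctx_id is VHA_INVALID_ID. */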
+	ret = IOPOLL64_CR_PDUMP_PARITY(0, 30, 150, pend, OS0_MMU_CTRL_INVAL_STATUS);
+	if (ret) {
+		dev_err(vha->dev, "Error during MMU ctx %d flush\n", ctx_id);
+	} else {
+		if (unlikely(ctx_id == VHA_INVALID_ID))
+			inval |= VHA_SET_FIELD_SIMPLE_VAL(OS0_MMU_CTRL_INVAL, ALL_CONTEXTS, EN);
+		else
+			inval |= VHA_CR_SETBITS(OS0_MMU_CTRL_INVAL, CONTEXT, (uint64_t)ctx_id);
+		dev_dbg(vha->dev, "%s: ctx_id:%d (0x%llx)\n", __func__, ctx_id, inval);
+
+		img_pdump_printf("-- MMU invalidate TLB caches\n");
+		IOWRITE64_CR_PDUMP(inval, OS0_MMU_CTRL_INVAL);
+	}
+
+	return ret;
+}
+
+/* this function is called from img_mmu, to handle cache issues */
+int vha_mmu_callback(enum img_mmu_callback_type callback_type,
+			int buf_id, void *data)
+{
+	struct vha_session *session = data;
+	struct vha_dev *vha = session->vha;
+	int ctx_id;
+	int ret = 0;
+
+	if (!vha)
+		return 0;
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
+		ret |= vha_mmu_flush_ctx(vha, session->mmu_ctxs[ctx_id].hw_id);
+
+	if (ret) {
+		dev_err(vha->dev, "Error during MMU flush (2), resetting the HW...\n");
+		/* Rollback commands being processed */
+		vha_rollback_cmds(vha);
+		/* Perform full reset */
+		vha_dev_stop(vha, true);
+		/* Reschedule */
+		vha_chk_cmd_queues(vha, true);
+	}
+
+	return ret;
+}
+
+static void do_mmu_ctx_setup(struct vha_dev *vha,
+			uint8_t hw_id, int pc_bufid, uint32_t pc_baddr)
+{
+	img_pdump_printf("-- Setup MMU context:%d\n", hw_id);
+	IOWRITE64_CR_PDUMP(hw_id, OS0_MMU_CBASE_MAPPING_CONTEXT);
+
+	if (!vha->mmu_base_pf_test) {
+		IOWRITE64(vha->reg_base, VHA_CR_OS0_MMU_CBASE_MAPPING, pc_baddr);
+
+		/* This is physical address so we need use MEM_OS0:BLOCK tag
+		 * when pdump'ing. */
+		img_pdump_printf("-- Setup MMU base address\n"
+				"WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:0 -- 'PC'\n"
+				"SHR "_PMEM_":$0 "_PMEM_":$0 %d\n"
+				"WRW64 :REG:%#x "_PMEM_":$0\n", pc_bufid,
+				IMG_MMU_PC_ADDR_SHIFT,
+				VHA_CR_OS0_MMU_CBASE_MAPPING);
+		dev_dbg(vha->dev, "%s: setting hardware ctx id:%u\n", __func__, hw_id);
+	} else
+		dev_info(vha->dev, "Bringup test: force MMU base page fault\n");
+}
+
+int vha_mmu_setup(struct vha_session *session)
+{
+	struct vha_dev *vha = session->vha;
+	int ctx_id;
+	int ret = 0;
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
+		dev_dbg(vha->dev,
+				"%s: mode:%d session ctxid:%x active ctxid:%x\n",
+				__func__, vha->mmu_mode,
+				session->mmu_ctxs[ctx_id].id,
+				vha->active_mmu_ctx);
+
+	if (vha->mmu_mode == VHA_MMU_DISABLED) {
+		img_pdump_printf("-- MMU bypass ON\n");
+		IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
+			VHA_CR_OS(MMU_CTRL));
+		return 0;
+	}
+
+	/* Using model context to track active context */
+	if (session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id == vha->active_mmu_ctx)
+		return 0;
+
+	img_pdump_printf("-- MMU_SETUP_BEGIN\n");
+	img_pdump_printf("-- MMU bypass OFF\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(MMU_CTRL));
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
+		do_mmu_ctx_setup(vha, session->mmu_ctxs[ctx_id].hw_id,
+				session->mmu_ctxs[ctx_id].pc_bufid,
+				session->mmu_ctxs[ctx_id].pc_baddr);
+
+		/* If there are multiple sessions using the same mmu hardware context
+		 * we need to flush caches for the old context (id is the same).
+		 * This will happen when number of processes is > VHA_MMU_MAX_HW_CTXS */
+		if (vha->mmu_ctxs[session->mmu_ctxs[ctx_id].hw_id] > 1) {
+			dev_dbg(vha->dev, "%s: flushing shared ctx id:%u\n",
+						__func__, session->mmu_ctxs[ctx_id].hw_id);
+			ret = vha_mmu_flush_ctx(vha, session->mmu_ctxs[ctx_id].hw_id);
+			if (ret) {
+				dev_err(vha->dev, "Error during MMU flush, resetting the HW...\n");
+				goto mmu_setup_err;
+			}
+		}
+	}
+
+	/* Using model context to track context change */
+	vha->active_mmu_ctx = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id;
+	dev_dbg(vha->dev, "%s: update ctx id active:%x pc:%#x\n",
+			__func__, vha->active_mmu_ctx,
+			session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_baddr <<
+			VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
+mmu_setup_err:
+	img_pdump_printf("-- MMU_SETUP_END\n");
+	return ret;
+}
+
+void vha_mmu_status(struct vha_dev *vha, uint8_t core_mask)
+{
+	const char levels[][5] = {"PT", "PD", "PC", "BASE"};
+	uint32_t core_mmu_fault_reg_set_base = VHA_CR_CORE0_MMU_FAULT_STATUS1;
+	uint32_t core_mmu_fault_reg_set_offset =
+			VHA_CR_CORE1_MMU_FAULT_STATUS1 - VHA_CR_CORE0_MMU_FAULT_STATUS1;
+	uint32_t core_mmu_fault_status1_offset =
+			VHA_CR_CORE0_MMU_FAULT_STATUS1 - core_mmu_fault_reg_set_base;
+	uint32_t core_mmu_fault_status2_offset =
+			VHA_CR_CORE0_MMU_FAULT_STATUS2 - core_mmu_fault_reg_set_base;
+
+	while (core_mask) {
+		uint8_t id = ffs(core_mask) - 1;
+		uint64_t status1 = IOREAD64_REGIO(core_mmu_fault_reg_set_base +
+										id * core_mmu_fault_reg_set_offset +
+										core_mmu_fault_status1_offset);
+		uint64_t status2 = IOREAD64_REGIO(core_mmu_fault_reg_set_base +
+										id * core_mmu_fault_reg_set_offset +
+										core_mmu_fault_status2_offset);
+
+#define MMU_FAULT_GETBITS(sreg, field, val) \
+	_get_bits(val, VHA_CR_CORE0_MMU_FAULT_ ## sreg ## _ ## field ## _SHIFT, \
+			~VHA_CR_CORE0_MMU_FAULT_ ## sreg ## _ ## field ## _CLRMSK)
+
+		uint64_t addr  = MMU_FAULT_GETBITS(STATUS1, ADDRESS, status1);
+		uint8_t level  = MMU_FAULT_GETBITS(STATUS1, LEVEL, status1);
+		uint8_t req_id = MMU_FAULT_GETBITS(STATUS1, REQ_ID, status1);
+		uint8_t ctx    = MMU_FAULT_GETBITS(STATUS1, CONTEXT, status1);
+		uint8_t rnw    = MMU_FAULT_GETBITS(STATUS1, RNW, status1);
+		uint8_t type   = MMU_FAULT_GETBITS(STATUS1, TYPE, status1);
+		uint8_t fault  = MMU_FAULT_GETBITS(STATUS1, FAULT, status1);
+
+		uint8_t bif_id    = MMU_FAULT_GETBITS(STATUS2, BIF_ID, status2);
+		uint8_t tlb_entry = MMU_FAULT_GETBITS(STATUS2, TLB_ENTRY, status2);
+		uint8_t slc_bank  = MMU_FAULT_GETBITS(STATUS2, BANK, status2);
+		uint64_t mapping  = 0;
+
+#undef MMU_FAULT_GETBITS
+
+		/* Select context and read current pc */
+		IOWRITE64_CR_REGIO(ctx, OS0_MMU_CBASE_MAPPING_CONTEXT);
+		mapping = IOREAD64_CR_REGIO(OS0_MMU_CBASE_MAPPING);
+
+		/* false alarm ? */
+		if (!fault)
+			return;
+
+		dev_dbg(vha->dev, "%s: Core%u MMU FAULT: s1:%llx s2:%llx\n",
+				__func__, id, status1, status2);
+
+		dev_warn(vha->dev, "%s: MMU fault while %s @ 0x%llx\n",
+				__func__, (rnw) ? "reading" : "writing", addr << 4);
+		dev_warn(vha->dev, "%s: level:%s Requestor:%x Context:%x Type:%s\n",
+				__func__, levels[level], req_id, ctx,
+				(type == 0) ? "VALID" :
+				(type == 2) ? "READ-ONLY" :
+				"UNKNOWN");
+		dev_warn(vha->dev, "%s: bif_id:%x tlb_entry:%x slc_bank:%x\n",
+				__func__, bif_id, tlb_entry, slc_bank);
+		dev_warn(vha->dev, "%s: current mapping@context%d:%#llx\n",
+				__func__, ctx,
+				mapping <<
+				VHA_CR_OS0_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
+
+		core_mask &= ~(VHA_CORE_ID_TO_MASK(id));
+	}
+}

+ 229 - 0
driver/vha/multi/vha_mt19937.c

@@ -0,0 +1,229 @@
+/*
+	 A C-program for MT19937, with initialization improved 2002/1/26.
+	 Coded by Takuji Nishimura and Makoto Matsumoto.
+
+	 Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+	 All rights reserved.
+	 Copyright (C) 2005, Mutsuo Saito,
+	 All rights reserved.
+
+	 Redistribution and use in source and binary forms, with or without
+	 modification, are permitted provided that the following conditions
+	 are met:
+
+		 1. Redistributions of source code must retain the above copyright
+				notice, this list of conditions and the following disclaimer.
+
+		 2. Redistributions in binary form must reproduce the above copyright
+				notice, this list of conditions and the following disclaimer in the
+				documentation and/or other materials provided with the distribution.
+
+		 3. The names of its contributors may not be used to endorse or promote
+				products derived from this software without specific prior written
+				permission.
+
+	 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+	 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+	 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+	 A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+	 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+	 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+	 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+	 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+	 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+	 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+	 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+	 Any feedback is very welcome.
+	 http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
+	 email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
+
+	 Original code available at:
+	 http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/emt19937ar.html
+*/
+
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vha_mt19937.h"
+
+/* Period parameters */
+#define N 624
+#define M 397
+#define MATRIX_A 0x9908b0dfUL   /* constant vector a */
+#define UPPER_MASK 0x80000000UL /* most significant w-r bits */
+#define LOWER_MASK 0x7fffffffUL /* least significant r bits */
+
+struct vha_mt19937_ctx
+{
+	uint32_t mt[N];    /*!< The array for the state vector.    */
+	int32_t  mti;      /*!< Current index into mt[]; >= N triggers regeneration. */
+	uint32_t mag01[2]; /*!< mag01[x] = x * MATRIX_A  for x=0,1 */
+};
+
+int vha_mt19937_init(uint32_t seed, void **handle)
+{
+	struct vha_mt19937_ctx *ctx;
+
+	/* Check input params. */
+	if (NULL == handle)	{
+		pr_err("%s: invalid handle\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Allocate MT19937 context. */
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL) {
+		pr_err("%s: failed to allocate context\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Initialise MT19937 context with the seed. */
+	ctx->mt[0] = seed & 0xffffffffUL;
+	for (ctx->mti = 1; ctx->mti < N; ctx->mti++) {
+		ctx->mt[ctx->mti] =
+			(1812433253UL * (ctx->mt[ctx->mti - 1] ^ (ctx->mt[ctx->mti - 1] >> 30)) + ctx->mti);
+		/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
+		/* In the previous versions, MSBs of the seed affect   */
+		/* only MSBs of the array mt[].                        */
+		/* 2002/01/09 modified by Makoto Matsumoto             */
+		ctx->mt[ctx->mti] &= 0xffffffffUL;
+		/* for >32 bit machines */
+	}
+
+	ctx->mag01[0] = 0x0UL;
+	ctx->mag01[1] = MATRIX_A;
+
+	*handle = ctx;
+
+	return 0;
+}
+
+int vha_mt19937_deinit(void *handle)
+{
+	struct vha_mt19937_ctx *ctx = (struct vha_mt19937_ctx*)handle;
+
+	/* Check input params. */
+	if (NULL == handle) {
+		pr_err("%s: invalid handle\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Free the MT19937 context. */
+	kfree(ctx);
+
+	return 0;
+}
+
+int vha_mt19937_gen_uint32(void *handle, uint32_t *rand_val)
+{
+	struct vha_mt19937_ctx *ctx = (struct vha_mt19937_ctx*)handle;
+	uint32_t y;
+
+	/* Check input params. */
+	if (NULL == handle) {
+		pr_err("%s: invalid handle\n", __func__);
+		return -EINVAL;
+	}
+	if (NULL == rand_val) {
+		pr_err("%s: invalid rand_val\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Generate N words at one time. */
+	if (ctx->mti >= N) {
+		int kk;
+
+		for (kk = 0; kk < (N - M); kk++) {
+			y = (ctx->mt[kk] & UPPER_MASK) | (ctx->mt[kk + 1] & LOWER_MASK);
+			ctx->mt[kk] = ctx->mt[kk + M] ^ (y >> 1) ^ ctx->mag01[y & 0x1UL];
+		}
+		for (; kk < (N - 1); kk++) {
+			y = (ctx->mt[kk] & UPPER_MASK) | (ctx->mt[kk + 1] & LOWER_MASK);
+			ctx->mt[kk] = ctx->mt[kk + (M - N)] ^ (y >> 1) ^ ctx->mag01[y & 0x1UL];
+		}
+		y = (ctx->mt[N - 1] & UPPER_MASK) | (ctx->mt[0] & LOWER_MASK);
+		ctx->mt[N - 1] = ctx->mt[M - 1] ^ (y >> 1) ^ ctx->mag01[y & 0x1UL];
+
+		ctx->mti = 0;
+	}
+
+	y = ctx->mt[ctx->mti++];
+
+	/* Tempering */
+	y ^= (y >> 11);
+	y ^= (y << 7) & 0x9d2c5680UL;
+	y ^= (y << 15) & 0xefc60000UL;
+	y ^= (y >> 18);
+
+	*rand_val = y;
+
+	return 0;
+}
+
+int vha_mt19937_gen_range(void *handle, uint32_t min, uint32_t max, uint32_t *rand_val)
+{
+	int ret;
+	uint32_t range_width;
+
+	/* Generate 32-bit random value. */
+	ret = vha_mt19937_gen_uint32(handle, rand_val);
+	if (ret != 0)
+		return ret;
+
+	/* Normalise the bounds and calculate the range width; abs() is not
+	 * safe on unsigned operands, so compare explicitly. */
+	if (min > max) {
+		uint32_t tmp = min;
+		min = max;
+		max = tmp;
+	}
+	range_width = max - min;
+
+	/* Map the random value onto [min, max]; note the modulo introduces a
+	 * slight bias for ranges that do not divide 2^32. */
+	if (range_width == 0xffffffffUL)
+		return 0; /* Full range: *rand_val is already valid. */
+	*rand_val = (range_width > 0) ? ((*rand_val % (range_width + 1)) + min) : min;
+
+	return 0;
+}
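+
+/* A minimal usage sketch of the API above (illustrative only; caller and
+ * error handling are hypothetical):
+ *
+ *	void *rng;
+ *	uint32_t val;
+ *
+ *	if (vha_mt19937_init(0x12345678, &rng))
+ *		return;
+ *	vha_mt19937_gen_uint32(rng, &val);        // any 32-bit value
+ *	vha_mt19937_gen_range(rng, 10, 20, &val); // value in [10, 20]
+ *	vha_mt19937_deinit(rng);
+ */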
+
+

+ 93 - 0
driver/vha/multi/vha_mt19937.h

@@ -0,0 +1,93 @@
+/*
+   A C-program for MT19937, with initialization improved 2002/1/26.
+   Coded by Takuji Nishimura and Makoto Matsumoto.
+
+   Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+   All rights reserved.
+   Copyright (C) 2005, Mutsuo Saito
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+     1. Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+
+     2. Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+     3. The names of its contributors may not be used to endorse or promote
+        products derived from this software without specific prior written
+        permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+   Any feedback is very welcome.
+   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
+   email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
+
+   Original code available at:
+   http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/emt19937ar.html
+*/
+
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef VHA_MT19937_H
+#define VHA_MT19937_H
+
+#include <linux/types.h>
+
+int vha_mt19937_init(uint32_t seed, void **handle);
+int vha_mt19937_deinit(void *handle);
+int vha_mt19937_gen_uint32(void *handle, uint32_t *rand_val);
+int vha_mt19937_gen_range(void *handle, uint32_t min, uint32_t max, uint32_t *rand_val);
+
+#endif /* VHA_MT19937_H */
+

+ 391 - 0
driver/vha/multi/vha_regs.h

@@ -0,0 +1,391 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#ifndef VHA_REGS_H
+#define VHA_REGS_H
+
+#include "../vha_io.h"
+
+#if defined(HW_AX3)
+#include <hwdefs/vha_cr_magna.h>
+#else
+#error "No HW layout defined"
+#endif
+
+#if defined(CFG_SYS_MAGNA)
+#include <hwdefs/magna_system.h>
+#endif
+
+#define HW_SERIES (28U)
+
+/* General register macros */
+#define VHA_GET_FIELD_FULL_MASK(reg, field) \
+		(VHA_CR_BITMASK(reg, field) >> VHA_CR_##reg##_##field##_SHIFT)
+#define VHA_SET_FIELD_SIMPLE_VAL(reg, field, type) \
+		(VHA_CR_##reg##_##field##_##type)
+#define VHA_SET_FIELD_SIMPLE_FULL(reg, field) \
+		VHA_CR_BITMASK(reg, field)
+
+#define VHA_WM_ID_TO_MASK(i)   (1 << (i))
+#define VHA_CORE_ID_TO_MASK(i) (1 << (i))
+#define VHA_WM_MASK_TO_ID(m) ({	\
+		uint8_t _ret_ = (m);	\
+		WARN_ON(!_ret_);	\
+		do {		\
+			if (!_ret_) break;	\
+			_ret_ = ffs(_ret_) - 1; \
+		} while (0);	\
+		_ret_;	\
+	})
+#define VHA_CORE_MASK_TO_ID(m) VHA_WM_MASK_TO_ID(m)
+#define VHA_CORE_MASK_TO_NUM(m) ({	\
+		uint8_t _ret_ = (m);	\
+		WARN_ON(!_ret_);	\
+		do {		\
+			if (!_ret_) break;	\
+			_ret_ = _ret_ - ((_ret_ >> 1) & 0x55);	\
+			_ret_ = (_ret_ & 0x33) + ((_ret_ >> 2) & 0x33);	\
+			_ret_ = (((_ret_ + (_ret_ >> 4)) & 0x0F) * 0x01);	\
+		} while (0);	\
+		_ret_;	\
+	})
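+/* VHA_CORE_MASK_TO_NUM above is a branch-free 8-bit population count (SWAR):
+ * bits are summed pairwise, then per nibble, then across the byte.
+ * Worked example: m = 0x2c (0b00101100) -> 3 cores, while
+ * VHA_CORE_MASK_TO_ID(0x2c) returns 2, the index of the lowest set bit. */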
+
+#if defined(CONFIG_VHA_DUMMY)
+#define VHA_LOCK_WM()
+#define VHA_UNLOCK_WM()
+#else
+#define VHA_LOCK_WM() spin_lock_irq(&vha->irq_lock)
+#define VHA_UNLOCK_WM() spin_unlock_irq(&vha->irq_lock)
+#endif
+
+#define VHA_SELECT_WM(wm) ({	\
+		uint64_t reg = 0;\
+		uint8_t c = 10;\
+		IOWRITE64_CR_PDUMP(VHA_CR_SETBITS(TLC_WM_INDIRECT, ADDRESS, (uint64_t)wm), \
+											 TLC_WM_INDIRECT);	\
+		do {	\
+			reg = IOREAD64_CR_REGIO(TLC_WM_INDIRECT); \
+			c--;\
+		} while ((reg != wm) && c > 0);	\
+		WARN_ON(c == 0);	\
+	})
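+/* VHA_SELECT_WM above latches the WM index into TLC_WM_INDIRECT and reads it
+ * back, retrying up to 10 times, so that subsequent WM-indirect register
+ * accesses target the requested WM; callers are expected to hold the WM lock
+ * (VHA_LOCK_WM) across the selection and the accesses that depend on it. */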
+
+/* Clock calibration defines. */
+#define VHA_CALIBRATION_WM_ID     0
+#define VHA_CALIBRATION_CORE_ID   0
+#define VHA_CALIBRATION_CORE_MASK (1 << VHA_CALIBRATION_CORE_ID)
+
+/* Event macro definitions */
+#define VHA_SYS_EVENT_TYPE(name) \
+		VHA_CR_BITMASK(SYS_EVENT_TYPE, name)
+#define VHA_WM_EVENT_TYPE(name) \
+		VHA_CR_BITMASK(WM_EVENT_TYPE, name)
+#define VHA_CORE_EVENT_TYPE(name) \
+		VHA_CR_BITMASK(CORE_EVENT_TYPE, name)
+#define VHA_IC_EVENT_TYPE(name) \
+		VHA_CR_BITMASK(INTERCONNECT_EVENT_TYPE, name)
+
+#define VHA_SYS_EVENTS ( \
+		VHA_SYS_EVENT_TYPE(RAM_INIT_DONE    ) | \
+		VHA_SYS_EVENT_TYPE(MEMBUS_RESET_DONE))
+#ifdef VHA_SCF
+#define VHA_SYS_SCF_ERR_EVENTS ( \
+		VHA_SYS_EVENT_TYPE(LOGIC_ERROR  ) | \
+		VHA_SYS_EVENT_TYPE(RAM_CORRECTION  ) | \
+		VHA_SYS_EVENT_TYPE(RAM_DETECTION   ) | \
+		VHA_SYS_EVENT_TYPE(MMU_PARITY_ERROR) | \
+		VHA_SYS_EVENT_TYPE(AXI_MEMORY_PARITY_ERROR))
+
+#else
+#define VHA_SYS_SCF_ERR_EVENTS (0)
+#endif
+#define VHA_SYS_ERR_EVENTS ( \
+		VHA_SYS_SCF_ERR_EVENTS | \
+		VHA_SYS_EVENT_TYPE(LSYNC_INV_REQ   ) | \
+		VHA_SYS_EVENT_TYPE(SYS_MEM_WDT     ) | \
+		VHA_SYS_EVENT_TYPE(MMU_PAGE_FAULT  ) | \
+		VHA_SYS_EVENT_TYPE(AXI_ERROR       ))
+#define VHA_SYS_EVENTS_DEFAULT ( \
+		VHA_SYS_EVENTS | \
+		VHA_SYS_ERR_EVENTS)
+
+#define VHA_WM_EVENTS ( \
+		VHA_WM_EVENT_TYPE(RESPONSE_FIFO_READY))
+#ifdef VHA_SCF
+#define VHA_WM_SCF_ERR_EVENTS ( \
+		VHA_WM_EVENT_TYPE(LOGIC_FAULT   ))
+#else
+#define VHA_WM_SCF_ERR_EVENTS (0)
+#endif
+#define VHA_WM_ERR_EVENTS ( \
+		VHA_WM_SCF_ERR_EVENTS | \
+		VHA_WM_EVENT_TYPE(WM_WL_WDT     ) | \
+		VHA_WM_EVENT_TYPE(WM_WL_IDLE_WDT) | \
+		VHA_WM_EVENT_TYPE(WM_SOCIF_WDT  ))
+#define VHA_WM_EVENTS_DEFAULT ( \
+		VHA_WM_EVENTS | \
+		VHA_WM_ERR_EVENTS)
+
+#define VHA_CORE_EVENTS ( \
+		VHA_CORE_EVENT_TYPE(CNN_COMPLETE     ))
+#ifdef VHA_SCF
+#define VHA_CORE_SCF_ERR_EVENTS ( \
+		VHA_CORE_EVENT_TYPE(RAM_CORRECTION  ) | \
+		VHA_CORE_EVENT_TYPE(RAM_DETECTION   ) | \
+		VHA_CORE_EVENT_TYPE(LOGIC_ERROR     ))
+#else
+#define VHA_CORE_SCF_ERR_EVENTS (0)
+#endif
+#define VHA_CORE_ERR_EVENTS ( \
+		VHA_CORE_SCF_ERR_EVENTS | \
+		VHA_CORE_EVENT_TYPE(CNN_ERROR        ) | \
+		VHA_CORE_EVENT_TYPE(CORE_SYNC_ERROR ) | \
+		VHA_CORE_EVENT_TYPE(CORE_WDT     ) | \
+		VHA_CORE_EVENT_TYPE(CORE_MEM_WDT    ))
+#define VHA_CORE_EVENTS_DEFAULT ( \
+		VHA_CORE_EVENTS | \
+		VHA_CORE_ERR_EVENTS)
+
+#define VHA_IC_EVENTS (0)
+#ifdef VHA_SCF
+#define VHA_IC_SCF_ERR_EVENTS ( \
+		VHA_IC_EVENT_TYPE(LOGIC_ERROR     ))
+#else
+#define VHA_IC_SCF_ERR_EVENTS (0)
+#endif
+#define VHA_IC_ERR_EVENTS ( \
+		VHA_IC_SCF_ERR_EVENTS | \
+		VHA_IC_EVENT_TYPE(LOCKSTEP_ERROR) | \
+		VHA_IC_EVENT_TYPE(SOCIF_READ_MISMATCH) | \
+		VHA_IC_EVENT_TYPE(SOCIF_READ_UNRESPONSIVE))
+#define VHA_IC_EVENTS_DEFAULT ( \
+		VHA_IC_EVENTS | \
+		VHA_IC_ERR_EVENTS)
+
+/* Clock macro definitions */
+#define VHA_CLOCKS_MULTI_ALL 0xff
+#define VHA_SPREAD_MASK(m) \
+		(((m * 0x0101010101010101ULL & 0x8040201008040201ULL) * \
+				0x0102040810204081ULL >> 49) & 0x5555)
+#define VHA_SET_CLOCKS(mask, mode) \
+		(VHA_SPREAD_MASK(mask) * VHA_CR_SYS_CLK_CTRL0_MODE_##mode)
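+
+/* VHA_SPREAD_MASK expands an 8-bit instance mask so that bit n of 'm' lands
+ * at bit 2n of the result, giving one 2-bit clock-mode field per instance.
+ * Worked example (mode encoding illustrative): VHA_SPREAD_MASK(0x03) = 0x0005,
+ * so for a mode that encodes as 0x2, VHA_SET_CLOCKS(0x03, mode) = 0x5 * 0x2 =
+ * 0xA, programming fields 0 and 1 of the control register. */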
+
+/* As REGBANK is a NOTOFF register, define its OFF to be AUTO by default. */
+#define VHA_CR_SYS_CLK_CTRL0_REGBANK_OFF \
+		VHA_CR_SYS_CLK_CTRL0_REGBANK_AUTO
+
+#define VHA_SYS_CLOCK_MODE(name, mode) \
+		VHA_CR_SYS_CLK_CTRL0_##name##_##mode
+
+#define VHA_SYS_CLOCK_MODE_MULTI(name, mode, mask) \
+		(VHA_SET_CLOCKS(mask, mode) << VHA_CR_SYS_CLK_CTRL0_##name##0_SHIFT)
+
+#define VHA_SYS_CLOCK_MODE_MULTI_ALL(name, mode) \
+		(VHA_SET_CLOCKS(VHA_CLOCKS_MULTI_ALL, mode) << \
+				VHA_CR_SYS_CLK_CTRL0_##name##0_SHIFT)
+
+#define VHA_SYS_CLOCKS_DEFAULT(mode) ( (\
+			VHA_SYS_CLOCK_MODE(REGBANK,        mode) | \
+			VHA_SYS_CLOCK_MODE(SOCM,           mode) | \
+			VHA_SYS_CLOCK_MODE(LSYNC,          mode) | \
+			VHA_SYS_CLOCK_MODE(SLC,            mode) | \
+			VHA_SYS_CLOCK_MODE(AXI,            mode) | \
+			VHA_SYS_CLOCK_MODE(INTERCONNECT,   mode) | \
+			VHA_SYS_CLOCK_MODE_MULTI_ALL(WM,   mode) | \
+			VHA_SYS_CLOCK_MODE_MULTI_ALL(NOC,  mode) | \
+			VHA_SYS_CLOCK_MODE_MULTI_ALL(CORE, mode)   \
+			) & VHA_CR_SYS_CLK_CTRL0_MASKFULL)
+
+#define VHA_SYS_CLOCKS_RESET(mode) ( (\
+			VHA_SYS_CLOCK_MODE(REGBANK,        mode) | \
+			VHA_SYS_CLOCK_MODE(SOCM,           mode) | \
+			VHA_SYS_CLOCK_MODE(LSYNC,          mode) | \
+			VHA_SYS_CLOCK_MODE(SLC,            mode) | \
+			VHA_SYS_CLOCK_MODE(AXI,            mode) | \
+			VHA_SYS_CLOCK_MODE(INTERCONNECT,   mode) | \
+			VHA_SYS_CLOCK_MODE_MULTI_ALL(WM,   mode) | \
+			VHA_SYS_CLOCK_MODE_MULTI_ALL(NOC,  mode)   \
+			) & VHA_CR_SYS_CLK_CTRL0_MASKFULL)
+
+#define VHA_SYS_CLOCKS_CORE_FULL_MASK ( \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE7_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE6_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE5_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE4_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE3_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE2_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE1_CLRMSK) | \
+			~(VHA_CR_SYS_CLK_CTRL0_CORE0_CLRMSK))
+
+#define VHA_MAIN_CLOCK_MODE(name, mode) \
+		VHA_CR_CLK_CTRL0_##name##_##mode \
+
+#define VHA_MAIN_CLOCKS_DEFAULT(mode) ( (\
+			VHA_MAIN_CLOCK_MODE(CNN_CORE_XBAR, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_MMM,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_EWO,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_PACK,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_OIN,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_POOL,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_SB,        mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_XBAR,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_NORM,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACT,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACCUM,     mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CNV,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CBUF,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_IBUF,      mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CMD,       mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN,           mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_TRS_A,     mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_TRS_B,     mode) | \
+			VHA_MAIN_CLOCK_MODE(MEMBUS_RESET,  mode) | \
+			VHA_MAIN_CLOCK_MODE(BWM,           mode) | \
+			VHA_MAIN_CLOCK_MODE(LOCM,          mode) | \
+			VHA_MAIN_CLOCK_MODE(NOC,           mode) | \
+			VHA_MAIN_CLOCK_MODE(ARB,           mode) | \
+			VHA_MAIN_CLOCK_MODE(BIF,           mode) \
+			) & VHA_CR_CLK_CTRL0_MASKFULL)
+
+/* Response status macro definitions. */
+#define VHA_WM_RESPONSE_STATUS(name) \
+		VHA_CR_BITMASK(WM_RESPONSE_FIFO_WL_STATUS, name)
+
+#define VHA_WM_RESPONSE_SUCCESS ( \
+		VHA_WM_RESPONSE_STATUS(SUCCESS))
+
+#define VHA_WM_RESPONSE_ERROR_CODE(name) \
+		(VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_ERROR_CODE_##name)
+
+#define VHA_WM_RESPONSE_GET_ERROR_CODE(s) \
+		VHA_CR_GETBITS(WM_RESPONSE_FIFO_WL_STATUS, ERROR_CODE, s)
+
+#define VHA_WM_RESPONSE_GET_FAILED_CORE_IDX(s) \
+		VHA_CR_GETBITS(WM_RESPONSE_FIFO_WL_STATUS, FAILED_CORE_IDX, s)
+
+/* Core status macro definitions. */
+#define VHA_CORE_STATUS(name) \
+		VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, name)
+
+/* IC status macro definitions. */
+#define VHA_IC_STATUS(name) \
+		VHA_CR_BITMASK(INTERCONNECT_EVENT_HOST_STATUS, name)
+
+/* Confirmation writes error indication. */
+/* There is a fake error bit set in the VHA_CR_WM_RESPONSE_FIFO_WL_STATUS reg
+ * to indicate a confirmation-writes error detected by software. */
+#define VHA_REG_CONF_ERROR_SHIFT   (63)
+#define VHA_REG_CONF_ERROR_CLRMSK  (0x7fffffffffffffffULL)
+#define VHA_REG_CONF_ERROR_EN      (0x8000000000000000ULL)
+#define VHA_REG_SET_CONF_ERROR(r) \
+	(r |= VHA_REG_CONF_ERROR_EN)
+#define VHA_REG_CLR_CONF_ERROR(r) \
+	(r &= VHA_REG_CONF_ERROR_CLRMSK)
+#define VHA_REG_GET_CONF_ERROR(r) \
+	((r & VHA_REG_CONF_ERROR_EN) >> VHA_REG_CONF_ERROR_SHIFT)
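+
+/* The VHA_REG_*_ERROR macros in this file all follow the same pattern; an
+ * illustrative sketch (the handler name is hypothetical):
+ *
+ *	uint64_t status = IOREAD64_CR_REGIO(WM_RESPONSE_FIFO_WL_STATUS);
+ *	if (sw_detected_conf_error)
+ *		VHA_REG_SET_CONF_ERROR(status);
+ *	...
+ *	if (VHA_REG_GET_CONF_ERROR(status))
+ *		handle_conf_error();
+ */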
+
+/* Parity error indication. */
+/* As not all regs with a PARITY bit have a LOGIC_ERROR bit to identify parity
+ * errors, there is a fake parity bit set in these regs to indicate parity
+ * errors detected by software. */
+#define VHA_REG_PARITY_ERROR_SHIFT   (62)
+#define VHA_REG_PARITY_ERROR_CLRMSK  (0xbfffffffffffffffULL)
+#define VHA_REG_PARITY_ERROR_EN      (0x4000000000000000ULL)
+#define VHA_REG_SET_PARITY_ERROR(r) \
+	(r |= VHA_REG_PARITY_ERROR_EN)
+#define VHA_REG_CLR_PARITY_ERROR(r) \
+	(r &= VHA_REG_PARITY_ERROR_CLRMSK)
+#define VHA_REG_GET_PARITY_ERROR(r) \
+	((r & VHA_REG_PARITY_ERROR_EN) >> VHA_REG_PARITY_ERROR_SHIFT)
+
+/* Workload id mismatch indication. */
+/* There is a fake error bit set in the VHA_CR_WM_RESPONSE_FIFO_WL_STATUS reg
+ * to indicate a workload id mismatch error detected by software. */
+#define VHA_REG_WL_ID_MISMATCH_ERROR_SHIFT   (61)
+#define VHA_REG_WL_ID_MISMATCH_ERROR_CLRMSK  (0xdfffffffffffffffULL)
+#define VHA_REG_WL_ID_MISMATCH_ERROR_EN      (0x2000000000000000ULL)
+#define VHA_REG_SET_WL_ID_MISMATCH_ERROR(r) \
+	(r |= VHA_REG_WL_ID_MISMATCH_ERROR_EN)
+#define VHA_REG_CLR_WL_ID_MISMATCH_ERROR(r) \
+	(r &= VHA_REG_WL_ID_MISMATCH_ERROR_CLRMSK)
+#define VHA_REG_GET_WL_ID_MISMATCH_ERROR(r) \
+	((r & VHA_REG_WL_ID_MISMATCH_ERROR_EN) >> VHA_REG_WL_ID_MISMATCH_ERROR_SHIFT)
+
+/* Combined CRC mismatch indication. */
+/* There is a fake error bit set in the VHA_CR_WM_RESPONSE_FIFO_WL_STATUS reg
+ * to indicate a combined CRC mismatch error detected by software. */
+#define VHA_REG_COMBINED_CRC_ERROR_SHIFT   (60)
+#define VHA_REG_COMBINED_CRC_ERROR_CLRMSK  (0xefffffffffffffffULL)
+#define VHA_REG_COMBINED_CRC_ERROR_EN      (0x1000000000000000ULL)
+#define VHA_REG_SET_COMBINED_CRC_ERROR(r) \
+	(r |= VHA_REG_COMBINED_CRC_ERROR_EN)
+#define VHA_REG_CLR_COMBINED_CRC_ERROR(r) \
+	(r &= VHA_REG_COMBINED_CRC_ERROR_CLRMSK)
+#define VHA_REG_GET_COMBINED_CRC_ERROR(r) \
+	((r & VHA_REG_COMBINED_CRC_ERROR_EN) >> VHA_REG_COMBINED_CRC_ERROR_SHIFT)
+
+/* General core error indication. */
+/* There is a fake error bit set in the VHA_CR_WM_EVENT_STATUS reg
+ * to indicate that a core error was detected on one of the assigned cores. */
+#define VHA_REG_WM_CORE_ERROR_SHIFT   (24)
+#define VHA_REG_WM_CORE_ERROR_CLRMSK  (0xfffffffffeffffffULL)
+#define VHA_REG_WM_CORE_ERROR_EN      (0x0000000001000000ULL)
+#define VHA_REG_SET_WM_CORE_ERROR(r) \
+	(r |= VHA_REG_WM_CORE_ERROR_EN)
+#define VHA_REG_CLR_WM_CORE_ERROR(r) \
+	(r &= VHA_REG_WM_CORE_ERROR_CLRMSK)
+#define VHA_REG_GET_WM_CORE_ERROR(r) \
+	((r & VHA_REG_WM_CORE_ERROR_EN) >> VHA_REG_WM_CORE_ERROR_SHIFT)
+
+/* General interconnect error indication. */
+/* There is a fake error bit set in the VHA_CR_WM_EVENT_STATUS reg
+ * to indicate that an interconnect error was detected on one of the assigned
+ * interconnects. */
+#define VHA_REG_WM_IC_ERROR_SHIFT   (25)
+#define VHA_REG_WM_IC_ERROR_CLRMSK  (0xfffffffffdffffffULL)
+#define VHA_REG_WM_IC_ERROR_EN      (0x0000000002000000ULL)
+#define VHA_REG_SET_WM_IC_ERROR(r) \
+	(r |= VHA_REG_WM_IC_ERROR_EN)
+#define VHA_REG_CLR_WM_IC_ERROR(r) \
+	(r &= VHA_REG_WM_IC_ERROR_CLRMSK)
+#define VHA_REG_GET_WM_IC_ERROR(r) \
+	((r & VHA_REG_WM_IC_ERROR_EN) >> VHA_REG_WM_IC_ERROR_SHIFT)
+
+
+#endif /* VHA_REGS_H */

+ 264 - 0
driver/vha/multi/vha_sc_dbg.c

@@ -0,0 +1,264 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include <uapi/vha.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_io.h"
+
+#define VHA_MAX_NUM_SEGMENTS 10
+
+static bool generate_crcs_enable = true;
+module_param(generate_crcs_enable, bool, 0444);
+MODULE_PARM_DESC(generate_crcs_enable,
+	"Enable generating safety CRCs");
+
+struct vha_sc_dbgfs_ctx {
+	struct dentry    *sc_debugfs_dir;
+
+	struct dentry    *crcs_dir;
+	struct dentry    *crcs_sub_dirs[VHA_MAX_NUM_SEGMENTS];
+	uint32_t         num_cores_used[VHA_MAX_NUM_SEGMENTS];
+	uint32_t         latest_crcs[VHA_MAX_NUM_SEGMENTS][VHA_NUM_CORES];
+	uint8_t          segment_crc_idx_to_use;
+
+	uint8_t          num_segments;
+};
+
+static ssize_t vha_bin_crcs_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct vha_dev *vha = file->private_data;
+	uint32_t bin_crcs[VHA_MAX_NUM_SEGMENTS * VHA_MAX_CORES] = { 0 };
+	struct vha_sc_dbgfs_ctx *ctx = (struct vha_sc_dbgfs_ctx *)vha->sc_dbgfs_ctx;
+	size_t bytes = 0;
+	uint32_t offset = 0;
+	int i, j;
+
+	if (*ppos)
+		return 0;
+
+	for (i = 0; i < ctx->num_segments; i++) {
+		for (j = 0; j < ctx->num_cores_used[i]; j++) {
+			bin_crcs[offset] = ctx->latest_crcs[i][j];
+			offset++;
+		}
+	}
+
+	/* Validate the user buffer size before copying anything out. */
+	bytes = offset * sizeof(bin_crcs[0]);
+	if (count < bytes)
+		return -EINVAL;
+
+	if (copy_to_user(buf, bin_crcs, bytes)) {
+		dev_err(vha->dev, "%s: bin_crcs read: copy to user failed\n",
+				__func__);
+		return -EFAULT;
+	}
+
+	*ppos = bytes;
+
+	return bytes;
+}
+
+static const struct file_operations vha_crcs_bin_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = vha_bin_crcs_read,
+};
+
+
+static ssize_t vha_crcs_reset_write(struct file *file, const char __user *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct vha_dev *vha = file->private_data;
+	struct vha_sc_dbgfs_ctx *ctx = (struct vha_sc_dbgfs_ctx *)vha->sc_dbgfs_ctx;
+
+	if (ctx->crcs_dir) {
+		int i = 0;
+
+		for (i = 0; i < VHA_MAX_NUM_SEGMENTS; i++)
+			if (ctx->crcs_sub_dirs[i]) {
+				debugfs_remove_recursive(ctx->crcs_sub_dirs[i]);
+				ctx->crcs_sub_dirs[i] = NULL;
+			}
+
+		ctx->segment_crc_idx_to_use = 0;
+
+		memset(ctx->latest_crcs, 0, sizeof(ctx->latest_crcs));
+		memset(ctx->num_cores_used, 0, sizeof(ctx->num_cores_used));
+	}
+
+	return count;
+}
+
+static const struct file_operations vha_crcs_reset_fops = {
+	.write = vha_crcs_reset_write,
+	.open = simple_open,
+};
+
+void vha_update_crcs(struct vha_dev *vha, uint32_t crcs[VHA_NUM_CORES], int n)
+{
+	struct vha_sc_dbgfs_ctx *ctx = (struct vha_sc_dbgfs_ctx *)vha->sc_dbgfs_ctx;
+
+	if (ctx->crcs_dir && ctx->num_segments) {
+		uint8_t i;
+		uint8_t crc_idx = ctx->segment_crc_idx_to_use;
+		char core_txt[7] = "core_x";
+
+		if (crc_idx >= VHA_MAX_NUM_SEGMENTS) {
+			dev_warn_once(vha->dev, "%s: unable to update crcs, too many segments\n", __func__);			
+			return;
+		}
+
+		if (ctx->crcs_sub_dirs[crc_idx] == NULL) {
+			char dir_txt[10] = "segment_x";			
+			snprintf(dir_txt, sizeof(dir_txt), "segment_%d", crc_idx);
+			ctx->crcs_sub_dirs[crc_idx] = debugfs_create_dir(dir_txt, ctx->crcs_dir);
+			for (i = 0; i < n; i++) {
+				snprintf(core_txt, sizeof(core_txt), "core_%d", i);
+				debugfs_create_x32(core_txt, S_IRUGO, ctx->crcs_sub_dirs[crc_idx],
+						&ctx->latest_crcs[crc_idx][i]);
+			}			
+		}
+
+		ctx->num_cores_used[crc_idx] = n;
+
+		for (i = 0; i < n; i++)
+			ctx->latest_crcs[crc_idx][i] = crcs[i];
+
+		ctx->segment_crc_idx_to_use++;
+
+		if (ctx->segment_crc_idx_to_use >= ctx->num_segments)
+			ctx->segment_crc_idx_to_use = 0;
+	}
+}
+
+void vha_sc_dbg_init(struct vha_dev *vha, struct dentry *debugfs_dir)
+{
+	struct vha_sc_dbgfs_ctx *ctx = devm_kzalloc(vha->dev,
+			sizeof(struct vha_sc_dbgfs_ctx), GFP_KERNEL);
+	if (!ctx) {
+		dev_err(vha->dev,
+				"%s: Out of memory when creating debugfs context!\n",
+				__func__);
+		return;
+	}
+
+	/* Create userspace node */
+	if (!debugfs_dir) {
+		dev_warn(vha->dev,
+				"%s: Probably debugfs not enabled in this kernel!\n",
+				__func__);
+		return;
+	}
+
+#define VHA_DBGFS_CREATE_FILE_IN_DIR(_perm_, _name_, _fops_, dir) \
+	{ \
+			if (!debugfs_create_file(_name_, \
+				_perm_, ctx->dir, vha, \
+				&vha_##_fops_##_fops)) { \
+				dev_warn(vha->dev, \
+					"%s: failed to create %s dbg file!\n", \
+					__func__, _name_); \
+			} \
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)
+#define CTX_DBGFS_CREATE_RW(_type_, _name_, _ctx_dev_member_, dir) \
+	{ \
+			struct dentry *dentry; \
+			debugfs_create_##_type_(_name_, \
+				S_IWUSR|S_IRUGO, ctx->dir, \
+				&ctx->_ctx_dev_member_); \
+			dentry = debugfs_lookup(_name_, ctx->dir); \
+			if (!dentry) { \
+				dev_warn(vha->dev, \
+					"%s: failed to create %s dbg file!\n", \
+					__func__, _name_); \
+			} else { \
+				dput(dentry); \
+			} \
+	}
+#else
+#define CTX_DBGFS_CREATE_RW(_type_, _name_, _ctx_dev_member_, dir) \
+	{ \
+			if (!debugfs_create_##_type_(_name_, \
+				S_IWUSR|S_IRUGO, ctx->dir, \
+				&ctx->_ctx_dev_member_)) { \
+				dev_warn(vha->dev, \
+					"%s: failed to create %s dbg file!\n", \
+					__func__, _name_); \
+			} \
+	}
+#endif
+
+	ctx->sc_debugfs_dir = debugfs_create_dir("sf_gen", debugfs_dir);
+	CTX_DBGFS_CREATE_RW(u8, "num_segments", num_segments, sc_debugfs_dir);
+
+	if (generate_crcs_enable) {
+		ctx->crcs_dir = debugfs_create_dir("CRCs", ctx->sc_debugfs_dir);
+		if (ctx->crcs_dir) {
+			VHA_DBGFS_CREATE_FILE_IN_DIR(S_IWUSR, "crcs_reset", crcs_reset, crcs_dir);
+			VHA_DBGFS_CREATE_FILE_IN_DIR(S_IRUGO, "crcs_bin", crcs_bin, crcs_dir);
+		}
+	}
+
+#undef VHA_DBGFS_CREATE_FILE_IN_DIR	
+#undef CTX_DBGFS_CREATE_RW
+
+	vha->sc_dbgfs_ctx = (void *)ctx;
+}
+
+void vha_sc_dbg_deinit(struct vha_dev *vha)
+{
+	struct vha_sc_dbgfs_ctx *ctx =
+			(struct vha_sc_dbgfs_ctx *)vha->sc_dbgfs_ctx;
+
+	/* Init may have bailed out early (e.g. no debugfs); nothing to do. */
+	if (!ctx)
+		return;
+
+	debugfs_remove_recursive(ctx->sc_debugfs_dir);
+	vha->sc_dbgfs_ctx = NULL;
+}
+
+

+ 1896 - 0
driver/vha/multi/vha_wm.c

@@ -0,0 +1,1896 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include <uapi/vha.h>
+#include <uapi/vha_errors.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+
+static uint32_t cnn_pdump_poll_count = 10000000;
+module_param(cnn_pdump_poll_count, uint, 0444);
+MODULE_PARM_DESC(cnn_pdump_poll_count,
+		"PDUMP: Number of times to poll for CNN status");
+
+static uint32_t wm_pdump_poll_count = 100;
+module_param(wm_pdump_poll_count, uint, 0444);
+MODULE_PARM_DESC(wm_pdump_poll_count,
+		"PDUMP: Number of times to poll for WM status");
+
+static bool cnn_preloads_disable;
+module_param(cnn_preloads_disable, bool, 0444);
+MODULE_PARM_DESC(cnn_preloads_disable,
+		"Disables CNN preloads");
+
+static uint32_t cnn_hl_wdt_cycles = VHA_CORE_WDT_CYCLES;
+module_param(cnn_hl_wdt_cycles, uint, 0444);
+MODULE_PARM_DESC(cnn_hl_wdt_cycles,
+		"High level core watchdog cycles");
+
+static uint32_t cnn_hl_wdt_mode = VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS;
+module_param(cnn_hl_wdt_mode, uint, 0444);
+MODULE_PARM_DESC(cnn_hl_wdt_mode,
+		"High level core watchdog mode: 1-pass; 2-layer group. See TRM");
+
+static uint32_t cnn_mem_wdt_cycles = VHA_CORE_MEM_WDT_CYCLES;
+module_param(cnn_mem_wdt_cycles, uint, 0444);
+MODULE_PARM_DESC(cnn_mem_wdt_cycles,
+		"Core memory watchdog cycles");
+
+static uint32_t cnn_mem_wdt_mode = VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS;
+module_param(cnn_mem_wdt_mode, uint, 0444);
+MODULE_PARM_DESC(cnn_mem_wdt_mode,
+		"Core memory watchdog mode: 0-disabled; "
+		"1-CMD Parser starts a pass or CMD parser is kicked; "
+		"2-CMD parser is kicked. See TRM");
+
+static bool use_estimated_cycles_for_wm_wdt = false;
+module_param(use_estimated_cycles_for_wm_wdt, bool, 0444);
+MODULE_PARM_DESC(use_estimated_cycles_for_wm_wdt,
+		"WM workload watchdog cycles source: "
+		"false-the value from the wm_wl_wdt_cycles parameter will be used; "
+		"true-the value from the MBS SEGMENT_ESTIMATED_CYCLES filed will be used");
+
+static uint32_t wm_wl_wdt_estimated_cycles_margin = 0;
+module_param(wm_wl_wdt_estimated_cycles_margin, uint, 0444);
+MODULE_PARM_DESC(wm_wl_wdt_estimated_cycles_margin,
+		"WM workload watchdog cycles margin added to the SEGMENT_ESTIMATED_CYCLES"
+		" value, used only if use_estimated_cycles_for_wm_wdt==true");
+
+static uint32_t wm_wl_wdt_cycles = VHA_WM_WDT_CYCLES;
+module_param(wm_wl_wdt_cycles, uint, 0444);
+MODULE_PARM_DESC(wm_wl_wdt_cycles,
+		"WM workload watchdog cycles");
+
+static uint32_t wm_wl_wdt_mode = VHA_CR_WM_WL_WDT_CTRL_WL_WDT_CTRL_KICK_WL;
+module_param(wm_wl_wdt_mode, uint, 0444);
+MODULE_PARM_DESC(wm_wl_wdt_mode,
+		"WM workload watchdog mode: 0-disabled; 1-enabled. See TRM");
+
+static uint32_t socm_xor_bits[2] = { 0, 0 };
+module_param_array(socm_xor_bits, uint, NULL, 0444);
+MODULE_PARM_DESC(socm_xor_bits,
+	"SOCM Hashing: This parameter reflects SOCM_B7_XOR_BITS & SOCM_B8_XOR_BITS"
+	"hw registers. If not set the default values are used. See TRM.");
+
+/*
+ * Internal memory layout:
+ * .onchipmem_phys_start
+ * LOCM - <onchipmem_size>
+ * 4k GUARD PAGE
+ * WM0 SOCM - <shared_onchipmem_size>
+ * 4k GUARD PAGE
+ * WM1 SOCM
+ * 4k GUARD PAGE
+ * ...
+ * WMn SOCM
+ * 4k GUARD PAGE
+ * WM0 LL SYNC buffer - 4k PAGE
+ * 4k GUARD PAGE
+ * WM1 LL SYNC buffer - 4k PAGE
+ * 4k GUARD PAGE
+ * ...
+ * WMn LL SYNC buffer - 4k PAGE
+ * 4k GUARD PAGE
+ */
+#define LLSYNC_SIZE 0x1000
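+
+/* Worked example of the layout above (sizes illustrative): with a 4 MB LOCM,
+ * 1 MB SOCM regions and 4 KB guard pages, the first LL SYNC page sits at
+ * onchipmem_phys_start + (4M + 4K) + n_regions * (1M + 4K), and WM k's page a
+ * further k * (4K + 4K) beyond that; vha_wm_setup_config_regs_multi() below
+ * computes ll_sync_addr in exactly this way. */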
+
+struct vha_config_regs {
+	uint64_t core_assignment;
+	uint64_t cnn_control[VHA_MAX_CORES];
+	uint64_t cmd_base_addr[VHA_MAX_CORES];
+	uint64_t cnn_alt_addr[VHA_CORE_MAX_ALT_ADDRS];
+	uint64_t locm_base_addr;
+	uint64_t socm_circ_buff_size;
+	uint64_t socm_base_addr;
+	uint64_t socm_buf_assignment;
+	uint64_t socm_b7_xor_bits;
+	uint64_t socm_b8_xor_bits;
+	uint64_t low_level_sync_base_addr;
+	uint64_t cnn_alt_addr_used;
+	uint64_t cnn_vcore_mapping;
+};
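+
+/* The workload configuration is staged in struct vha_config_regs by
+ * vha_wm_setup_config_regs_multi(), written to the hardware by
+ * vha_wm_write_config_regs() and, when VHA_SCF is defined, read back by
+ * vha_wm_confirm_config_regs() to detect register corruption. */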
+
+/* Note:
+ * The SOCM_BUF_<X>_WM_MAPPING and the CORE_<X>_WM_MAPPING registers must be
+ * configured the same, thus we use the core_mask for a given WM. */
+static uint64_t wm_assign_socm(struct vha_dev *vha, uint64_t socm_buf_addr,
+		uint8_t wm_id, uint8_t core_mask, uint32_t circ_buf_offs, struct vha_config_regs* regs)
+{
+	uint64_t socm_buf_assignment = IOREAD64_CR_REGIO(SOCM_BUF_ASSIGNMENT);
+	uint32_t assignment_field_shift =
+				VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_SHIFT -
+					VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_SHIFT;
+	uint64_t assignment_field_mask =
+					~VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_CLRMSK;
+	uint64_t base_addr = socm_buf_addr;
+	uint32_t socm_chunk_size = vha->hw_props.socm_core_size_bytes *
+				VHA_CORE_MASK_TO_NUM(vha_wm_get_cores(vha, wm_id));
+
+	/* Use different address for each WM to make debugging easier */
+	base_addr += wm_id * (vha->hw_props.socm_size_bytes + IMG_MEM_VA_GUARD_GAP);
+	/* Virtual base address must be 256 byte aligned */
+	base_addr = ALIGN(base_addr, 256);
+	/* Chunk size used to calculate the offset must be 128 byte aligned */
+	socm_chunk_size = ALIGN(socm_chunk_size, 128);
+
+	/* circ_buf_offs = 0 means that the circular buffer is disabled */
+	if (circ_buf_offs && socm_chunk_size && circ_buf_offs <= socm_chunk_size) {
+		regs->socm_circ_buff_size = socm_chunk_size - circ_buf_offs;
+	} else {
+		regs->socm_circ_buff_size = 0;
+	}
+
+	regs->socm_base_addr = base_addr;
+	dev_dbg(vha->dev, "%s: set SOCM WM%u address -> %#llx\n",
+			__func__,  wm_id, base_addr);
+
+	while (core_mask != 0) {
+		uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);
+
+		core_mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
+
+		socm_buf_assignment &=
+			~(assignment_field_mask << (curr_core_id * assignment_field_shift));
+		socm_buf_assignment |= wm_id << (curr_core_id * assignment_field_shift);
+	}
+
+	regs->socm_buf_assignment = socm_buf_assignment;
+	dev_dbg(vha->dev, "%s: assigned SOCM bufs for WM%u: 0x%llx\n",
+					__func__, wm_id, socm_buf_assignment);
+
+	if (socm_xor_bits[0]) {
+		regs->socm_b7_xor_bits = socm_xor_bits[0];
+	}
+
+	if (socm_xor_bits[1]) {
+		regs->socm_b8_xor_bits = socm_xor_bits[1];
+	}
+
+	return base_addr - socm_buf_addr;
+}
+
+static bool vha_wm_setup_config_regs_multi(struct vha_cmd *cmd, struct vha_config_regs* regs)
+{
+	int i;
+	bool ret = false;
+	const struct vha_user_cnn_submit_multi_cmd *user_submit_cmd =
+		(struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd;
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	uint32_t val32 = 0;
+	struct vha_buffer *buf = NULL;
+	uint64_t *reg = NULL;
+
+	uint32_t core_mask;
+	uint64_t vcore_map = 0;
+	uint32_t vcore_field_shift =
+		VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE1_SHIFT -
+							VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE0_SHIFT;
+
+	if (cmd->size != sizeof(*user_submit_cmd)) {
+		dev_err(vha->dev, "%s: command buffer wrong size: %zu/%zu",
+			__func__, cmd->size, sizeof(*user_submit_cmd));
+		goto out_error;
+	}
+
+	if (!vha_dev_check_hw_capab(vha, user_submit_cmd->expected_ip_capab))
+		goto out_error;
+
+	/* At least num cores CMDs and IN */
+	if (user_submit_cmd->msg.num_inbufs < (user_submit_cmd->num_cores + 1) ||
+		/* At least OUT */
+		(user_submit_cmd->msg.num_inbufs - user_submit_cmd->num_cores
+				>= user_submit_cmd->msg.num_bufs) ||
+		/* And maybe TMP and others */
+		user_submit_cmd->msg.num_bufs > VHA_CORE_MAX_ALT_ADDRS) {
+		dev_err(vha->dev, "%s: wrong number of bufs: %u,%u\n",
+				__func__,
+				user_submit_cmd->msg.num_inbufs,
+				user_submit_cmd->msg.num_bufs);
+		goto out_error;
+	}
+
+	/* Number of cores. */
+	if ((user_submit_cmd->num_cores < 1) ||
+		(user_submit_cmd->num_cores > vha->hw_props.num_cnn_core_devs)) {
+		dev_err(vha->dev, "%s: wrong number of cores: %u\n",
+			__func__,
+			user_submit_cmd->num_cores);
+		goto out_error;
+	}
+
+	/* Number of cmd streams must match number of cores. */
+	for (i = 0; i < user_submit_cmd->num_cores; i++)
+		if (user_submit_cmd->cmdbuf[i] == 0)
+			break;
+
+	if ((i < user_submit_cmd->num_cores) ||
+		((user_submit_cmd->num_cores < VHA_MAX_CORES) &&
+		(user_submit_cmd->cmdbuf[i] != 0))) {
+		for (; i < VHA_MAX_CORES; i++)
+			if (user_submit_cmd->cmdbuf[i] == 0)
+				break;
+
+		dev_err(vha->dev, "%s: wrong number of cmd streams: %u,%u\n",
+			__func__,
+			i, user_submit_cmd->num_cores);
+		goto out_error;
+	}
+
+	/* Make WM<->cores binding. */
+	vha_wm_assign_cores(vha, sched_info->wm_id, sched_info->core_mask, &regs->core_assignment);
+	dev_dbg(vha->dev, "%s: assigned cores for WM%u: 0x%02x\n",
+		__func__, sched_info->wm_id, vha_wm_get_cores(vha, sched_info->wm_id));
+
+	/* Write buffer address to each register,
+	 * and pdump LDB each of the input buffers. */
+	img_pdump_printf("-- Load inputs\n");
+
+	/* First program cmd stream addrs. */
+	core_mask = sched_info->core_mask;
+	if (VHA_CORE_MASK_TO_NUM(core_mask) != user_submit_cmd->num_cores) {
+		dev_err(vha->dev, "%s: invalid core_mask!\n", __func__);
+		goto out_error;
+	}
+
+	for (i = 0; i < user_submit_cmd->num_cores; i++) {
+		uint64_t curr_core;
+		uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);
+
+		buf = vha_find_bufid(session, user_submit_cmd->cmdbuf[i]);
+		if (buf == NULL) {
+			dev_err(vha->dev, "%s: invalid buffer id:%d\n",
+				__func__, user_submit_cmd->cmdbuf[i]);
+			goto out_error;
+		}
+		if (buf->size == 0) {
+			dev_err(vha->dev, "%s: invalid cmdstream size\n", __func__);
+			goto out_error;
+		}
+
+		/* Choose next core from the WM set. */
+		curr_core = VHA_CORE_ID_TO_MASK(curr_core_id);
+		core_mask &= ~((uint32_t)curr_core);
+
+		val32 = min(2048U, (uint32_t)buf->size)/32 - 1;
+		val32 = VHA_CR_SETBITS(OS0_CNN_CONTROL, CMD_SIZE_MIN1, val32) |
+					VHA_CR_SETBITS(OS0_CNN_CONTROL, CTXT_PASID,
+						session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id) |
+					VHA_CR_SETBITS(OS0_CNN_CONTROL, CTXT_PASID_IO,
+						session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].hw_id);
+		regs->cnn_control[curr_core_id] = val32;
+
+		/* Pdump the cmd stream buffers. */
+		vha_pdump_ldb_buf(session, PDUMP_PRM,
+				buf, 0, buf->size,
+				buf->status == VHA_BUF_FILLED_BY_SW);
+
+		/* Write to core's cmd register.
+		 * In no-MMU mode, write phys address of a contig buffer.
+		 * In MMU mode, write virt address of buffer. */
+		SET_BUFADDR(session, buf, 0, &regs->cmd_base_addr[curr_core_id]);
+
+		/* Map this core. */
+		vcore_map |= curr_core_id << (i * vcore_field_shift);
+
+		if (vha_buf_needs_flush(session, buf->id))
+			img_mem_sync_cpu_to_device(session->mem_ctx, buf->id);
+	}
+
+	/* Command stream buffers are already handled */
+	for (i = 0; i < (user_submit_cmd->msg.num_bufs - 1); i++) {
+		uint32_t offset;
+		uint32_t size;
+
+		buf = vha_find_bufid(session, user_submit_cmd->bufs[i]);
+		if (buf == NULL) {
+			dev_err(vha->dev, "%s: invalid buffer id:%d\n",
+					__func__, user_submit_cmd->bufs[i]);
+			goto out_error;
+		}
+
+		/* offset can be specified for all
+		 * buffers except cmdstream buf */
+		offset = user_submit_cmd->bufoffsets[i];
+		size = user_submit_cmd->bufsizes[i];
+
+		if (size + offset > buf->size) {
+			dev_err(vha->dev, "%s: invalid size+offset: %x+%x > %zx\n",
+					__func__, size, offset, buf->size);
+			goto out_error;
+		}
+
+		/* Calculate reg address */
+		reg = &regs->cnn_alt_addr[user_submit_cmd->regidx[i]];
+		/* Record what alt address is in use */
+		regs->cnn_alt_addr_used |= 1 << user_submit_cmd->regidx[i];
+		regs->cnn_alt_addr_used |= buf->req_type <<
+			(VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT +
+			user_submit_cmd->regidx[i]);
+
+		if (user_submit_cmd->onchipram_bufs[VHA_LOCAL_OCM] == buf->id) {
+			/* Check against overflow */
+			if (buf->devvirt + vha->hw_props.locm_size_bytes +
+					IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
+				dev_err(vha->dev, "%s: LOCM overflow!\n", __func__);
+				goto out_error;
+			}
+
+			/* Setup Local OCM */
+			regs->locm_base_addr = buf->devvirt;
+			dev_dbg(vha->dev, "%s: set LOCM address -> %#llx\n",
+					__func__, buf->devvirt);
+		}
+
+		if (user_submit_cmd->onchipram_bufs[VHA_SHARED_OCM] == buf->id) {
+			/* Check against overflow */
+			if (buf->devvirt + vha->hw_props.socm_size_bytes +
+					IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
+				dev_err(vha->dev, "%s: SOCM overflow!\n", __func__);
+				goto out_error;
+			}
+			/* Setup Shared OCM */
+			offset = wm_assign_socm(vha, buf->devvirt,
+					sched_info->wm_id, sched_info->core_mask,
+					user_submit_cmd->shared_circ_buf_offs, regs);
+			/* Check against overflow */
+			if (regs->socm_base_addr + vha->hw_props.socm_size_bytes +
+					IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
+				dev_err(vha->dev, "%s: SOCM overflow!\n", __func__);
+				goto out_error;
+			}
+		}
+
+		/* pdump the input buffers (not filled by the hw),
+		 * try to cache buffers filled by SW,
+		 * to avoid unnecessary LDBs */
+		if (i < user_submit_cmd->msg.num_inbufs - user_submit_cmd->num_cores &&
+				!(buf->status == VHA_BUF_FILLED_BY_HW))
+			vha_pdump_ldb_buf(session, PDUMP_PRM,
+					buf, offset, size,
+					buf->status == VHA_BUF_FILLED_BY_SW);
+
+		/* Write to the index register.
+		 * In no-MMU mode, write phys address of a contig buffer.
+		 * In MMU mode, write virt address of buffer. */
+		SET_BUFADDR(session, buf, offset, reg);
+
+		if (vha_buf_needs_flush(session, buf->id))
+			img_mem_sync_cpu_to_device(session->mem_ctx, buf->id);
+	}
+
+	if (vha->ocm_paddr != ~0) {
+		/* Low level sync buffer address
+		 * It has fixed size = 512 bytes but we operate on 4k pages
+		 * It is placed after SOCM
+		 * including gap page between LOCM&SOCM and after SOCM.
+		 */
+		uint64_t ll_sync_addr = vha->ocm_paddr +
+				vha->hw_props.locm_size_bytes + IMG_MEM_VA_GUARD_GAP +
+				vha->hw_props.num_cnn_core_devs * (vha->hw_props.socm_size_bytes + IMG_MEM_VA_GUARD_GAP);
+		/* Add offset based on WM id */
+		ll_sync_addr += sched_info->wm_id * (LLSYNC_SIZE +
+				IMG_MEM_VA_GUARD_GAP);
+
+		/* Check against overflow */
+		if (ll_sync_addr + LLSYNC_SIZE +
+				IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
+			dev_err(vha->dev, "%s: LLSYNC overflow!\n", __func__);
+			goto out_error;
+		}
+
+		/* Setup low level sync buffer address */
+		regs->low_level_sync_base_addr = ll_sync_addr;
+		dev_dbg(vha->dev, "%s: set LLSYNC address -> %#llx\n",
+				__func__, ll_sync_addr);
+	}
+
+	ret = true;
+	/* Program core mappings. */
+	regs->cnn_vcore_mapping = vcore_map;
+
+out_error:
+	return ret;
+}
+
+static bool vha_wm_write_config_regs(struct vha_cmd *cmd, struct vha_config_regs* regs)
+{
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	uint8_t wm_id = sched_info->wm_id;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	uint32_t reg_size = VHA_CR_OS0_CNN_ALT_ADDRESS1 - VHA_CR_OS0_CNN_ALT_ADDRESS0;
+	uint32_t reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
+	uint32_t reg_idx_offset = 0;
+	uint32_t core_id = 0;
+	int i;
+
+	img_pdump_printf("-- Assign cores 0x%02x to WM%u\n", sched_info->core_mask, wm_id);
+	IOWRITE64_CR_PDUMP(regs->core_assignment, CORE_ASSIGNMENT);
+
+	for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
+		if (sched_info->core_mask & (1 << core_id)) {
+			uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);
+
+			img_pdump_printf("-- Select core: %llu\n", curr_core);
+			IOWRITE64_CR_PDUMP(curr_core, CORE_CTRL_INDIRECT);
+
+			img_pdump_printf("-- Setup command stream for core %u\n", core_id);
+			IOWRITE64_CR_PDUMP(regs->cnn_control[core_id], OS0_CNN_CONTROL);
+			IOWRITE64_CR_PDUMP(regs->cmd_base_addr[core_id], OS0_CNN_CMD_BASE_ADDRESS);
+		}
+	}
+
+	/* Operate only on a core assigned to this WM. */
+	img_pdump_printf("-- Select only cores assigned to WM: %u\n",
+							sched_info->core_mask);
+	IOWRITE64_CR_PDUMP(sched_info->core_mask, CORE_CTRL_INDIRECT);
+
+	if (regs->socm_base_addr != ~0) {
+		img_pdump_printf("-- Set SOCM circular buffer size for WM%d\n", wm_id);
+		IOWRITE64_CR_PDUMP(regs->socm_circ_buff_size, SOCM_CIRCULAR_BUFFER_SIZE);
+
+		img_pdump_printf("-- Set SOCM WM%u address\n", wm_id);
+		IOWRITE64_CR_PDUMP(regs->socm_base_addr, SOCM_BASE_ADDR);
+
+		img_pdump_printf("-- Assign SOCM bufs 0x%02x to WM%u\n", sched_info->core_mask, wm_id);
+		IOWRITE64_CR_PDUMP(regs->socm_buf_assignment, SOCM_BUF_ASSIGNMENT);
+
+		if (regs->socm_b7_xor_bits)
+			IOWRITE64_CR_PDUMP(regs->socm_b7_xor_bits, SOCM_B7_XOR_BITS);
+		if (regs->socm_b8_xor_bits)
+			IOWRITE64_CR_PDUMP(regs->socm_b8_xor_bits, SOCM_B8_XOR_BITS);
+	}
+
+	if (regs->locm_base_addr != ~0) {
+		img_pdump_printf("-- Set LOCM address\n");
+		IOWRITE64_CR_PDUMP(regs->locm_base_addr, OS0_LOCM_BASE_ADDR);
+	}
+
+	for (i = 0; i < VHA_CORE_MAX_ALT_ADDRS; i++) {
+		if (i >= 8) {
+			reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS8;
+			reg_idx_offset = 8;
+		}
+
+		if (regs->cnn_alt_addr_used & (1 << i)) {
+			img_pdump_printf("-- Set ALT_%d address\n", i);
+			IOWRITE64_PDUMP(regs->cnn_alt_addr[i], reg_base + (i - reg_idx_offset) * reg_size);
+		}
+	}
+
+	if (regs->low_level_sync_base_addr != ~0) {
+		/* Setup low level sync buffer address */
+		img_pdump_printf("-- Set LLSYNC address\n");
+		IOWRITE64_CR_PDUMP(regs->low_level_sync_base_addr, LOW_LEVEL_SYNC_BASE_ADDR);
+	}
+
+	if (!cnn_preloads_disable) {
+		/* Inform the hw what alt addresses are in use,
+		 * so the command decoder can prefetch */
+		img_pdump_printf("-- Setup CNN prefetch register\n");
+		IOWRITE64_CR_PDUMP(regs->cnn_alt_addr_used, OS0_CNN_ALT_ADDRESS_USED);
+	}
+
+	/* Program core mapping. */
+	img_pdump_printf("-- Program virtual core mappings\n");
+	IOWRITE64_CR_PDUMP(regs->cnn_vcore_mapping, OS0_CNN_VCORE_MAPPING);
+
+	return true;
+}
+
+#ifdef VHA_SCF
+#ifdef VHA_EVENT_INJECT
+#define CHECK_TOP_REG(_val_, _reg_) do {		\
+	uint64_t val64 = IOREAD64_CR_REGIO(_reg_);	\
+	if ((vha->injection.conf_err & CONF_ERR_TOP) && __EVENT_INJECT())	\
+		val64 = ~val64;				\
+	if (val64 != (_val_)) {				\
+		cmd->conf_top_error = true;		\
+		dev_err(vha->dev, "Confirmation writes mismatch, top register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			VHA_CR_##_reg_, (uint64_t)(_val_), val64);	\
+		goto out_error;				\
+	}} while(0)
+
+#define CHECK_CR_CORE_REG(_val_, _reg_, _core_id_) do {	\
+	uint64_t val64 = IOREAD64_CR_REGIO(_reg_);		\
+	if ((vha->injection.conf_err & CONF_ERR_BOTTOM) && __EVENT_INJECT())	\
+		val64 = ~val64;					\
+	if (val64 != (_val_)) {					\
+		cmd->conf_core_error |= 1 << (_core_id_);	\
+		dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			VHA_CR_##_reg_, (uint64_t)(_val_), val64);	\
+	}} while(0)
+
+#define CHECK_CORE_REG(_val_, _reg_, _core_id_) do {	\
+	uint64_t val64 = IOREAD64_REGIO(_reg_);		\
+	if ((vha->injection.conf_err & CONF_ERR_BOTTOM) && __EVENT_INJECT())	\
+		val64 = ~val64;					\
+	if (val64 != (_val_)) {					\
+		cmd->conf_core_error |= 1 << (_core_id_);	\
+		dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			_reg_, (uint64_t)(_val_), val64);	\
+	}} while(0)
+#else
+#define CHECK_TOP_REG(_val_, _reg_) do {		\
+	uint64_t val64 = IOREAD64_CR_REGIO(_reg_);	\
+	if (val64 != (_val_)) {				\
+		cmd->conf_top_error = true;		\
+		dev_err(vha->dev, "Confirmation writes mismatch, top register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			VHA_CR_##_reg_, (uint64_t)(_val_), val64);	\
+		goto out_error;				\
+	}} while(0)
+
+#define CHECK_CR_CORE_REG(_val_, _reg_, _core_id_) do {	\
+	uint64_t val64 = IOREAD64_CR_REGIO(_reg_);		\
+	if (val64 != (_val_)) {					\
+		cmd->conf_core_error |= 1 << (_core_id_);	\
+		dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			VHA_CR_##_reg_, (uint64_t)(_val_), val64);	\
+	}} while(0)
+
+#define CHECK_CORE_REG(_val_, _reg_, _core_id_) do {	\
+	uint64_t val64 = IOREAD64_REGIO(_reg_);		\
+	if (val64 != (_val_)) {				\
+		cmd->conf_core_error |= 1 << (_core_id_);	\
+		dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n"	\
+			" expected: 0x%016llx actual: 0x%016llx\n",	\
+			_reg_, (uint64_t)(_val_), val64);	\
+	}} while(0)
+#endif
+
+static bool vha_wm_confirm_config_regs(struct vha_cmd *cmd, struct vha_config_regs* regs)
+{
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	uint32_t reg_size = VHA_CR_OS0_CNN_ALT_ADDRESS1 - VHA_CR_OS0_CNN_ALT_ADDRESS0;
+	uint32_t reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
+	uint32_t reg_idx_offset = 0;
+	uint32_t core_id = 0;
+	int i;
+
+	CHECK_TOP_REG(regs->core_assignment, CORE_ASSIGNMENT);
+
+	for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
+		reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
+		reg_idx_offset = 0;
+
+		if (sched_info->core_mask & (1 << core_id)) {
+			uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);
+
+			IOWRITE64_CR_REGIO(curr_core, CORE_CTRL_INDIRECT);
+
+			CHECK_CR_CORE_REG(regs->cnn_control[core_id], OS0_CNN_CONTROL, core_id);
+			CHECK_CR_CORE_REG(regs->cmd_base_addr[core_id], OS0_CNN_CMD_BASE_ADDRESS, core_id);
+
+			if (regs->socm_base_addr != ~0) {
+				CHECK_CR_CORE_REG(regs->socm_circ_buff_size, SOCM_CIRCULAR_BUFFER_SIZE, core_id);
+				CHECK_CR_CORE_REG(regs->socm_base_addr, SOCM_BASE_ADDR, core_id);
+				CHECK_CR_CORE_REG(regs->socm_buf_assignment, SOCM_BUF_ASSIGNMENT, core_id);
+
+				if (regs->socm_b7_xor_bits)
+					CHECK_CR_CORE_REG(regs->socm_b7_xor_bits, SOCM_B7_XOR_BITS, core_id);
+				if (regs->socm_b8_xor_bits)
+					CHECK_CR_CORE_REG(regs->socm_b8_xor_bits, SOCM_B8_XOR_BITS, core_id);
+			}
+
+			if (regs->locm_base_addr != ~0) {
+				CHECK_CR_CORE_REG(regs->locm_base_addr, OS0_LOCM_BASE_ADDR, core_id);
+			}
+
+			for (i = 0; i < VHA_CORE_MAX_ALT_ADDRS; i++) {
+				if (i >= 8) {
+					reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS8;
+					reg_idx_offset = 8;
+				}
+
+				if (regs->cnn_alt_addr_used & (1 << i))
+					CHECK_CORE_REG(regs->cnn_alt_addr[i], reg_base + (i - reg_idx_offset) * reg_size, core_id);
+			}
+
+			if (regs->low_level_sync_base_addr != ~0) {
+				CHECK_CR_CORE_REG(regs->low_level_sync_base_addr, LOW_LEVEL_SYNC_BASE_ADDR, core_id);
+			}
+
+			if (!cnn_preloads_disable)
+				CHECK_CR_CORE_REG(regs->cnn_alt_addr_used, OS0_CNN_ALT_ADDRESS_USED, core_id);
+
+			CHECK_CR_CORE_REG(regs->cnn_vcore_mapping, OS0_CNN_VCORE_MAPPING, core_id);
+		}
+	}
+out_error:
+	return cmd->conf_top_error;
+}
+
+static bool vha_wm_confirm_mmu_regs(struct vha_cmd *cmd)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	uint32_t ctx_id = 0;
+
+	if (vha->mmu_mode == VHA_MMU_DISABLED) {
+		CHECK_TOP_REG(VHA_CR_OS(MMU_CTRL_BYPASS_EN), OS0_MMU_CTRL);
+		return false;
+	}
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
+		IOWRITE64_CR_REGIO(session->mmu_ctxs[ctx_id].hw_id, OS0_MMU_CBASE_MAPPING_CONTEXT);
+		CHECK_TOP_REG(session->mmu_ctxs[ctx_id].pc_baddr, OS0_MMU_CBASE_MAPPING);
+	}
+
+out_error:
+	return cmd->conf_top_error;
+}
+
+static bool vha_wm_confirm_mh_regs(struct vha_cmd *cmd, struct vha_mh_config_regs *regs)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+
+	CHECK_TOP_REG(regs->cnn_preload_control, OS0_CNN_PRELOAD_CONTROL);
+	CHECK_TOP_REG(regs->req_ctxt_override, REQ_CTXT_OVERRIDE);
+
+	if (regs->slc_control)
+		CHECK_TOP_REG(regs->slc_control, SLC_CTRL);
+
+out_error:
+	return cmd->conf_top_error;
+}
+
+static bool vha_wm_confirm_crc_regs(struct vha_cmd *cmd, struct vha_crc_config_regs *regs)
+{
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	uint32_t core_id = 0;
+
+	if (session->cnn_dbg.cnn_crc_buf[0] || vha->cnn_combined_crc_enable) {
+		for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
+			if (sched_info->core_mask & (1 << core_id)) {
+				uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);
+
+				IOWRITE64_CR_REGIO(curr_core, CORE_CTRL_INDIRECT);
+
+				CHECK_CR_CORE_REG(regs->crc_control, OS0_CNN_CRC_CONTROL, core_id);
+				CHECK_CR_CORE_REG(regs->crc_mask_ctrl, OS0_CNN_CRC_MASK_CTRL, core_id);
+
+				if (session->cnn_dbg.cnn_crc_buf[0])
+					CHECK_CR_CORE_REG(regs->crc_address[core_id],
+										OS0_CNN_CRC_ADDRESS, core_id);
+
+				if (vha->cnn_combined_crc_enable)
+					CHECK_CR_CORE_REG(regs->crc_combined_address[core_id],
+					 					OS0_COMBINED_CNN_CRC_ADDRESS, core_id);
+			}
+		}
+	}
+
+	return false;
+}
+#endif
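+
+/*
+ * Note on the confirm helpers above: under VHA_SCF every register written
+ * during workload setup is read back and compared against the value that
+ * was written, latching the first mismatch in cmd->conf_top_error. A
+ * minimal sketch of the pattern (illustrative only - the actual CHECK_*
+ * macros wrap this per register name and core id):
+ *
+ *   uint64_t readback = IOREAD64_CR_REGIO(SOME_REG);  // SOME_REG: placeholder
+ *   if (readback != expected) {
+ *       cmd->conf_top_error = true;
+ *       goto out_error;
+ *   }
+ */
+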
+/*
+ * submit a command stream to the CNN hardware
+ * input buffers:
+ *   command
+ *   input
+ *   coeff
+ * output buffers:
+ *   output
+ *   accum_load
+ * data:
+ *   none
+ */
+static int do_cmd_cnn_submit(struct vha_cmd *cmd, uint64_t *rsp_err_flags)
+{
+	const struct vha_user_cmd *user_cmd =
+		(struct vha_user_cmd *)&cmd->user_cmd;
+	struct vha_session *session = cmd->session;
+	struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
+	struct vha_dev *vha = session->vha;
+	int ret = -EINVAL;
+	struct vha_config_regs regs;
+	struct vha_mh_config_regs mh_regs;
+	struct vha_crc_config_regs crc_regs;
+#ifdef VHA_SCF
+	int i;
+#endif
+
+	memset(&regs, 0, sizeof(regs));
+	memset(&mh_regs, 0, sizeof(mh_regs));
+	memset(&crc_regs, 0, sizeof(crc_regs));
+
+	regs.socm_base_addr = ~0;
+	regs.locm_base_addr = ~0;
+	regs.low_level_sync_base_addr = ~0;
+
+#ifdef VHA_SCF
+	/* Initialize progress counters with the maximum possible values. */
+	for (i = 0; i < VHA_NUM_CORES; i++) {
+		cmd->layer_count[i] = ~0;
+		cmd->pass_count[i] = ~0;
+	}
+#endif
+
+	if (vha->hw_bypass) {
+		ret = -EAGAIN;
+		dev_info(vha->dev, "%s skip\n", __func__);
+		*rsp_err_flags |= VHA_RSP_ERROR(SW_SKIP_CMD);
+		goto out_error;
+	}
+
+	img_pdump_printf("-- WM_SETUP_BEGIN\n");
+
+	/* Select WM to submit this cmd to. */
+	img_pdump_printf("-- Select WM%u\n", sched_info->wm_id);
+	VHA_LOCK_WM();
+	VHA_SELECT_WM(sched_info->wm_id);
+
+	/* Wait for the previous kick to be accepted */
+	if (vha->low_latency != VHA_LL_DISABLED) {
+		/* Sanity wait for the WM kick bit to be deasserted */
+		ret = IOPOLL64_CR_PDUMP(0, 1000, 10,
+				(uint64_t)VHA_CR_BITMASK(WM_WL_CONTROL, WL_START),
+				WM_WL_CONTROL);
+		VHA_UNLOCK_WM();
+		if (ret) {
+			dev_err(vha->dev, "%s: WM%u kick bit read-back failed!\n",
+					__func__, sched_info->wm_id);
+			*rsp_err_flags |= VHA_RSP_ERROR(SW_KICK_BIT_READ_BACK_FAILURE);
+			goto out_error;
+		}
+		if (cmd->queued &&
+				vha->low_latency == VHA_LL_SW_KICK)
+			goto hw_kick;
+	} else {
+		VHA_UNLOCK_WM();
+	}
+
+	ret = -EINVAL;
+
+	if (vha->pendcmd[sched_info->wm_id].cmd != NULL &&
+				vha->low_latency == VHA_LL_DISABLED) {
+		dev_err(vha->dev, "%s: trying to submit workload on WM%u when hw busy!\n",
+			__func__, sched_info->wm_id);
+		*rsp_err_flags |= VHA_RSP_ERROR(SW_HW_BUSY);
+		goto out_error;
+	}
+
+	if (user_cmd->cmd_type == VHA_CMD_CNN_SUBMIT_MULTI) {
+		if (!vha_wm_setup_config_regs_multi(cmd, &regs)) {
+			dev_err(vha->dev, "%s: invalid cmd info\n", __func__);
+			*rsp_err_flags |= VHA_RSP_ERROR(SW_INVALID_CMD_INFO);
+			goto out_error;
+		}
+	} else {
+		dev_err(vha->dev, "%s: invalid cmd type %u\n",
+				__func__, user_cmd->cmd_type);
+		*rsp_err_flags |= VHA_RSP_ERROR(SW_INVALID_CMD_TYPE);
+		ret = -EINVAL;
+		goto out_error;
+	}
+
+	vha_wm_write_config_regs(cmd, &regs);
+	/* write the stream size only */
+	ret = 0;
+	if (vha->pendcmd[cmd->hw_sched_info.wm_id].cmd) {
+		vha->queuedcmd[cmd->hw_sched_info.wm_id].cmd = cmd;
+		cmd->queued = true;
+		vha->stats.cnn_kicks_queued++;
+		img_pdump_printf("-- WM%u already kicked, queueing!\n",
+							cmd->hw_sched_info.wm_id);
+		dev_dbg(vha->dev, "%s: WM%u already kicked. "
+				"Queueing -> kicked: 0x%08x/%u, queueing: 0x%08x/%u\n",
+				__func__, cmd->hw_sched_info.wm_id,
+				vha->pendcmd[cmd->hw_sched_info.wm_id].cmd->user_cmd.cmd_id,
+				vha->pendcmd[cmd->hw_sched_info.wm_id].cmd->session->id,
+				cmd->user_cmd.cmd_id, session->id);
+		if (vha->low_latency == VHA_LL_SW_KICK)
+			return ret;
+	}
+hw_kick:
+	/* Operate only on cores assigned to this WM. */
+	img_pdump_printf("-- Select cores\n");
+	IOWRITE64_CR_PDUMP(vha_wm_get_cores(vha, cmd->hw_sched_info.wm_id),
+			CORE_CTRL_INDIRECT);
+
+	/* Change mmu context */
+	ret = vha_mmu_setup(cmd->session);
+	if (ret) {
+		dev_err(vha->dev, "%s: Error while MMU setup!\n", __func__);
+		*rsp_err_flags |= VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE);
+		goto out_error;
+	}
+	/* Setup memory stuff */
+	vha_dev_mh_setup(vha, session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id, &mh_regs);
+
+	/* Prepare debug buffer registers */
+	vha_dbg_prepare_hwbufs(session, cmd, &crc_regs);
+
+	/* Setup cnn hw watchdog before kicking the hw */
+	{
+		uint64_t wl_cycles, core_cycles;
+
+		vha_wm_hwwdt_calculate(vha, cmd, &wl_cycles, &core_cycles);
+		vha_wm_hwwdt_setup(vha, cmd, wl_cycles, core_cycles);
+	}
+
+	img_pdump_printf("-- Select WM%d\n", cmd->hw_sched_info.wm_id);
+	/* Select WM to setup. */
+	VHA_LOCK_WM();
+	VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
+	/* Generate and set workload id. */
+	cmd->wm_cmd_id = ++vha->wm_cmd_id_count;
+	cmd->wm_cmd_id = (cmd->wm_cmd_id & VHA_WL_KICK_ID_COUNT_MASK) |
+						(cmd->hw_sched_info.wm_id << VHA_WL_KICK_ID_WM_ID_SHIFT);
+	img_pdump_printf("-- Set workload id: %u\n", cmd->wm_cmd_id);
+	IOWRITE64_CR_PDUMP(VHA_CR_SETBITS(WM_WL_ID, WL_ID, cmd->wm_cmd_id), WM_WL_ID);
+	VHA_UNLOCK_WM();
+
+	if (CMD_EXEC_ON_HW(cmd)) {
+		cmd->in_hw = true;
+		if (!cmd->queued)
+			vha->pendcmd[cmd->hw_sched_info.wm_id].cmd = cmd;
+	}
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+	/* Mark kick for dummy driver */
+	cmd->dummy_kicked = true;
+#endif
+
+	/* Consider this WL as kicked. */
+	vha->pri_q_counters[cmd->user_cmd.priority]--;
+
+	img_pdump_printf("-- WM_SETUP_END\n");
+
+	/* Remember the time cnn is kicked */
+	GETNSTIMEOFDAY(&cmd->hw_proc_start);
+	VHA_SET_WM_STAT(vha, hw_proc_start, cmd->hw_sched_info.wm_id, cmd->hw_proc_start);
+	/* Need to generate proper pdump */
+	if (cmd->queued &&
+			vha->low_latency == VHA_LL_SW_KICK) {
+		/* Do not write to pdump;
+		 * this needs to be done after the irq POL. */
+		VHA_LOCK_WM();
+		VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
+		IOWRITE64_CR_REGIO(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
+		VHA_UNLOCK_WM();
+		VHA_INC_WL_STAT(vha, kicks_queued, cmd);
+		dev_dbg(vha->dev, "%s: WM%u kick queued for cmd id 0x%08x/%u (WL kick id: 0x%08x)!\n",
+				__func__, sched_info->wm_id, cmd->user_cmd.cmd_id, session->id, cmd->wm_cmd_id);
+		cmd->queued = false;
+	} else {
+		img_pdump_printf("-- WM_KICK_BEGIN\n");
+		img_pdump_printf("-- Select WM%u\n", sched_info->wm_id);
+		VHA_LOCK_WM();
+		VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
+		img_pdump_printf("-- WM kick!\n");
+		IOWRITE64_CR_PDUMP(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
+		VHA_UNLOCK_WM();
+		if (cmd->queued)
+			VHA_INC_WL_STAT(vha, kicks_queued, cmd);
+		dev_dbg(vha->dev, "%s: WM%u %skick for cmd id 0x%08x/%u (WL kick id: 0x%08x)!\n",
+				__func__, sched_info->wm_id, cmd->queued ? "queued " : "",
+				cmd->user_cmd.cmd_id, session->id, cmd->wm_cmd_id);
+		img_pdump_printf("-- WM_KICK_END\n");
+	}
+
+#ifdef VHA_SCF
+	if (vha->confirm_config_reg) {
+		if (vha_wm_confirm_config_regs(cmd, &regs))
+			goto out_complete;
+		if (vha_wm_confirm_mmu_regs(cmd))
+			goto out_complete;
+		if (vha_wm_confirm_mh_regs(cmd, &mh_regs))
+			goto out_complete;
+		vha_wm_confirm_crc_regs(cmd, &crc_regs);
+out_complete:
+		complete(&cmd->conf_done);
+	}
+#endif
+
+	/* Update kick stats. */
+	vha->stats.cnn_kicks++;
+	VHA_INC_WL_STAT(vha, kicks, cmd);
+
+	/* Notify any observers of the submit event. */
+	if (vha_observers.submitted)
+		vha_observers.submitted(vha->id, session->id, user_cmd->cmd_id, false, user_cmd->priority);
+
+out_error:
+	if (ret != 0) {
+		/* Consider this WL as kicked for errors too. */
+		vha->pri_q_counters[cmd->user_cmd.priority]--;
+	}
+	return ret;
+}
+
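+/*
+ * Scheduling model used above (summary, not normative): each WM tracks at
+ * most two workloads - one pending in hardware (vha->pendcmd[wm_id]) and
+ * one queued behind it (vha->queuedcmd[wm_id]). Roughly:
+ *
+ *   if (!vha->pendcmd[wm_id].cmd)
+ *       kick now;                          // becomes pendcmd
+ *   else if (vha->low_latency != VHA_LL_DISABLED)
+ *       queue it;                          // becomes queuedcmd; with
+ *                                          // VHA_LL_SW_KICK the kick is
+ *                                          // issued after the irq POL
+ *   else
+ *       fail with SW_HW_BUSY;
+ */
+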
+/*
+ * append a string to the pdump TXT file
+ * buffers:
+ *   none
+ * data:
+ *   string to be printed
+ */
+static int do_cmd_cnn_pdump_msg(const struct vha_cmd *cmd)
+{
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+	struct vha_session *session = cmd->session;
+	struct vha_dev* vha = session->vha;
+	int ret = 0;
+
+	if (user_cmd->num_inbufs != 0 || user_cmd->num_bufs != 0) {
+		dev_err(session->vha->dev, ">0 buffers in cmd is wrong\n");
+		ret = -EINVAL;
+	}
+	/* remember the pdump message may not be null terminated */
+	img_pdump_printf("%.*s\n", (int)cmd->size, (char *)user_cmd->data);
+	return ret;
+}
+
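+/*
+ * Note on the "%.*s" format above: the user-supplied pdump message is
+ * length-delimited, not necessarily NUL-terminated, so the precision
+ * argument caps how many bytes the formatting may read, e.g.:
+ *
+ *   char msg[4] = { 'T', 'E', 'S', 'T' };       // no terminating NUL
+ *   img_pdump_printf("%.*s\n", 4, msg);         // safely prints "TEST"
+ */
+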
+/*
+ * Simple procedure that generates watchdog interrupt
+ */
+void vha_cnn_start_calib(struct vha_dev *vha)
+{
+	uint64_t core_mask = VHA_CALIBRATION_CORE_MASK;
+	uint64_t core_assignment;
+	uint64_t val64 = 0;
+
+	/* Use WM0 and core 0. */
+	vha_wm_assign_cores(vha, VHA_CALIBRATION_WM_ID, VHA_CALIBRATION_CORE_MASK, &core_assignment);
+	IOWRITE64_CR_PDUMP(core_assignment, CORE_ASSIGNMENT);
+
+	/* Operate only on core 0. */
+	IOWRITE64_CR_REGIO(core_mask, CORE_CTRL_INDIRECT);
+
+	/* Setup core WDTs. */
+	IOWRITE64_CR_REGIO(vha->calibration_cycles, CNN_WDT_COMPAREMATCH);
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(CNN_WDT_CTRL, MODE, KICK_PASS);
+	IOWRITE64_CR_REGIO(val64, CNN_WDT_CTRL);
+
+	IOWRITE64_CR_REGIO(VHA_CORE_MEM_WDT_CYCLES, CNN_MEM_WDT_COMPAREMATCH);
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(CNN_MEM_WDT_CTRL, MODE, KICK_PASS);
+	IOWRITE64_CR_REGIO(val64, CNN_MEM_WDT_CTRL);
+
+	/* Disable the command decoder, so we can generate a WDT interrupt
+	 * without providing any buffer address. */
+	val64 = IOREAD64_CR_REGIO(CLK_CTRL0);
+	VHA_CR_CLEARBITS(val64, CLK_CTRL0, CNN_CMD);
+	IOWRITE64_CR_REGIO(val64, CLK_CTRL0);
+
+	/* To be sure the command decoder clock has switched off. */
+	udelay(100);
+
+	/* Enable core only events */
+	IOWRITE64_CR_REGIO(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_ENABLE);
+	IOWRITE64_CR_REGIO(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_CLEAR);
+
+	/* Set minimum command stream size. */
+	val64 = VHA_CR_SETBITS(OS0_CNN_CONTROL, CMD_SIZE_MIN1, (2048U/32-1));
+	IOWRITE64_CR_REGIO(val64, OS0_CNN_CONTROL);
+
+	/* Enable MMU bypass */
+	IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
+		VHA_CR_OS(MMU_CTRL));
+
+	VHA_LOCK_WM();
+	/* Select WM0 for calibration. */
+	VHA_SELECT_WM(VHA_CALIBRATION_WM_ID);
+	/* Disable WM events */
+	IOWRITE64_CR_REGIO(0, WM_EVENT_ENABLE);
+	/* Start WM0. */
+	IOWRITE64_CR_REGIO(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
+	VHA_UNLOCK_WM();
+	/* Remember the time WM0 is kicked */
+	GETNSTIMEOFDAY(&vha->stats.wm_stats[VHA_CALIBRATION_WM_ID].hw_proc_start);
+}
+
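+/*
+ * Calibration idea: with the command decoder clock gated, the core WDT
+ * expires after exactly vha->calibration_cycles without needing any
+ * command stream, so the elapsed wall time gives an estimate of the real
+ * core clock. Sketch of the arithmetic (the actual measurement lives
+ * elsewhere in the driver; names illustrative):
+ *
+ *   freq_khz = calibration_cycles * 1000 / elapsed_us;
+ */
+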
+void vha_cnn_update_stats(struct vha_dev *vha)
+{
+	vha->stats.cnn_last_proc_us =
+		vha->stats.last_proc_us;
+	vha->stats.cnn_total_proc_us +=
+		vha->stats.last_proc_us;
+
+	if (vha->stats.cnn_kicks) {
+		uint64_t avg = vha->stats.cnn_total_proc_us;
+		do_div(avg, vha->stats.cnn_kicks);
+		vha->stats.cnn_avg_proc_us = avg;
+	}
+
+	if (vha->stats.cnn_last_cycles && vha->freq_khz) {
+		uint64_t est_proc_us = 1000UL * vha->stats.cnn_last_cycles;
+		do_div(est_proc_us, vha->freq_khz);
+		vha->stats.cnn_last_est_proc_us = est_proc_us;
+	}
+	vha->stats.cnn_total_cycles += vha->stats.cnn_last_cycles;
+	if (vha->stats.cnn_kicks &&
+			vha->stats.cnn_total_cycles && vha->freq_khz) {
+		uint64_t avg = 1000UL * vha->stats.cnn_total_cycles;
+		do_div(avg, vha->stats.cnn_kicks);
+		do_div(avg, vha->freq_khz);
+		vha->stats.cnn_avg_est_proc_us = avg;
+	}
+}
+
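+/*
+ * Worked example for the estimates above (illustrative numbers): with
+ * cnn_last_cycles = 8,000,000 and freq_khz = 800,000 (i.e. 800 MHz),
+ *
+ *   cnn_last_est_proc_us = 1000 * 8,000,000 / 800,000 = 10,000 us
+ *
+ * so scaling cycles by 1000 before dividing by a kHz clock yields
+ * microseconds (10 ms here).
+ */
+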
+/*
+ * a command has completed. sent notification to user
+ */
+void vha_cnn_cmd_completed(struct vha_cmd *cmd, uint64_t status, int err, uint64_t rsp_err_flags)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	struct vha_rsp *rsp = NULL;
+	int i;
+	struct vha_user_cnn_submit_rsp *cnn_submit_rsp = NULL;
+
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+
+	switch (user_cmd->cmd_type) {
+	case VHA_CMD_CNN_SUBMIT_MULTI:
+	{
+		size_t mem_usage;
+		/* allocate sufficient space for the response */
+		size_t sz = sizeof(*rsp)
+			+ sizeof(struct vha_user_cnn_submit_rsp)
+			- sizeof(struct vha_user_rsp);
+#ifdef VHA_SCF
+		uint64_t wm_fifo_ready =
+				VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_EN |
+				VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_EN;
+		uint64_t wm_fifo_mask =
+				VHA_WM_EVENTS_DEFAULT | VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_EN;
+
+		uint64_t wm_fifo_status_success =
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN |
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN;
+		uint64_t wm_fifo_status_mask =
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL;
+#else
+		uint64_t wm_fifo_ready =
+				VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_EN;
+		uint64_t wm_fifo_mask = VHA_WM_EVENTS_DEFAULT;
+
+		uint64_t wm_fifo_status_success =
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN;
+		uint64_t wm_fifo_status_mask =
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL &
+				~VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN;
+#endif
+		uint64_t wm_src_mask = VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM,
+								VHA_WM_ID_TO_MASK(cmd->hw_sched_info.wm_id)) |
+								VHA_SET_FIELD_SIMPLE_VAL(HOST_EVENT_SOURCE, SYS, EN) |
+								VHA_SET_FIELD_SIMPLE_FULL(HOST_EVENT_SOURCE, CORE) |
+								VHA_SET_FIELD_SIMPLE_FULL(HOST_EVENT_SOURCE, IC);
+		uint32_t num_cores;
+		uint32_t outbuf_offset;
+		uint32_t outbuf_last_idx;
+		uint32_t outbuf_data_offset;
+		uint32_t* bufoffsets;
+		uint32_t* bufsizes;
+		struct vha_user_cnn_submit_multi_cmd *msg;
+
+		rsp = kzalloc(sz, GFP_KERNEL);
+		if (rsp == NULL) {
+			session->oom = true;
+			return;
+		}
+
+		cnn_submit_rsp = (struct vha_user_cnn_submit_rsp*)&rsp->user_rsp;
+		rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
+
+		if (vha->hw_bypass) {
+			vha->hw_bypass--;
+			break;
+		}
+
+		dev_dbg(vha->dev, "%s: 0x%08x/%u\n", __func__, cmd->user_cmd.cmd_id, session->id);
+
+		img_pdump_printf("-- WM_WAIT_BEGIN\n");
+		/* pdump POL for event source change
+		 * count=cnn_pdump_poll_count, delay=1000cycles */
+		img_pdump_printf("-- Wait for WM%u or any event source to be signalled\n"
+				"POL :REG:%#x 0 %#llx 3 %u 1000\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_HOST_EVENT_SOURCE,
+				wm_src_mask,
+				cnn_pdump_poll_count);
+
+		/* quick pdump POL for the related WM source flag only:
+		 * count=1, delay=10cycles */
+		img_pdump_printf("-- Check for WM%u source, all COREs/ICs & SYS\n"
+				"POL :REG:%#x %#llx 0x%llx 0 %u 10\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_HOST_EVENT_SOURCE,
+				VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM,
+						VHA_WM_ID_TO_MASK(cmd->hw_sched_info.wm_id)),
+				wm_src_mask,
+				wm_pdump_poll_count);
+
+		/* quick pdump POL for the FIFO_READY flag only in related WM:
+		 * count=1, delay=10cycles */
+		img_pdump_printf("-- Select WM%u\n"
+				"WRW64 :REG:%#x %#llx\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_TLC_WM_INDIRECT,
+				(uint64_t)cmd->hw_sched_info.wm_id);
+		img_pdump_printf("-- Check for WM%u FIFO_READY flag\n"
+				"POL :REG:%#x %#llx 0x%llx 0 1 10\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_WM_EVENT_STATUS,
+				wm_fifo_ready,
+				wm_fifo_mask);
+
+		/* quick pdump POL for AXI errors:
+		 * count=1, delay=10cycles
+		 */
+		img_pdump_printf("-- Post check of AXI status\n"
+				"POL :REG:%#x 0 0xffffffff 0 1 10\n",
+				VHA_CR_ACE_STATUS);
+
+		/* We do clear interrupts in the irq handler,
+		 * but this is not recorded into pdump because
+		 * of the irq context, so do it here */
+		img_pdump_printf("-- Clear SYS events\n"
+				"WRW64 :REG:%#x %#x\n",
+				VHA_CR_SYS_EVENT_CLEAR,
+				VHA_SYS_EVENTS_DEFAULT);
+		img_pdump_printf("-- Clear WM%u events\n"
+				"WRW64 :REG:%#x %#x\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_WM_EVENT_CLEAR,
+				VHA_WM_EVENTS_DEFAULT);
+		img_pdump_printf("-- Select core assigned to WM%u\n"
+				"WRW64 :REG:%#x %#x\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_CORE_CTRL_INDIRECT,
+				cmd->hw_sched_info.core_mask);
+		img_pdump_printf("-- Clear core events\n"
+				"WRW64 :REG:%#x %#x\n",
+				VHA_CR_CORE_EVENT_HOST_CLEAR,
+				VHA_CORE_EVENTS_DEFAULT);
+
+		img_pdump_printf("-- Check RESPONSE_FIFO status for WM%u\n"
+				"POL :REG:%#x %#llx 0x%llx 0 1 10\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_WM_RESPONSE_FIFO_WL_STATUS,
+				wm_fifo_status_success,
+				wm_fifo_status_mask);
+
+		img_pdump_printf("-- Check RESPONSE_FIFO workload id for WM%u\n"
+				"POL :REG:%#x %#llx 0x%llx 0 1 10\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_WM_RESPONSE_FIFO_WL_ID,
+				(uint64_t)cmd->wm_cmd_id,
+				VHA_CR_WM_RESPONSE_FIFO_WL_ID_MASKFULL);
+
+		/* Pop the RESPONSE_FIFO */
+		img_pdump_printf("-- Pop RESPONSE_FIFO for WM%u\n"
+				"WRW64 :REG:%#x %#x\n",
+				cmd->hw_sched_info.wm_id,
+				VHA_CR_WM_RESPONSE_FIFO_READ,
+				VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_EN);
+
+#ifdef CONFIG_VHA_DUMMY
+		vha_wm_release_cores(session->vha,
+				cmd->hw_sched_info.core_mask, true);
+#endif
+		/* Try to flush hw debug buffers first
+		 * - this does pdump SAB when proper checkpoint is set */
+		vha_dbg_flush_hwbufs(session, 1, cmd->hw_sched_info.core_mask);
+
+		/* pdump SAB for each of the output buffers */
+		img_pdump_printf("-- Save outputs\n");
+		msg = container_of(user_cmd, struct vha_user_cnn_submit_multi_cmd, msg);
+		num_cores = msg->num_cores;
+		outbuf_offset = VHA_MAX_CORES + (user_cmd->num_inbufs - num_cores);
+		outbuf_last_idx = VHA_MAX_CORES + user_cmd->num_bufs - 1;
+		outbuf_data_offset = user_cmd->num_inbufs - num_cores;
+		bufoffsets = msg->bufoffsets;
+		bufsizes = msg->bufsizes;
+
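+		/*
+		 * Index math example (hypothetical numbers): with
+		 * VHA_MAX_CORES = 8, num_cores = 2, num_inbufs = 5 and
+		 * num_bufs = 7, the layout of user_cmd->data[] gives
+		 *   outbuf_offset   = 8 + (5 - 2) = 11,
+		 *   outbuf_last_idx = 8 + 7 - 1   = 14,
+		 * so the loop below saves data[11..13] as outputs.
+		 */
+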
+		/* There should be at least one output buffer */
+		WARN_ON(outbuf_last_idx <= outbuf_offset);
+
+		for (i = outbuf_offset; i < outbuf_last_idx; i++) {
+			struct vha_buffer *buf;
+			uint32_t offset;
+			uint32_t size;
+
+			buf = vha_find_bufid(session, user_cmd->data[i]);
+			if (buf == NULL) {
+				dev_err(vha->dev,
+						"%s: invalid buffer id:%d\n",
+						__func__, user_cmd->data[i]);
+				continue;
+			}
+			offset = bufoffsets[outbuf_data_offset];
+			size = bufsizes[outbuf_data_offset];
+			outbuf_data_offset++;
+
+			vha_pdump_sab_buf(session, PDUMP_RES, buf, offset, size);
+
+			/* Update status, do not signal fence yet;
+			 * it is done explicitly below, after cache invalidation */
+			vha_set_buf_status(session, buf->id, VHA_BUF_FILLED_BY_HW,
+					VHA_SYNC_NONE, false);
+
+			if (vha_buf_needs_inval(session, buf->id) && !status)
+				img_mem_sync_device_to_cpu(session->mem_ctx, buf->id);
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+			img_mem_signal_fence(session->mem_ctx, buf->id);
+#endif
+		}
+
+		if (session->vha->low_latency == VHA_LL_SW_KICK) {
+			struct vha_cmd *qcmd =
+				session->vha->queuedcmd[cmd->hw_sched_info.wm_id].cmd;
+
+			if (qcmd && qcmd->queued) {
+				/* Setup kick info */
+				img_pdump_printf("-- CNN kick (queued)!\n");
+				img_pdump_printf("WRW64 :REG:%#x %#x\n",
+						VHA_CR_WM_WL_CONTROL, VHA_CR_WM_WL_CONTROL_WL_START_EN);
+			}
+		}
+		img_pdump_printf("-- WM_WAIT_END\n");
+
+		img_mem_get_usage(session->mem_ctx, NULL, &mem_usage);
+		/* send out an event when submit is complete */
+		if (vha_observers.completed)
+			vha_observers.completed(
+				session->vha->id,
+				session->id,
+				user_cmd->cmd_id,
+				status,
+				session->vha->stats.cnn_last_cycles,
+				mem_usage,
+				user_cmd->priority);
+
+		/* post some metrics about the hw to user space */
+#ifdef MEM_USAGE_LAST_METRICS_ARE_AVAILABLE
+		cnn_submit_rsp->mem_usage = mem_usage;
+#else
+		cnn_submit_rsp->mem_usage = ~0;
+#endif
+		cnn_submit_rsp->last_proc_us = session->vha->stats.cnn_last_proc_us;
+		cnn_submit_rsp->hw_cycles = session->vha->stats.cnn_last_cycles;
+		dev_dbg(session->vha->dev, "%s: 0x%08x/%u, hw_cycles %llx\n", __func__,
+				cmd->user_cmd.cmd_id, session->id, session->vha->stats.cnn_last_cycles);
+
+		if (session->vha->stats.cnn_last_cycles > (uint32_t)~0)
+			dev_warn(session->vha->dev,
+						"%s: hw_cycles %llx exceeds 32bit limit\n",
+						__func__,
+				session->vha->stats.cnn_last_cycles);
+		break;
+	}
+	case VHA_CMD_CNN_PDUMP_MSG:
+	default:
+		/* allocate space for standard response */
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+		if (rsp == NULL) {
+			session->oom = true;
+			return;
+		}
+		rsp->size = sizeof(rsp->user_rsp);
+		break;
+	}
+
+	if (user_cmd->flags & VHA_CMDFLAG_NOTIFY) {
+		rsp->user_rsp.cmd_id = cmd->user_cmd.cmd_id;
+		rsp->user_rsp.err_no = session->vha->hw_bypass ? 0 : err;
+		rsp->user_rsp.rsp_err_flags = rsp_err_flags;
+
+		cmd->rsp = rsp;
+	} else
+		kfree(rsp);
+}
+
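+/*
+ * Sizing note for the submit response above: struct vha_rsp embeds a
+ * plain struct vha_user_rsp, so the allocation swaps it for the larger
+ * CNN variant in place:
+ *
+ *   sz = sizeof(struct vha_rsp)
+ *      + sizeof(struct vha_user_cnn_submit_rsp)
+ *      - sizeof(struct vha_user_rsp);
+ *
+ * which lets &rsp->user_rsp be cast to struct vha_user_cnn_submit_rsp
+ * without a second allocation.
+ */
+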
+static uint32_t get_estimated_cycles(const struct vha_user_cmd *user_cmd)
+{
+	const struct vha_user_cnn_submit_multi_cmd *cnn_user_cmd =
+		(struct vha_user_cnn_submit_multi_cmd *)user_cmd;
+	return cnn_user_cmd->estimated_cycles;
+}
+
+/*
+ * Perform a command, as requested by user.
+ * note: this function is called with vha_dev.lock == locked
+ */
+int vha_do_cnn_cmd(struct vha_cmd *cmd)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+	int err = -EINVAL;
+	uint64_t rsp_err_flags = 0;
+
+	dev_dbg(vha->dev,
+			"%s: WL id:0x%08x type:%x nin:%x nbufs:%x\n",
+			__func__, user_cmd->cmd_id, user_cmd->cmd_type,
+			user_cmd->num_inbufs, user_cmd->num_bufs);
+	print_hex_dump_debug("VHA CMD: ", DUMP_PREFIX_NONE, 4, 4,
+							user_cmd, ALIGN(cmd->size, 4), false);
+
+	switch (user_cmd->cmd_type) {
+		case VHA_CMD_CNN_SUBMIT_MULTI:
+			err = do_cmd_cnn_submit(cmd, &rsp_err_flags);
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+			if (cmd->dummy_kicked) {
+				uint32_t estimated_cycles = get_estimated_cycles(user_cmd);
+
+				if (estimated_cycles == 0)
+					estimated_cycles = VHA_DUMMY_HW_PROCESSING_TIME_CYCLES;
+				cmd->dummy_exec_time = (estimated_cycles / (vha->freq_khz / 1000));
+				if (cmd->hw_sched_info.wm_id < vha->hw_props.num_cnn_core_devs)
+					schedule_delayed_work(
+							&vha->dummy_dworks[cmd->hw_sched_info.wm_id].dummy_dwork,
+							usecs_to_jiffies(cmd->dummy_exec_time));
+				cmd->dummy_kicked = false;
+			}
+#endif
+			break;
+		case VHA_CMD_CNN_PDUMP_MSG:
+			err = do_cmd_cnn_pdump_msg(cmd);
+			break;
+		default:
+			break;
+	}
+
+	/*
+	 * Immediately send notification to user if not using hw at all
+	 * or submitting failed.
+	 */
+	if (!CMD_EXEC_ON_HW(cmd) || err) {
+		bool is_cnn_cmd = CMD_IS_CNN(cmd);
+		vha_cnn_cmd_completed(cmd,
+			err ? (uint64_t)VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_EN : 0ULL, err, rsp_err_flags);
+		if (is_cnn_cmd) {
+			if (rsp_err_flags & VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE))
+				vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
+			/* Free current command */
+			vha_dev_free_cmd_res(vha, cmd, false);
+		}
+		vha_cmd_notify(cmd);
+
+		if (is_cnn_cmd) {
+			if (rsp_err_flags & VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE)) {
+				/* Rollback commands being processed to perform full reset */
+				vha_rollback_cmds(vha);
+				/* Perform stop & reset if necessary */
+				vha_dev_stop(vha, true);
+				/* Reschedule commands */
+				vha_chk_cmd_queues(vha, true);
+			}
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
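+/*
+ * Return contract (as implemented above): 0 means the workload was handed
+ * to the hardware and completion will be signalled from the interrupt
+ * path; 1 means the command was completed and notified synchronously here
+ * (hw bypass, pdump-only commands, or a submission error).
+ */
+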
+uint8_t vha_wm_get_cores(struct vha_dev *vha, uint8_t wm_id)
+{
+	uint8_t core_mask = 0;
+	uint64_t wm_core_assignment;
+
+#define CHECK_CORE_ASSIGNMENT(c) \
+		if (wm_id == VHA_CR_GETBITS(CORE_ASSIGNMENT, CORE_##c##_WM_MAPPING, \
+									wm_core_assignment)) \
+			core_mask |= (1 << c);
+
+	wm_core_assignment = vha->wm_core_assignment;
+	dev_dbg(vha->dev, "%s: %llx\n", __func__, wm_core_assignment);
+	CHECK_CORE_ASSIGNMENT(0);
+	CHECK_CORE_ASSIGNMENT(1);
+	CHECK_CORE_ASSIGNMENT(2);
+	CHECK_CORE_ASSIGNMENT(3);
+	CHECK_CORE_ASSIGNMENT(4);
+	CHECK_CORE_ASSIGNMENT(5);
+	CHECK_CORE_ASSIGNMENT(6);
+	CHECK_CORE_ASSIGNMENT(7);
+
+#undef CHECK_CORE_ASSIGNMENT
+
+	return core_mask;
+}
+
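+/*
+ * Example (field encoding is illustrative): if CORE_ASSIGNMENT currently
+ * maps cores 0 and 1 to WM0 and core 2 to WM1, then
+ *
+ *   vha_wm_get_cores(vha, 0) == 0x03   // 0b00000011
+ *   vha_wm_get_cores(vha, 1) == 0x04   // 0b00000100
+ */
+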
+void vha_wm_assign_cores(struct vha_dev *vha, uint8_t wm_id, uint8_t core_mask, uint64_t *core_assignment)
+{
+	uint64_t wm_core_assignment = vha->wm_core_assignment;
+	uint32_t assignment_field_shift =
+					VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_SHIFT -
+								VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_SHIFT;
+	uint64_t assignment_field_mask =
+							~VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_CLRMSK;
+	uint64_t wm_core_assignment_orig = wm_core_assignment;
+
+	while (core_mask != 0) {
+		uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);
+
+		core_mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
+
+		wm_core_assignment &=
+			~(assignment_field_mask << (curr_core_id * assignment_field_shift));
+		wm_core_assignment |= wm_id << (curr_core_id * assignment_field_shift);
+	}
+
+	dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__, wm_core_assignment_orig, wm_core_assignment);
+	*core_assignment = wm_core_assignment;
+	vha->wm_core_assignment = wm_core_assignment;
+}
+
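+/*
+ * The loop above is a standard clear-then-set field update over a packed
+ * register. Generic sketch of the technique (names illustrative):
+ *
+ *   shift = core_id * field_width;
+ *   reg  &= ~(field_mask << shift);     // clear this core's mapping
+ *   reg  |= (uint64_t)wm_id << shift;   // install the new WM id
+ */
+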
+static void wm_release_socm(struct vha_dev *vha, uint8_t core_mask, bool to_pdump)
+{
+	uint64_t cur_assignment = IOREAD64_CR_REGIO(SOCM_BUF_ASSIGNMENT);
+	uint32_t assignment_field_shift =
+				VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_SHIFT -
+					VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_SHIFT;
+	uint64_t assignment_field_mask =
+					~VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_CLRMSK;
+	uint64_t new_assignment = cur_assignment;
+	uint64_t mask = core_mask;
+
+	while (mask != 0) {
+			uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(mask);
+
+			mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
+
+			new_assignment &=
+				~(assignment_field_mask << (curr_core_id * assignment_field_shift));
+			new_assignment |= VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_UNALLOCATED
+					<< (curr_core_id * assignment_field_shift);
+	}
+
+	if (cur_assignment == new_assignment) {
+		dev_dbg(vha->dev, "%s: %llx -> %llx (no change)\n", __func__, cur_assignment, new_assignment);
+		return;
+	}
+
+	dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__, cur_assignment, new_assignment);
+	if (to_pdump) {
+		img_pdump_printf("-- Release SOCM on cores 0x%02x\n", core_mask);
+		IOWRITE64_CR_PDUMP(new_assignment, SOCM_BUF_ASSIGNMENT);
+	} else
+		IOWRITE64_CR_REGIO(new_assignment, SOCM_BUF_ASSIGNMENT);
+}
+
+void vha_wm_release_cores(struct vha_dev *vha, uint8_t core_mask, bool to_pdump)
+{
+	uint64_t cur_assignment = vha->wm_core_assignment;
+	uint32_t assignment_field_shift =
+					VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_SHIFT -
+								VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_SHIFT;
+	uint64_t assignment_field_mask =
+							~VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_CLRMSK;
+	uint64_t new_assignment = cur_assignment;
+	uint64_t mask = core_mask;
+
+	wm_release_socm(vha, core_mask, to_pdump);
+
+	while (mask != 0) {
+		uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(mask);
+
+		mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
+
+		new_assignment &=
+			~(assignment_field_mask << (curr_core_id * assignment_field_shift));
+		new_assignment |=
+			VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED <<
+										(curr_core_id * assignment_field_shift);
+	}
+
+	if (cur_assignment == new_assignment) {
+		dev_dbg(vha->dev, "%s: %llx -> %llx (no change)\n", __func__, cur_assignment, new_assignment);
+		return;
+	}
+
+	dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__, cur_assignment, new_assignment);
+	if (to_pdump) {
+		img_pdump_printf("-- Release cores 0x%02x\n", core_mask);
+		IOWRITE64_CR_PDUMP(new_assignment, CORE_ASSIGNMENT);
+	} else
+		IOWRITE64_CR_REGIO(new_assignment, CORE_ASSIGNMENT);
+	vha->wm_core_assignment = new_assignment;
+}
+
+int vha_wm_reset(struct vha_dev *vha, struct vha_hw_sched_info *sched_info)
+{
+	uint64_t val64 = 0;
+	uint64_t wm_reset_val64 = 0;
+	uint8_t wm_cores_mask = 0;
+	uint8_t core_mask = 0;
+	uint8_t id;
+	int ret = 0;
+
+	dev_dbg(vha->dev, "%s: WM%d\n", __func__, sched_info->wm_id);
+
+	img_pdump_printf("-- WM level RESET sequence BEGIN\n");
+
+	/* Perform reset procedure */
+
+	/* Operate only on cores assigned to this WM. */
+	wm_cores_mask = sched_info->core_mask;
+
+	/* Core Level Reset Assertion:
+	 * 4. Force global clocks on current core (others set to AUTO). */
+	img_pdump_printf("-- Force global clocks ON for all cores assigned to WM %u"
+						" (others set to AUTO)\n", sched_info->wm_id);
+	val64 = VHA_SYS_CLOCK_MODE(INTERCONNECT, ON) |
+			VHA_SYS_CLOCK_MODE_MULTI(CORE, ON, wm_cores_mask) |
+			VHA_SYS_CLOCK_MODE_MULTI(CORE, AUTO, (uint8_t)~wm_cores_mask) |
+			VHA_SYS_CLOCK_MODE_MULTI(NOC, AUTO, ~0) |
+			VHA_SYS_CLOCK_MODE_MULTI(WM, AUTO, ~0) |
+			VHA_SYS_CLOCK_MODE(AXI, AUTO) |
+			VHA_SYS_CLOCK_MODE(SLC, AUTO) |
+			VHA_SYS_CLOCK_MODE(LSYNC, AUTO) |
+			VHA_SYS_CLOCK_MODE(SOCM, AUTO) |
+			VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
+	IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+
+	/* WM reset procedure start. */
+	/* Move this WM into reset state. */
+	img_pdump_printf("-- Move WM%u into reset state\n", sched_info->wm_id);
+	wm_reset_val64 = VHA_CR_SETBITS(SYS_RESET_CTRL, WM, VHA_WM_ID_TO_MASK(sched_info->wm_id));
+	IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+
+	/* Core Level Reset Sequence */
+
+	/* Proceed core by core. */
+	while (wm_cores_mask) {
+		/* Reset Assertion */
+
+		/* 1. Select current core. */
+		id = ffs(wm_cores_mask) - 1;
+		img_pdump_printf("-- Select core%u\n", id);
+		core_mask = VHA_CORE_ID_TO_MASK(id);
+		wm_cores_mask &= ~core_mask;
+		IOWRITE64_CR_PDUMP(core_mask, CORE_CTRL_INDIRECT);
+		/* 3. Disable page fault interrupts for core while resetting. */
+		img_pdump_printf("-- Disable page fault interrupts for core%u\n", id);
+		val64 = IOREAD64_CR_REGIO(SYS_EVENT_ENABLE);
+		val64 &= ~(VHA_CR_SETBITS(SYS_EVENT_ENABLE, MMU_PAGE_FAULT, core_mask));
+		IOWRITE64_CR_PDUMP(val64, SYS_EVENT_ENABLE);
+		/* 5. Set all core level clocks to AUTO. */
+		img_pdump_printf("-- Set all core%u level clocks to AUTO\n", id);
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 6. Move core into soft reset. */
+		img_pdump_printf("-- Perform soft reset on core%u\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_SOFT_RESET, CORE_RESET, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_SOFT_RESET);
+		/*    Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(CORE_SOFT_RESET);
+		/*    Clear reset. */
+		IOWRITE64_CR_PDUMP(0, CORE_SOFT_RESET);
+		/* 7. Wait until core memory bus reset has completed. */
+		img_pdump_printf("-- Wait until core%u memory bus reset has completed\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 1000, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 8. Clear core memory bus reset interrupt. */
+		img_pdump_printf("-- Clear core%u memory bus reset interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, MEMBUS_RESET_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/* 9. Clear the core indirect register. */
+		img_pdump_printf("-- Deselect core%u\n", id);
+		IOWRITE64_CR_PDUMP(0, CORE_CTRL_INDIRECT);
+		/* 10. Ensure no resets are pending. */
+		img_pdump_printf("-- Ensure no resets are pending\n");
+		IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
+		/* 11. Move current core into full reset state. Leave WM in reset. */
+		img_pdump_printf("-- Move core%u into full reset state\n", id);
+		val64 =  VHA_CR_SETBITS(SYS_RESET_CTRL, CORE, core_mask);
+		val64 |= wm_reset_val64;
+		IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
+		/* 12. Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+
+		/* Reset Deassertion */
+
+		/* 1. Move current core out of reset state. */
+		img_pdump_printf("-- Move core%u out of reset state\n", id);
+		IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
+		/*    Dummy read to avoid race conditions in the hw. */
+		val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+		/* 2. Select current core again. */
+		img_pdump_printf("-- Select core%u again\n", id);
+		IOWRITE64_CR_PDUMP(core_mask, CORE_CTRL_INDIRECT);
+		/* 5. Force core clocks to ON for everything. */
+		img_pdump_printf("-- Force core clocks ON for everything\n");
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(ON);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 6. Perform core level RAM initialisation. */
+		img_pdump_printf("-- Perform core%u level RAM initialisation\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(FUSA_CONTROL, ECC_INIT_KICK, EN);
+		IOWRITE64_CR_PDUMP(val64, FUSA_CONTROL);
+		/* 7. Perform LOCM scrubbing. */
+		img_pdump_printf("-- Perform core%u LOCM scrubbing\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(LOCM_SCRUB_CTRL, KICK, EN);
+		IOWRITE64_CR_PDUMP(val64, LOCM_SCRUB_CTRL);
+		/* 8. Wait until the RAM initialisation sequence has completed. */
+		img_pdump_printf("-- Wait until the RAM initialisation sequence has completed\n");
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 9. Clear core RAM reset interrupt. */
+		img_pdump_printf("-- Clear core%u RAM reset interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, RAM_INIT_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/*    Confirm that 'RAM_INIT_DONE' field is cleared. */
+		img_pdump_printf("-- Confirm that core%u RAM reset interrupt is cleared\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(0ULL, 10, 10, val64, CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 10. Wait until the LOCM scrubbing sequence has completed. */
+		img_pdump_printf("-- Wait until the LOCM scrubbing sequence has completed.\n");
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(val64, 1000, 1000,
+				(uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE),
+				CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 11. Deassert core LOCM scrubbing. */
+		img_pdump_printf("-- Deassert core%u LOCM scrubbing\n", id);
+		IOWRITE64_CR_PDUMP(0, LOCM_SCRUB_CTRL);
+		/* 12. Clear core LOCM scrub interrupt. */
+		img_pdump_printf("-- Clear core%u LOCM scrub interrupt\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, LOCM_SCRUB_DONE, EN);
+		IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
+		/*    Confirm that 'LOCM_SCRUB_DONE' field is cleared. */
+		img_pdump_printf("-- Confirm that core%u LOCM scrub interrupt is cleared\n", id);
+		val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
+		ret = IOPOLL64_CR_PDUMP(0ULL, 10, 10, val64, CORE_EVENT_HOST_STATUS);
+		if (ret)
+			return ret;
+		/* 13. Enable the interrupts from core to WM. */
+		img_pdump_printf("-- Enable CORE events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_ENABLE);
+		/* 14. Clear all status from CORE_EVENT_WM (clears the RAM_INIT_DONE). */
+		img_pdump_printf("-- Clear CORE events on WM\n");
+		IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, RAM_INIT_DONE, EN) |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, LOCM_SCRUB_DONE, EN) |
+				VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, MEMBUS_RESET_DONE, EN),
+				CORE_EVENT_WM_CLEAR);
+		/* 15. Enable the interrupts from interconnect to WM. */
+		img_pdump_printf("-- Enable INTERCONNECT events to WM\n");
+		IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_ENABLE);
+		/* 16. Disable all interrupts from the CORE to the HOST. */
+		img_pdump_printf("-- Disable CORE events on host\n");
+		IOWRITE64_CR_PDUMP(0, CORE_EVENT_HOST_ENABLE);
+		/* 17. Set all core level clocks back to AUTO. */
+		img_pdump_printf("-- Set all core%u level clocks back to AUTO\n", id);
+		val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+		IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
+		/* 18. Set core global clock back to AUTO. */
+		img_pdump_printf("-- Set core%u global clock back to AUTO (others set to ON or AUTO)\n", id);
+		if (wm_cores_mask == 0) {
+			val64 = VHA_SYS_CLOCKS_DEFAULT(AUTO);
+			IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+		} else {
+			val64 = VHA_SYS_CLOCK_MODE(INTERCONNECT, ON) |
+					VHA_SYS_CLOCK_MODE_MULTI(CORE, ON, wm_cores_mask) |
+					VHA_SYS_CLOCK_MODE_MULTI(CORE, AUTO, (uint8_t)~wm_cores_mask) |
+					VHA_SYS_CLOCK_MODE_MULTI(NOC, AUTO, ~0) |
+					VHA_SYS_CLOCK_MODE_MULTI(WM, AUTO, ~0) |
+					VHA_SYS_CLOCK_MODE(AXI, AUTO) |
+					VHA_SYS_CLOCK_MODE(SLC, AUTO) |
+					VHA_SYS_CLOCK_MODE(LSYNC, AUTO) |
+					VHA_SYS_CLOCK_MODE(SOCM, AUTO) |
+					VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
+			IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
+		}
+
+		/* Setup stalling if requested. */
+		if (vha->stalling_membus_sys_stall_ratio != 0)
+			IOWRITE64_CR_REGIO(vha->stalling_membus_sys_stall_ratio,
+								NN_SYS2_MEMBUS_SYS_STALL_RATIO);
+	}
+
+	/* WM reset procedure end. */
+	/* Move this WM out of reset state. */
+	img_pdump_printf("-- Move WM%u out of reset state\n", sched_info->wm_id);
+	IOWRITE64_CR_PDUMP(0ULL, SYS_RESET_CTRL);
+	/* Dummy read to avoid race conditions in the hw */
+	val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
+	img_pdump_printf("-- WM level RESET sequence END\n");
+
+	return 0;
+}
+
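+/*
+ * Polling convention used throughout the reset sequence above, as it is
+ * invoked here: IOPOLL64_CR_PDUMP(val, count, delay, mask, REG) re-reads
+ * REG until (REG & mask) == val, giving up after 'count' retries spaced
+ * by 'delay'. E.g. the MEMBUS_RESET_DONE wait polls CORE_EVENT_HOST_STATUS
+ * until that single bit is set.
+ */
+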
+void vha_wm_hwwdt_calculate(struct vha_dev *vha, struct vha_cmd *cmd,
+		uint64_t *wl_cycles, uint64_t *core_cycles)
+{
+	if (use_estimated_cycles_for_wm_wdt) {
+		/* Using values defined in MBS */
+		*wl_cycles = (uint64_t)get_estimated_cycles(&cmd->user_cmd) +
+						(uint64_t)wm_wl_wdt_estimated_cycles_margin;
+		*core_cycles = cnn_hl_wdt_cycles;
+	} else {
+		/* Using values defined as kernel param */
+		*wl_cycles = wm_wl_wdt_cycles;
+		*core_cycles = cnn_hl_wdt_cycles;
+	}
+}
+
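+/*
+ * Example (illustrative values): with use_estimated_cycles_for_wm_wdt set,
+ * an MBS estimate of 1,000,000 cycles and a margin of 100,000 cycles gives
+ * *wl_cycles = 1,100,000, while *core_cycles always comes from the
+ * cnn_hl_wdt_cycles module parameter.
+ */
+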
+void vha_wm_hwwdt_setup(struct vha_dev *vha, struct vha_cmd *cmd,
+						uint64_t wl_cycles, uint64_t core_cycles)
+{
+	uint64_t val64 = 0;
+	uint64_t hw_brns =
+			((struct vha_user_cnn_submit_multi_cmd*)&cmd->user_cmd)->hw_brns;
+	uint8_t wm_id = cmd->hw_sched_info.wm_id;
+
+	img_pdump_printf("-- Set SYSTEM watchdogs\n");
+	/* Setup system WDTs. */
+	IOWRITE64_CR_PDUMP(VHA_SYS_MEM_WDT_CYCLES, SYS_MEM_WDT_COMPAREMATCH);
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_MEM_WDT_CTRL, MODE, KICK_WL);
+	IOWRITE64_CR_PDUMP(val64, SYS_MEM_WDT_CTRL);
+
+	img_pdump_printf("-- Set WM%d watchdogs\n", wm_id);
+	VHA_LOCK_WM();
+	VHA_SELECT_WM(wm_id);
+	/* Setup WM WDTs. */
+	IOWRITE64_CR_PDUMP(wl_cycles, WM_WL_WDT_COMPAREMATCH);
+	//val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_WL_WDT_CTRL, MODE, KICK_WL);
+	val64 = VHA_CR_SETBITS(WM_WL_WDT_CTRL, MODE, wm_wl_wdt_mode);
+	IOWRITE64_CR_PDUMP(val64, WM_WL_WDT_CTRL);
+
+	IOWRITE64_CR_PDUMP(VHA_WM_IDLE_WDT_CYCLES, WM_WL_IDLE_WDT_COMPAREMATCH);
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_WL_IDLE_WDT_CTRL, MODE, ENABLED);
+	IOWRITE64_CR_PDUMP(val64, WM_WL_IDLE_WDT_CTRL);
+
+	IOWRITE64_CR_PDUMP(VHA_WM_SOCIF_WDT_CYCLES, WM_SOCIF_WDT_COMPAREMATCH);
+	val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_SOCIF_WDT_CTRL, MODE, ENABLED);
+	IOWRITE64_CR_PDUMP(val64, WM_SOCIF_WDT_CTRL);
+	VHA_UNLOCK_WM();
+
+	/* Operate only on cores assigned to this WM. */
+	img_pdump_printf("-- Select cores\n");
+	IOWRITE64_CR_PDUMP(vha_wm_get_cores(vha, wm_id),
+			CORE_CTRL_INDIRECT);
+	img_pdump_printf("-- Set CORE watchdogs\n");
+	/* Setup core WDTs. */
+	IOWRITE64_CR_PDUMP(core_cycles, CNN_WDT_COMPAREMATCH);
+	val64 = VHA_CR_SETBITS(CNN_WDT_CTRL, MODE, cnn_hl_wdt_mode);
+	IOWRITE64_CR_PDUMP(val64, CNN_WDT_CTRL);
+
+	if (VHA_IS_BRN(hw_brns, 71556) ||
+			VHA_IS_BRN(hw_brns, 71338))
+		/* Always set max value */
+		IOWRITE64_CR_PDUMP(VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL, CNN_MEM_WDT_COMPAREMATCH);
+	else
+		IOWRITE64_CR_PDUMP(cnn_mem_wdt_cycles, CNN_MEM_WDT_COMPAREMATCH);
+
+	val64 = VHA_CR_SETBITS(CNN_MEM_WDT_CTRL, MODE, cnn_mem_wdt_mode);
+	IOWRITE64_CR_PDUMP(val64, CNN_MEM_WDT_CTRL);
+
+	val64 = VHA_CR_SETBITS(CNN_CORE_SYNC_WDT_CTRL, ENABLE,
+							VHA_CR_CNN_CORE_SYNC_WDT_CTRL_ENABLE_EN) |
+			VHA_CR_SETBITS(CNN_CORE_SYNC_WDT_CTRL, VALUE,
+							VHA_CORE_SYNC_WDT_CYCLES);
+	IOWRITE64_CR_PDUMP(val64, CNN_CORE_SYNC_WDT_CTRL);
+}
+
+void vha_wm_status(struct vha_dev *vha, uint8_t wm_id, uint8_t core_mask)
+{
+	uint64_t wm_status;
+
+	dev_err(vha->dev, " WM%u failure:\n", wm_id);
+	/* Select WM to read from. */
+	VHA_LOCK_WM();
+	VHA_SELECT_WM(wm_id);
+	wm_status = IOREAD64_CR_REGIO(WM_STATUS);
+	VHA_UNLOCK_WM();
+
+	dev_err(vha->dev, "  WM_STATUS:      0x%016llx\n",
+			wm_status);
+	dev_err(vha->dev, "  LLSYNC_STATUS:  0x%016llx\n",
+			IOREAD64_CR_REGIO(LOW_LEVEL_SYNC_STATUS));
+
+	while (core_mask != 0) {
+		uint32_t core_id = VHA_CORE_MASK_TO_ID(core_mask);
+
+		dev_err(vha->dev, "  core%u:\n", core_id);
+
+		IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT, MASK, (1 << core_id)),
+										 CORE_CTRL_INDIRECT);
+
+		dev_err(vha->dev, "    CNN_STATUS:  0x%016llx\n",
+				IOREAD64_CR_REGIO(OS0_CNN_STATUS));
+		dev_err(vha->dev, "    CNN_STATUS2: 0x%016llx\n",
+				IOREAD64_CR_REGIO(OS0_CNN_STATUS2));
+		{
+			uint64_t reg = VHA_CR_CORE0_LAST_NNA_SYNC_ID +
+					core_id * (VHA_CR_CORE1_LAST_NNA_SYNC_ID - VHA_CR_CORE0_LAST_NNA_SYNC_ID);
+			dev_err(vha->dev, "    LAST_NNA_SYNC_ID: 0x%016llx\n",
+					IOREAD64(vha->reg_base, reg));
+			reg = VHA_CR_CORE0_LAST_MMM_SYNC_ID +
+					core_id * (VHA_CR_CORE1_LAST_MMM_SYNC_ID - VHA_CR_CORE0_LAST_MMM_SYNC_ID);
+			dev_err(vha->dev, "    LAST_MMM_SYNC_ID: 0x%016llx\n",
+					IOREAD64(vha->reg_base, reg));
+		}
+
+		core_mask &= ~(VHA_CORE_ID_TO_MASK(core_id));
+	}
+}
+

+ 104 - 0
driver/vha/platform/vha_plat.h

@@ -0,0 +1,104 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#ifndef VHA_PLAT_H
+#define VHA_PLAT_H
+
+/* Core clock frequency: default 30MHz */
+#define VHA_CORE_CLOCK_MHZ 30
+
+/* Core watchdog cycles default value */
+#if defined(HW_AX2)
+#define VHA_CORE_WDT_CYCLES           0x7fffff
+#elif defined(HW_AX3)
+/* MMM can transfer any number of bytes at cost of higher cycles, setting it to ~100ms @800MHz */
+#define VHA_CORE_WDT_CYCLES           0xfffffff
+/* Memory watchdog is set ~1ms @800MHz which is very safe value to avoid any false interrupts */
+#define VHA_CORE_MEM_WDT_CYCLES       0xffffffff
+#endif
+
+#ifdef CONFIG_HW_MULTICORE
+/* System watchdog cycles default values */
+#define VHA_SYS_MEM_WDT_CYCLES        0xffffffff
+/* WM watchdog cycles default values */
+#define VHA_WM_WDT_CYCLES             0xffffffff
+#define VHA_WM_IDLE_WDT_CYCLES        0xfffff
+#define VHA_WM_SOCIF_WDT_CYCLES       0xfffff
+/* Core watchdog cycles default values */
+/* MMM can transfer any number of bytes at cost of higher cycles, setting it to ~100ms @800MHz */
+#define VHA_CORE_WDT_CYCLES           0xfffffff
+/* Memory watchdog is set ~1ms @800MHz which is very safe value to avoid any false interrupts */
+#define VHA_CORE_MEM_WDT_CYCLES       0xffffffff
+#define VHA_CORE_SYNC_WDT_CYCLES      0xffff
+#endif
+
+/* Memory burst size */
+#define VHA_CORE_MH_MAX_BURST_LENGTH 128
+/* SLC cache policy type (0-use cache, 1-bypass cache) */
+#define VHA_CORE_MH_SLC_CACHE_POLICY_TYPE 1
+/* GPU pipe coherent type */
+#define VHA_CORE_MH_GPU_PIPE_COHERENT_TYPE 1
+/* Persistence priority 0-lowest,3-highest */
+#define VHA_CORE_MH_PERSISTENCE_PRIO 0
+
+/* Suspend delay in ms after which the
+ * runtime suspend callback is called */
+#define VHA_CORE_SUSPEND_DELAY 10
+
+/* Default OCM start address */
+#ifdef CONFIG_HW_MULTICORE
+#define VHA_OCM_ADDR_START 0x1000
+#else
+#define VHA_OCM_ADDR_START (~0)
+#endif
+
+/* IO hooks */
+uint64_t vha_plat_read64(void *addr);
+void vha_plat_write64(void *addr, uint64_t val);
+
+int vha_plat_init(void);
+int vha_plat_deinit(void);
+
+#endif /* VHA_PLAT_H */

+ 862 - 0
driver/vha/platform/vha_plat_apollo.c

@@ -0,0 +1,862 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
+#include <linux/dma-mapping.h>
+#else
+#include <linux/dma-map-ops.h>
+#endif
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/mod_devicetable.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#define DEVICE_NAME "vha"
+
+#define IS_APOLLO_DEVICE(devid) ((devid) == PCI_APOLLO_DEVICE_ID)
+
+/*
+ * from TCF Support FPGA.Technical Reference
+ * Manual.1.0.92.Internal Atlas GEN.External.doc:
+ */
+/* Atlas - System control register bar */
+#define PCI_ATLAS_SYS_CTRL_REGS_BAR (0)
+/* Atlas - System control register offset */
+#define PCI_ATLAS_SYS_CTRL_REGS_OFFSET (0x0000)
+/* Atlas - Offset of INTERRUPT_STATUS */
+/*#define PCI_ATLAS_INTERRUPT_STATUS (0x00E0)*/
+/* Atlas - Offset of INTERRUPT_ENABLE */
+/*#define PCI_ATLAS_INTERRUPT_ENABLE (0x00F0)*/
+/* Atlas - Offset of INTERRUPT_CLEAR */
+/*#define PCI_ATLAS_INTERRUPT_CLEAR (0x00F8)*/
+/* Atlas - Master interrupt enable */
+#define PCI_ATLAS_MASTER_ENABLE (1<<31)
+/* Atlas - Device interrupt */
+#define PCI_ATLAS_DEVICE_INT (1<<13)
+/* Atlas - SCB Logic soft reset */
+#define PCI_ATLAS_SCB_RESET (1<<4)
+/* Atlas - PDP2 soft reset */
+#define PCI_ATLAS_PDP2_RESET (1<<3)
+/* Atlas - PDP1 soft reset */
+#define PCI_ATLAS_PDP1_RESET (1<<2)
+/* Atlas - soft reset the DDR logic */
+#define PCI_ATLAS_DDR_RESET (1<<1)
+/* Atlas - soft reset the device under test */
+#define PCI_ATLAS_DUT_RESET (1<<0)
+#define PCI_ATLAS_RESET_REG_OFFSET (0x0080)
+#define PCI_ATLAS_RESET_BITS (PCI_ATLAS_DDR_RESET | PCI_ATLAS_DUT_RESET \
+		| PCI_ATLAS_PDP1_RESET | PCI_ATLAS_PDP2_RESET | \
+		PCI_ATLAS_SCB_RESET)
+
+/* Apollo - Offset of INTERRUPT_STATUS */
+#define PCI_APOLLO_INTERRUPT_STATUS (0x00C8)
+/* Apollo - Offset of INTERRUPT_ENABLE */
+#define PCI_APOLLO_INTERRUPT_ENABLE (0x00D8)
+/* Apollo - Offset of INTERRUPT_CLEAR */
+#define PCI_APOLLO_INTERRUPT_CLEAR (0x00E0)
+/* Apollo - DCM Logic soft reset */
+#define PCI_APOLLO_DCM_RESET (1<<10)
+#define PCI_APOLLO_RESET_BITS (PCI_ATLAS_RESET_BITS | PCI_APOLLO_DCM_RESET)
+
+#define PCI_ATLAS_TEST_CTRL (0xb0)
+#define PCI_APOLLO_TEST_CTRL (0x98)
+
+#define PCI_ATLAS_VENDOR_ID (0x1010)
+#define PCI_ATLAS_DEVICE_ID (0x1CF1)
+#define PCI_APOLLO_DEVICE_ID (0x1CF2)
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+/*#define FPGA_IMAGE_REV_OFFSET (0x604)
+ #define FPGA_IMAGE_REV_MASK (0xFFFF)*/
+
+/* Parameters applicable when using bus master mode */
+static unsigned long contig_phys_start;
+module_param(contig_phys_start, ulong, 0444);
+MODULE_PARM_DESC(contig_phys_start,
+		"Physical address of start of contiguous region");
+static uint32_t contig_size;
+module_param(contig_size, uint, 0444);
+MODULE_PARM_DESC(contig_size,
+		"Size of contiguous region: takes precedence over any PCI based memory");
+static uint32_t fpga_heap_type = IMG_MEM_HEAP_TYPE_UNIFIED;
+module_param(fpga_heap_type, uint, 0444);
+MODULE_PARM_DESC(fpga_heap_type, "Fpga primary heap type");
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size,
+		"physical size in bytes, when 0 (default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "offset from PCI bar start. (default: 0)");
+
+static bool mem_static_kptr = true;
+module_param(mem_static_kptr, bool, 0444);
+MODULE_PARM_DESC(mem_static_kptr,
+		"Creates static kernel mapping for fpga memory");
+
+/*
+ * Special handling (not implemented) is required for the VHA device
+ * to be able to access both carveout buffers (internal memory) and
+ * dmabuf buffers (system memory). The latter have to go through
+ * the system bus to be accessed whereas the former do not.
+ */
+static struct heap_config vha_plat_fpga_heap_configs[] = {
+	/* Primary heap used for internal allocations */
+#ifdef FPGA_BUS_MASTERING
+	{
+		.type = -1, /* selected with fpga_heap_type */
+		.options = {
+			.unified.gfp_type = GFP_DMA32 | __GFP_ZERO,
+			.unified.max_order = 4,
+		},
+		.to_dev_addr = NULL,
+		.to_host_addr = NULL,
+	},
+#elif CONFIG_GENERIC_ALLOCATOR
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+		/* .to_host_addr to be filled at run time */
+	},
+#else
+#error Neither FPGA_BUS_MASTERING nor CONFIG_GENERIC_ALLOCATOR was defined
+#endif
+
+	/* Secondary heap used for importing an external memory */
+#ifdef FPGA_BUS_MASTERING
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+	},
+#endif
+#if CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+#ifndef FPGA_BUS_MASTERING
+		.options.dmabuf = {
+				.use_sg_dma = true,
+		},
+#endif
+	},
+#endif
+};
+
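+/*
+ * Heap selection summary (as configured above): with FPGA_BUS_MASTERING
+ * the device masters the bus and works out of system memory, so a
+ * unified primary heap plus an anonymous import heap are used; otherwise
+ * the on-board memory is exposed as a carveout heap whose parameters are
+ * filled in at probe time, and dmabuf imports use sg_dma addresses.
+ */
+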
+static const int vha_plat_fpga_heaps =
+	sizeof(vha_plat_fpga_heap_configs)/sizeof(*vha_plat_fpga_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(PCI_ATLAS_VENDOR_ID, PCI_ATLAS_DEVICE_ID), },
+	{ PCI_DEVICE(PCI_ATLAS_VENDOR_ID, PCI_APOLLO_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+struct imgpci_prvdata {
+	int irq;
+	struct {
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} memmap[3];
+	struct pci_dev *pci_dev;
+};
+
+
+struct img_pci_driver {
+	struct pci_dev *pci_dev;
+	struct pci_driver pci_driver;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+static int vha_plat_runtime_idle(struct device *dev);
+static int vha_plat_runtime_suspend(struct device *dev);
+static int vha_plat_runtime_resume(struct device *dev);
+
+static struct dev_pm_ops vha_pm_plat_ops = {
+#ifdef FPGA_BUS_MASTERING
+	/* Runtime pm will not work with fpga internal memory
+	 * because pci bus driver suspend is also called,
+	 * which disables core/mem clocks */
+	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
+			vha_plat_runtime_resume, vha_plat_runtime_idle)
+#endif
+	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
+};
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA FPGA driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_pci",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.pm = &vha_pm_plat_ops,
+			.groups = drv_groups,
+		}
+	},
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;
+
+static int interrupt_status_reg = -1;
+static int interrupt_clear_reg = -1;
+static int interrupt_enable_reg = -1;
+static int test_ctrl_reg = -1;
+
+static unsigned int fpga_readreg32(struct imgpci_prvdata *data,
+		int bar, unsigned long offset
+)
+{
+	void __iomem *reg =
+		(void __iomem *)(data->memmap[bar].km_addr + offset);
+	return ioread32(reg);
+}
+
+static void fpga_writereg32(struct imgpci_prvdata *data,
+		int bar, unsigned long offset, int val)
+{
+	void __iomem *reg =
+		(void __iomem *)(data->memmap[bar].km_addr + offset);
+	iowrite32(val, reg);
+}
+
+static void reset_fpga(struct pci_dev *dev,
+		struct imgpci_prvdata *data, unsigned int mask)
+{
+	uint32_t bits = 0;
+
+	if (!dev)
+		return;
+
+	bits = PCI_APOLLO_RESET_BITS;
+
+	dev_dbg(&dev->dev, "reset fpga!\n");
+	bits &= mask;
+
+	if (bits) {
+		uint32_t val = fpga_readreg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET);
+
+		val &= ~bits;
+		fpga_writereg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET, val);
+		udelay(100); /* arbitrary delays, just in case! */
+		val |= bits;
+		fpga_writereg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET, val);
+		/* If anything other than the DUT is reset, add a longer delay */
+		if (mask != PCI_ATLAS_DUT_RESET)
+			msleep(100);
+		else
+			udelay(100); /* arbitrary delays, just in case! */
+	}
+
+	dev_dbg(&dev->dev, "reset fpga done!\n");
+}
+
+static void fpga_clear_irq(struct imgpci_prvdata *data, unsigned int intstatus)
+{
+	unsigned int max_retries = 1000;
+
+	while (fpga_readreg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
+				interrupt_status_reg) && max_retries--)
+		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
+				interrupt_clear_reg,
+				(PCI_ATLAS_MASTER_ENABLE | intstatus));
+}
+
+static irqreturn_t pci_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+static irqreturn_t pci_isrcb(int irq, void *dev_id)
+{
+	unsigned int intstatus;
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+	irqreturn_t ret = IRQ_NONE;
+
+	if (data == NULL || dev_id == NULL) {
+		/* spurious interrupt: not yet initialised. */
+		goto exit;
+	}
+
+	intstatus = fpga_readreg32(data,
+			PCI_ATLAS_SYS_CTRL_REGS_BAR,
+			interrupt_status_reg);
+
+	if (intstatus) {
+		ret = vha_handle_irq(&dev->dev);
+		/*
+		 * We need to clear interrupts for the embedded device
+		 * via the fpga interrupt controller...
+		 */
+		fpga_clear_irq(data, intstatus);
+	} else {
+		/* Either a spurious interrupt or, more likely,
+		 * a shared interrupt line, which will be handled
+		 * by another driver.
+		 */
+		goto exit;
+	}
+
+exit:
+
+	return ret;
+}
+
+/*
+ * IO hooks : Address bus for hw registers is 32bit!
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+	return (uint64_t)readl((const volatile void __iomem *)addr) |
+				((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
+	writel((uint32_t)(val >> 32),        (volatile void __iomem *)addr + 4);
+}
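+
+/*
+ * A minimal usage sketch (hypothetical helper, not part of the driver):
+ * since the register address bus only carries 32-bit accesses, a 64-bit
+ * register update goes through the split hooks above. Note the two halves
+ * are not read atomically. The mask/val parameters are illustrative only.
+ */
+#if 0
+static void example_update_reg64(void *reg, uint64_t mask, uint64_t val)
+{
+	uint64_t tmp = vha_plat_read64(reg);	/* two 32-bit reads */
+
+	tmp = (tmp & ~mask) | (val & mask);
+	vha_plat_write64(reg, tmp);		/* two 32-bit writes */
+}
+#endif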
+
+int vha_plat_deinit(void)
+{
+	struct pci_dev *dev = vha_pci_drv.pci_dev;
+	int ret;
+
+	if (dev) {
+		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+		if (data) {
+			/* reset the hardware */
+			reset_fpga(data->pci_dev, data, ~0);
+		} else {
+			dev_dbg(&dev->dev,
+				"%s: prv data not found, HW reset omitted\n",
+				__func__);
+		}
+	} else {
+		pr_debug("%s: dev missing, HW reset omitted\n", __func__);
+	}
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base %#llx size %zu offs %lu addr %#llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %#llx size %zu offs %lu addr %#llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and, as a rule, it cannot
+	 * be dereferenced safely without memory barriers, which
+	 * is why it is guarded by __iomem (returned by ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to a
+	 * regular void * pointer. We cast it here, assuming the
+	 * FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching
+	 * turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+	iounmap((volatile void __iomem *)addr);
+	return 0;
+}
+#endif
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int bar, ret = 0;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	struct device *dev = &pci_dev->dev;
+	int heap;
+
+	dev_dbg(dev, "probing device\n");
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %#llx\n",
+			 __func__, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+	ret = dma_set_mask(dev, dma_get_mask(dev));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
+	for (bar = 0; bar < 3; bar++) {
+
+		data->memmap[bar].size = pci_resource_len(pci_dev, bar);
+		data->memmap[bar].addr = pci_resource_start(pci_dev, bar);
+		if (bar == 2) {
+			if (pci_size)
+				data->memmap[bar].size = pci_size;
+			/* ioremap fpga memory only when static mode is used */
+			if (!mem_static_kptr)
+				continue;
+		}
+
+		if (data->memmap[bar].size > maxmapsize) {
+			/*
+			 * We avoid mapping too big regions: we do not need
+			 * such a big amount of memory and sometimes we do
+			 * not have enough contiguous 'vmallocable' memory.
+			 */
+			dev_warn(dev, "not mapping all mem for bar %u\n", bar);
+			data->memmap[bar].size = maxmapsize;
+		}
+		data->memmap[bar].km_addr = devm_ioremap(dev,
+				pci_resource_start(pci_dev, bar),
+				data->memmap[bar].size);
+		if (!data->memmap[bar].km_addr) {
+			dev_err(dev, "failed to map bar %u\n", bar);
+			ret = -EIO;
+			goto out_release;
+		}
+
+		dev_dbg(dev, "[bar %u] addr: 0x%lx size: 0x%lx km: 0x%p\n",
+				bar, data->memmap[bar].addr,
+				data->memmap[bar].size,
+				data->memmap[bar].km_addr);
+	}
+
+	/* Get the IRQ...*/
+	data->irq = pci_dev->irq;
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	reset_fpga(pci_dev, data, ~0);
+
+	interrupt_status_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
+		PCI_APOLLO_INTERRUPT_STATUS;
+	interrupt_clear_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
+		PCI_APOLLO_INTERRUPT_CLEAR;
+	interrupt_enable_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
+		PCI_APOLLO_INTERRUPT_ENABLE;
+	test_ctrl_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
+		PCI_APOLLO_TEST_CTRL;
+
+	/*
+	 * We need to enable interrupts for the embedded device
+	 * via the fpga interrupt controller...
+	 */
+	{
+		unsigned int ena;
+
+		ena = fpga_readreg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
+						 interrupt_enable_reg);
+		ena |= PCI_ATLAS_MASTER_ENABLE | PCI_ATLAS_DEVICE_INT;
+
+		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
+				interrupt_enable_reg, ena);
+
+		fpga_clear_irq(data, ena);
+	}
+
+#ifdef FPGA_BUS_MASTERING
+	dev_dbg(dev, "enabling FPGA bus mastering\n");
+	fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR, test_ctrl_reg, 0x0);
+#else
+	/* Route to internal RAM - this is reset value */
+	dev_dbg(dev, "disabling FPGA bus mastering\n");
+	fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR, test_ctrl_reg, 0x1);
+#endif
+
+	/* patch heap config with PCI memory addresses */
+	for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
+		struct heap_config *cfg = &vha_plat_fpga_heap_configs[heap];
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+			if (contig_size && contig_phys_start) {
+				/*
+				 * Two types of carveout memory are supported:
+				 * - memory carved out of the main DDR
+				 *   region, e.g. via the linux boot option
+				 *   memmap=512M$0x5CAFFFFF, configured with
+				 *   the module parameters contig_phys_start
+				 *   and contig_size;
+				 * - DDR populated in the actual PCI card,
+				 *   in BAR 4.
+				 * The module parameters take precedence
+				 * over PCI memory.
+				 */
+				cfg->options.carveout.phys = contig_phys_start;
+				cfg->options.carveout.size = contig_size;
+				cfg->to_dev_addr = NULL;
+				cfg->to_host_addr = NULL;
+				dev_info(dev, "using %luMB CARVEOUT at 0x%lx\n",
+					 contig_size/1024/1024,
+					 contig_phys_start);
+			} else {
+				cfg->options.carveout.phys =
+					data->memmap[2].addr;
+				if (mem_static_kptr)
+					cfg->options.carveout.kptr =
+							data->memmap[2].km_addr;
+				cfg->options.carveout.size =
+					data->memmap[2].size;
+				cfg->options.carveout.offs = pci_offset;
+				cfg->to_dev_addr = carveout_to_dev_addr;
+				cfg->to_host_addr = carveout_to_host_addr;
+				dev_info(dev, "using %zuMB CARVEOUT from PCI at 0x%llx\n",
+					 cfg->options.carveout.size/1024/1024,
+					 cfg->options.carveout.phys);
+			}
+			/* IO memory access callbacks */
+			if (!mem_static_kptr) {
+				/* Dynamic kernel memory mapping */
+				cfg->options.carveout.get_kptr = carveout_get_kptr;
+				cfg->options.carveout.put_kptr = carveout_put_kptr;
+			}
+
+			break;
+		}
+#endif
+
+		if (cfg->type == IMG_MEM_HEAP_TYPE_COHERENT) {
+			ret = dma_declare_coherent_memory(dev,
+					contig_phys_start, contig_phys_start,
+					contig_size
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+					, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE
+#else
+					, DMA_MEMORY_EXCLUSIVE
+#endif
+#endif
+					);
+			if (ret == 0) {
+				dev_err(dev, "failed to initialize coherent memory!\n");
+				/*
+				 * We will fallback to the default pool anyway
+				 * goto out_release;
+				 */
+			}
+			break;
+		}
+	}
+#ifdef FPGA_BUS_MASTERING
+	/* Allow the core driver to control pm_runtime */
+	pm_runtime_allow(dev);
+#endif
+
+	ret = vha_add_dev(dev, vha_plat_fpga_heap_configs,
+			vha_plat_fpga_heaps, data,
+			data->memmap[1].km_addr, data->memmap[1].size);
+	if (ret) {
+		dev_err(dev, "failed to initialize driver core!\n");
+		goto out_heap_deinit;
+	}
+
+	/*
+	 * Reset the FPGA DUT only after disabling clocks in
+	 * vha_add_dev() -> get properties.
+	 * This workaround is required to ensure that
+	 * clocks (on the daughter board) are enabled for test slave scripts to
+	 * read the FPGA build version register.
+	 * NOTE: Asserting other bits, like the DDR reset bit, causes problems
+	 * with the bus mastering feature and thus results in memory failures.
+	 */
+	reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
+
+	/*uint32_t fpga_rev = fpga_readreg32(data, 1,
+			FPGA_IMAGE_REV_OFFSET) & FPGA_IMAGE_REV_MASK;
+	dev_dbg(dev, "fpga image revision: 0x%x\n", fpga_rev);
+	if (!fpga_rev || fpga_rev == 0xdead1) {
+		dev_err(dev, "fpga revision incorrect (0x%x)!\n",
+				fpga_rev);
+		goto out_rm_dev;
+	}*/
+
+	/* Install the ISR callback...*/
+	ret = devm_request_threaded_irq(dev, data->irq, &pci_isrcb,
+			&pci_thread_irq, IRQF_SHARED, DEVICE_NAME,
+			(void *)pci_dev);
+	if (ret) {
+		dev_err(dev, "failed to request irq!\n");
+		goto out_rm_dev;
+	}
+	dev_dbg(dev, "registered irq %d\n", data->irq);
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+out_heap_deinit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(dev);
+#endif
+out_release:
+	pci_release_regions(pci_dev);
+out_disable:
+	pci_disable_device(pci_dev);
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+	dev_dbg(&dev->dev, "removing device\n");
+
+	if (data == NULL) {
+		dev_err(&dev->dev, "PCI priv data missing!\n");
+	} else {
+		/*
+		 * We  need to disable interrupts for the
+		 * embedded device via the fpga interrupt controller...
+		 */
+		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
+				interrupt_enable_reg, 0x00000000);
+
+#ifdef FPGA_BUS_MASTERING
+		/* Route to internal RAM - this is reset value */
+		dev_dbg(&dev->dev, "disabling FPGA bus mastering\n");
+		fpga_writereg32(data,
+				PCI_ATLAS_SYS_CTRL_REGS_BAR,
+				test_ctrl_reg, 0x1);
+#endif
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(&dev->dev);
+#endif
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+#ifdef FPGA_BUS_MASTERING
+	pm_runtime_forbid(&dev->dev);
+#endif
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	struct pci_dev *pci_dev = vha_pci_drv.pci_dev;
+	struct imgpci_prvdata *data = vha_get_plat_data(dev);
+	int ret;
+
+	ret = vha_suspend_dev(dev);
+	if (!ret) {
+		dev_dbg(dev, "suspend device\n");
+		reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
+	} else {
+		dev_err(dev, "failed to suspend!\n");
+	}
+
+	return ret;
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = vha_pci_drv.pci_dev;
+	struct imgpci_prvdata *data = vha_get_plat_data(dev);
+	int ret;
+
+	dev_dbg(dev, "resume device\n");
+	reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
+	ret = vha_resume_dev(dev);
+	if (ret)
+		dev_err(dev, "failed to resume!\n");
+
+	return ret;
+}
+
+static int __maybe_unused vha_plat_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int __maybe_unused vha_plat_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int __maybe_unused vha_plat_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+#ifdef FPGA_BUS_MASTERING
+	vha_plat_fpga_heap_configs[0].type = fpga_heap_type;
+#endif
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}

+ 386 - 0
driver/vha/platform/vha_plat_dt.c

@@ -0,0 +1,386 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_dt.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include <img_mem_man.h>
+#include "vha_common.h"
+#include "uapi/version.h"
+#include "vha_plat.h"
+#include "vha_plat_dt.h"
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/vagus_system.h>
+#elif defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#endif
+
+#define DEVICE_NAME "vha"
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0xfffffff
+
+static bool poll_interrupts;   /* Disabled by default */
+module_param(poll_interrupts, bool, 0444);
+MODULE_PARM_DESC(poll_interrupts, "Poll for interrupts? 0: No, 1: Yes");
+
+static unsigned int irq_poll_interval_ms = 100; /* 100 ms */
+module_param(irq_poll_interval_ms, uint, 0444);
+MODULE_PARM_DESC(irq_poll_interval_ms, "Time in ms between each interrupt poll");
+
+/* Global timer used when irq poll mode is switched on.
+ * NOTE: only a single core instance is supported in polling mode. */
+static struct poll_timer {
+	struct platform_device *pdev;
+	struct timer_list tmr;
+	bool enabled;
+
+} irq_poll_timer;
+
+static irqreturn_t dt_plat_thread_irq(int irq, void *dev_id)
+{
+	struct platform_device *ofdev = (struct platform_device *)dev_id;
+
+	return vha_handle_thread_irq(&ofdev->dev);
+}
+
+static irqreturn_t dt_plat_isrcb(int irq, void *dev_id)
+{
+	struct platform_device *ofdev = (struct platform_device *)dev_id;
+
+	if (!ofdev)
+		return IRQ_NONE;
+
+	return vha_handle_irq(&ofdev->dev);
+}
+
+/* Interrupt polling function */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
+static void dt_plat_poll_interrupt(struct timer_list *t)
+{
+	struct poll_timer *poll_timer = from_timer(poll_timer, t, tmr);
+#else
+static void dt_plat_poll_interrupt(unsigned long ctx)
+{
+	struct poll_timer *poll_timer = (struct poll_timer *)ctx;
+#endif
+	struct platform_device *ofdev = poll_timer->pdev;
+	int ret;
+
+	if (!poll_timer->enabled)
+		return;
+
+	preempt_disable();
+	ret = vha_handle_irq(&ofdev->dev);
+	preempt_enable();
+	if (ret == IRQ_WAKE_THREAD)
+		vha_handle_thread_irq(&ofdev->dev);
+
+	/* retrigger */
+	mod_timer(&poll_timer->tmr,
+			jiffies + msecs_to_jiffies(irq_poll_interval_ms));
+}
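+
+/* Usage note: polling mode is selected at module load time, e.g.
+ * (assuming the module binary is called vha.ko):
+ *   insmod vha.ko poll_interrupts=1 irq_poll_interval_ms=50
+ */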
+
+static int vha_plat_probe(struct platform_device *ofdev)
+{
+	int ret, module_irq;
+	struct resource res;
+	void __iomem *reg_addr;
+	uint32_t reg_size, core_size;
+
+	ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+	if (ret) {
+		dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
+		return ret;
+	}
+	pr_info("%s: registers %#llx-%#llx\n", __func__,
+		(unsigned long long)res.start, (unsigned long long)res.end);
+
+	module_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+	if (module_irq == 0) {
+		dev_err(&ofdev->dev, "could not map IRQ\n");
+		return -ENXIO;
+	}
+
+	/* Assume the DT holds a single register space entry that covers
+	 * all regions, so we need to do the split accordingly */
+	reg_size = resource_size(&res);
+
+#ifdef CFG_SYS_VAGUS
+	core_size = _REG_SIZE + _REG_NNSYS_SIZE;
+#else
+	core_size = _REG_SIZE;
+#endif
+	if ((res.start + _REG_START) > res.end) {
+		dev_err(&ofdev->dev, "wrong system conf for core region!\n");
+		return -ENXIO;
+	}
+
+	if ((res.start + _REG_START + core_size) > res.end) {
+		dev_warn(&ofdev->dev, "trimming system conf for core region!\n");
+		core_size = reg_size - _REG_START;
+	}
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	reg_addr = devm_ioremap_nocache(&ofdev->dev, res.start +
+			_REG_START, core_size);
+#else
+	reg_addr = devm_ioremap(&ofdev->dev, res.start +
+			_REG_START, core_size);
+#endif
+	if (!reg_addr) {
+		dev_err(&ofdev->dev, "failed to map core registers\n");
+		return -ENXIO;
+	}
+
+	ret = vha_plat_dt_hw_init(ofdev);
+	if (ret) {
+		dev_err(&ofdev->dev, "failed to init platform-specific hw!\n");
+		goto out_add_dev;
+	}
+
+	/* no 'per device' memory heaps used */
+	ret = vha_add_dev(&ofdev->dev, NULL, 0,
+				NULL /* plat priv data */, reg_addr, core_size);
+	if (ret) {
+		dev_err(&ofdev->dev, "failed to initialize driver core!\n");
+		goto out_add_dev;
+	}
+
+	if (!poll_interrupts) {
+		ret = devm_request_threaded_irq(&ofdev->dev, module_irq, &dt_plat_isrcb,
+				&dt_plat_thread_irq, IRQF_SHARED, DEVICE_NAME, ofdev);
+		if (ret) {
+			dev_err(&ofdev->dev, "failed to request irq\n");
+			goto out_irq;
+		}
+	} else {
+		irq_poll_timer.pdev = ofdev;
+		irq_poll_timer.enabled = true;
+		/* Setup and start poll timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
+		timer_setup(&irq_poll_timer.tmr, dt_plat_poll_interrupt, 0);
+#else
+		setup_timer(&irq_poll_timer.tmr, dt_plat_poll_interrupt,
+				(uintptr_t)&irq_poll_timer);
+#endif
+		mod_timer(&irq_poll_timer.tmr,
+				jiffies + msecs_to_jiffies(irq_poll_interval_ms));
+	}
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(&ofdev->dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(&ofdev->dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_irq;
+	}
+	return ret;
+
+out_irq:
+	vha_rm_dev(&ofdev->dev);
+out_add_dev:
+	devm_iounmap(&ofdev->dev, reg_addr);
+	return ret;
+}
+
+static int vha_plat_remove(struct platform_device *ofdev)
+{
+	vha_rm_dev(&ofdev->dev);
+
+	vha_plat_dt_hw_destroy(ofdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_suspend_dev(dev);
+	if (ret)
+		dev_err(dev, "failed to suspend the core!\n");
+	else {
+		ret = vha_plat_dt_hw_suspend(ofdev);
+		if (ret)
+			dev_err(dev, "failed to suspend platform-specific hw!\n");
+	}
+
+	return ret;
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_plat_dt_hw_resume(ofdev);
+	if (ret)
+		dev_err(dev, "failed to resume platform-specific hw!\n");
+	else {
+		ret = vha_resume_dev(dev);
+		if (ret)
+			dev_err(dev, "failed to resume the core!\n");
+	}
+
+	return ret;
+}
+
+static int vha_plat_runtime_idle(struct device *dev)
+{
+	/* Eg. turn off external clocks */
+	return 0;
+}
+
+static int vha_plat_runtime_suspend(struct device *dev)
+{
+	/* Nothing to do */
+	return 0;
+}
+
+static int vha_plat_runtime_resume(struct device *dev)
+{
+	/* Eg. turn on external clocks */
+	return 0;
+}
+
+#endif
+
+static struct dev_pm_ops vha_pm_plat_ops = {
+	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
+			vha_plat_runtime_resume, vha_plat_runtime_idle)
+	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
+};
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA DT driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct platform_driver vha_plat_drv = {
+	.probe  = vha_plat_probe,
+	.remove = vha_plat_remove,
+	.driver = {
+		.name = VHA_PLAT_DT_NAME,
+		.groups = drv_groups,
+		.owner = THIS_MODULE,
+		.of_match_table = vha_plat_dt_of_ids,
+		.pm = &vha_pm_plat_ops,
+	},
+};
+
+int vha_plat_init(void)
+{
+	int ret = 0;
+	struct heap_config *heap_configs;
+	int num_heaps;
+
+	vha_plat_dt_get_heaps(&heap_configs, &num_heaps);
+	ret = vha_init_plat_heaps(heap_configs, num_heaps);
+	if (ret) {
+		pr_err("failed to initialize global heaps\n");
+		return -ENOMEM;
+	}
+
+	ret = platform_driver_register(&vha_plat_drv);
+	if (ret) {
+		pr_err("failed to register VHA driver!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int vha_plat_deinit(void)
+{
+	int ret;
+
+	if (poll_interrupts) {
+		irq_poll_timer.enabled = false;
+		del_timer_sync(&irq_poll_timer.tmr);
+	}
+
+	/* Unregister the driver from the OS */
+	platform_driver_unregister(&vha_plat_drv);
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 78 - 0
driver/vha/platform/vha_plat_dt.h

@@ -0,0 +1,78 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_dt.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+
+#ifndef VHA_PLAT_DT_H
+#define VHA_PLAT_DT_H
+
+#include <linux/platform_device.h>
+
+/* OpenFirmware device tree id, for this driver */
+#if defined(HW_AX2)
+
+#define VHA_PLAT_DT_OF_ID "img,ax21xx-nna"
+#define VHA_PLAT_DT_NAME  "ax21xx-nna"
+
+#elif defined(HW_AX3)
+
+#define VHA_PLAT_DT_OF_ID "img,ax3xxx-nna"
+#define VHA_PLAT_DT_NAME  "ax3xxx-nna"
+
+#else
+
+#error "No HW layout defined"
+
+#endif
+
+extern const struct of_device_id vha_plat_dt_of_ids[];
+
+void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps);
+int vha_plat_dt_hw_init(struct platform_device *pdev);
+void vha_plat_dt_hw_destroy(struct platform_device *pdev);
+
+int vha_plat_dt_hw_suspend(struct platform_device *pdev);
+int vha_plat_dt_hw_resume(struct platform_device *pdev);
+
+#endif /* VHA_PLAT_DT_H */

+ 156 - 0
driver/vha/platform/vha_plat_dt_example.c

@@ -0,0 +1,156 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_dt_example.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#include <img_mem_man.h>
+#include "vha_plat.h"
+#include "vha_plat_dt.h"
+
+const struct of_device_id vha_plat_dt_of_ids[] = {
+	{ .compatible = VHA_PLAT_DT_OF_ID },
+	{ }
+};
+
+static struct heap_config example_heap_configs[] = {
+	{
+		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
+		.options.unified = {
+			.gfp_type = GFP_KERNEL | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+		.to_dev_addr = NULL,
+	},
+};
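+
+/*
+ * A minimal sketch of an extra, vendor-specific heap entry that could be
+ * appended to the array above (hypothetical: the base address and size
+ * below are placeholders, and carveout heaps additionally require
+ * CONFIG_GENERIC_ALLOCATOR).
+ */
+#if 0
+static struct heap_config example_carveout_heap = {
+	.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+	.options.carveout = {
+		.phys = 0x80000000,		/* placeholder base */
+		.size = 32 * 1024 * 1024,	/* placeholder size */
+	},
+	.to_dev_addr = NULL,
+};
+#endif
+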
+/*
+ * IO hooks.
+ * NOTE: customers may want to use a spinlock to serialize
+ * multi-threaded IO access (see the sketch after the hooks below).
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+	return readq((volatile void __iomem *)addr);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writeq(val, (volatile void __iomem *)addr);
+}
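+
+/*
+ * A minimal sketch of the spinlock variant suggested above (hypothetical;
+ * the lock name is illustrative and <linux/spinlock.h> is assumed).
+ * readq()/writeq() are already atomic on 64-bit platforms, so this only
+ * matters where mixed-width or split accesses can race.
+ */
+#if 0
+static DEFINE_SPINLOCK(example_io_lock);
+
+uint64_t vha_plat_read64(void *addr)
+{
+	unsigned long flags;
+	uint64_t val;
+
+	spin_lock_irqsave(&example_io_lock, flags);
+	val = readq((volatile void __iomem *)addr);
+	spin_unlock_irqrestore(&example_io_lock, flags);
+
+	return val;
+}
+#endif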
+
+int vha_plat_dt_hw_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+	uint64_t dma_mask;
+
+	dev_dbg(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+			 __func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	/* Try alternative dma_mask setting from device tree */
+	if (!of_property_read_u64(pdev->dev.of_node, "dma-mask",
+				(uint64_t *)&dma_mask)) {
+		dev_info(dev, "%s forcing custom mask from DT : %#llx\n",
+				__func__, dma_mask);
+	} else {
+		/* If no alternative mask is defined in the DT
+		 * "dma-mask" property, use the default one (32 bit) */
+		dma_mask = dma_get_mask(dev);
+	}
+	ret = dma_set_mask(dev, dma_mask);
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		return ret;
+	}
+
+	/* Put any vendor related code:
+	 * get clock domain, voltage regulator, set clock rate, etc */
+	return 0;
+}
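+
+/*
+ * A minimal sketch of the vendor clock setup mentioned above, using the
+ * common clock framework (hypothetical: the "clk_vha" name and 800 MHz
+ * rate merely mirror the example DTS; <linux/clk.h> is assumed).
+ */
+#if 0
+static int example_enable_vha_clock(struct device *dev)
+{
+	struct clk *clk = devm_clk_get(dev, "clk_vha");
+	int ret;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = clk_set_rate(clk, 800000000);
+	if (ret)
+		return ret;
+
+	return clk_prepare_enable(clk);
+}
+#endif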
+
+/* return platform global heaps */
+void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps)
+{
+	*heap_configs = example_heap_configs;
+	*num_heaps = sizeof(example_heap_configs)/sizeof(struct heap_config);
+}
+
+void vha_plat_dt_hw_destroy(struct platform_device *pdev)
+{
+	/* Put any vendor related code:
+	 * put clock domain, voltage regulator, etc */
+}
+
+int vha_plat_dt_hw_suspend(struct platform_device *pdev)
+{
+	/* This is the place where vendor specific code shall be called:
+	 * eg. turn off voltage regulator/disable power domain */
+	return 0;
+}
+
+int vha_plat_dt_hw_resume(struct platform_device *pdev)
+{
+	/* This is the place where vendor specific code shall be called:
+	 * eg. turn on voltage regulator/enable power domain */
+	return 0;
+}
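+
+/*
+ * A minimal sketch of the regulator handling the two hooks above describe
+ * (hypothetical: the "ldo_vha" supply name mirrors the example DTS and
+ * <linux/regulator/consumer.h> is assumed).
+ */
+#if 0
+static int example_vha_set_power(struct device *dev, bool on)
+{
+	struct regulator *reg = devm_regulator_get(dev, "ldo_vha");
+
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	return on ? regulator_enable(reg) : regulator_disable(reg);
+}
+#endif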
+
+MODULE_DEVICE_TABLE(of, vha_plat_dt_of_ids);

+ 60 - 0
driver/vha/platform/vha_plat_dt_example.dts

@@ -0,0 +1,60 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_dt_example.dts
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+ / {
+    /* ... */
+    vha {
+         compatible = "img,ax21xx-nna";
+         reg = <0x0 0xe8800000 0x0 0x100000>;
+         interrupts = <0 257 4>;
+         clocks = <&clk_gate_vha>;
+         clock-names = "clk_vha";
+         vha_clk_rate = <800000000>;
+         ldo_vha-supply = <&vha>;
+         dma-mask = /bits/ 64 <0xffffffffff>; /* 40bit mask */
+         status = "okay";
+    };
+    /* ... */
+};

+ 81 - 0
driver/vha/platform/vha_plat_dt_fenrir.dts

@@ -0,0 +1,81 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_dt_fenrir.dts
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+/* Build this file using:
+ *
+ *  dtc -@ -I dts -O dtb vha_plat_dt_fenrir.dts -o vha_plat_dt_fenrir.dtbo
+ *
+ * The loading process on the target is done this way:
+ *
+ *  sudo mkdir /sys/kernel/config/device-tree/overlays/nna
+ *  cat vha_plat_dt_fenrir.dtbo | sudo tee /sys/kernel/config/device-tree/overlays/nna/dtbo > /dev/null
+ *
+ * This will apply the device overlay and set the needed "compatible" entry for the driver to load.
+ */
+/dts-v1/;
+/plugin/;
+
+&m_loki_core {
+   compatible = "img,loki";
+
+   interrupt-parent = <&gic>;
+   interrupts = <0x0 0x59 0x4>;
+   interrupt-controller;
+   #interrupt-cells = <1>;
+
+   /* Not needed  at the moment, but keep them just in case */
+   //memif-cache = <0x0>;
+   //memif-prot = <0x0>;
+};
+
+&m_dut_socif {
+  compatible = "img,ax3xxx-nna";
+
+  interrupt-parent = <&m_loki_core>;
+  interrupts = <0x0>;
+
+  dma-mask = /bits/ 64 <0xFFFFFFFFF>;
+};
+

+ 361 - 0
driver/vha/platform/vha_plat_dummy.c

@@ -0,0 +1,361 @@
+/*****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "uapi/version.h"
+#include "vha_regs.h"
+
+#if defined(CFG_SYS_MAGNA)
+#include "hwdefs/vha_cr_magna.h"
+#endif
+
+#define DEVICE_NAME "vha"
+
+static unsigned short min_alloc_order;
+module_param(min_alloc_order, ushort, 0444);
+MODULE_PARM_DESC(min_alloc_order,
+		"Minimum allocation order, depends on PAGE_SIZE, "
+		"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");
+static unsigned short max_alloc_order = 10; /* 4MB for PAGE_SIZE=4kB */
+module_param(max_alloc_order, ushort, 0444);
+MODULE_PARM_DESC(max_alloc_order,
+		"Maximum allocation order, depends on PAGE_SIZE, "
+		"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");
+
+static unsigned char num_clusters = 1;
+module_param(num_clusters, byte, 0444);
+MODULE_PARM_DESC(num_clusters,
+		"Number of dummy clusters. Each cluster will be instantiated "
+		"as a separate /dev/vhaN node. Max number supported is 255.");
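+
+/* Usage note: e.g. "insmod vha.ko num_clusters=4" (assuming the module
+ * binary is called vha.ko) instantiates /dev/vha0../dev/vha3. */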
+
+static struct heap_config dummy_heap_configs[] = {
+	/* the first config is default */
+	{
+		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
+		.options.unified = {
+			.gfp_type = GFP_KERNEL | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+	},
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+	},
+#endif
+};
+static size_t num_dummy_heaps =
+	sizeof(dummy_heap_configs) / sizeof(*dummy_heap_configs);
+
+static const size_t nna_regs_size =
+#ifdef _REG_NNSYS_SIZE
+	_REG_NNSYS_SIZE +
+#endif
+	_REG_SIZE;
+static bool use_dummy_regs = false;
+
+/* IO hooks - do nothing */
+uint64_t vha_plat_read64(void *addr)
+{
+	if (use_dummy_regs)
+		return *((uint64_t*)addr);
+
+	return 0ULL;
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	if (use_dummy_regs)
+		*((uint64_t*)addr) = val;
+}
+
+#if defined(CFG_SYS_MAGNA)
+static void vha_plat_magna_write_defaults(uint8_t* nna_regs) {
+	if (!use_dummy_regs)
+		return;
+	*((uint64_t*)(nna_regs + VHA_CR_CORE_ASSIGNMENT)) =
+			VHA_CR_CORE_ASSIGNMENT_CORE_7_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_6_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_5_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_4_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_3_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_2_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_UNALLOCATED |
+			VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED;
+
+	*((uint64_t*)(nna_regs + VHA_CR_SOCM_BUF_ASSIGNMENT)) =
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_7_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_6_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_5_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_4_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_3_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_2_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_UNALLOCATED |
+			VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_UNALLOCATED;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+
+static int vha_plat_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int vha_plat_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int vha_plat_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+#endif
+
+static int vha_plat_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	void* nna_regs=NULL;
+	int ret = 0;
+
+	/* dma_mask is required in order for dma_ops mapping to work */
+	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+			 __func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	/* Give a 128GB fake dma address range,
+	 * so that dma_map_page/map_sg do not throw any error
+	 * when dealing with high mem address allocations */
+	ret = dma_set_mask(dev, DMA_BIT_MASK(37));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_add_dev;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n",
+			__func__, dma_get_mask(dev));
+
+	if (dev->platform_data)
+		nna_regs = *(uint8_t**)dev->platform_data;
+
+	ret = vha_add_dev(dev, NULL, 0,
+			NULL /* plat data */,
+			nna_regs /* reg base */,
+			nna_regs_size /* reg size*/);
+	if (ret) {
+		dev_err(dev, "vha_add_dev failed\n");
+		goto out_add_dev;
+	}
+out_add_dev:
+	return ret;
+}
+
+static int vha_plat_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	dev_info(&pdev->dev, "%s\n", __func__);
+
+	vha_rm_dev(&pdev->dev);
+
+	return ret;
+}
+
+static struct dev_pm_ops vha_pm_plat_ops = {
+	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
+			vha_plat_runtime_resume, vha_plat_runtime_idle)
+	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
+};
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA dummy driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct platform_driver vha_driver = {
+	.driver = {
+			 .owner = THIS_MODULE,
+			 .name = "vha",
+			 .groups = drv_groups,
+			 .pm = &vha_pm_plat_ops,
+			 },
+	.probe = vha_plat_probe,
+	.remove = vha_plat_remove,
+};
+
+static struct platform_device **pd;
+
+int __exit vha_plat_deinit(void)
+{
+	int ret;
+	int cluster;
+	uint8_t* nna_regs;
+
+	platform_driver_unregister(&vha_driver);
+	for (cluster=0; cluster<num_clusters; ++cluster) {
+		BUG_ON(pd[cluster]==NULL);
+		nna_regs = *(uint8_t**)pd[cluster]->dev.platform_data;
+		vfree(nna_regs);
+		platform_device_unregister(pd[cluster]);
+	}
+	use_dummy_regs = false;
+	kfree(pd);
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return 0;
+}
+
+int __init vha_plat_init(void)
+{
+	int ret;
+	int cluster;
+
+	if (min_alloc_order > max_alloc_order) {
+		pr_err("Can't set min_alloc_order > max_alloc_order !\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(dummy_heap_configs[0].type != IMG_MEM_HEAP_TYPE_UNIFIED);
+	dummy_heap_configs[0].options.unified.min_order = min_alloc_order;
+	dummy_heap_configs[0].options.unified.max_order = max_alloc_order;
+
+	ret = vha_init_plat_heaps(dummy_heap_configs, num_dummy_heaps);
+	if (ret) {
+		pr_err("failed to initialize global heaps\n");
+		return -ENOMEM;
+	}
+	if (num_clusters == 0) {
+		pr_notice("Overriding num_clusters parameter to 1\n");
+		num_clusters=1;
+	}
+	pr_notice("%s instantiating %d dummy clusters\n",
+				__func__, num_clusters);
+
+	pd = kcalloc(num_clusters, sizeof(*pd), GFP_KERNEL);
+	if (pd == NULL) {
+		pr_err("failed to allocate memory!\n");
+		return -ENOMEM;
+	}
+	for (cluster=0; cluster<num_clusters; ++cluster) {
+		void* nna_regs=NULL;
+
+		pr_notice("%s Instantiating dummy vha%d cluster\n", __func__, cluster);
+#ifdef _REG_NNA_SIZE
+		nna_regs = vmalloc(nna_regs_size);
+		if (nna_regs == NULL)
+			pr_warn("Failed allocating dummy NNA reg space. Will not use it...\n");
+		else {
+			pr_notice("Successfully allocated dummy NNA reg space.\n");
+			memset(nna_regs, 0, nna_regs_size);
+			use_dummy_regs = true;
+#if defined(CFG_SYS_MAGNA)
+			vha_plat_magna_write_defaults(nna_regs);
+#endif
+		}
+#endif
+
+		/* After this call a copy of the nna_regs pointer is stored in
+		 * struct device.platform_data; the internal data is managed
+		 * by the platform device. */
+		pd[cluster] = platform_device_register_data(NULL, "vha", cluster, &nna_regs, sizeof(&nna_regs));
+		if (IS_ERR(pd[cluster])) {
+			ret = PTR_ERR(pd[cluster]);
+			pr_err("failed to register platform device!\n");
+			vfree(nna_regs);
+			goto _err;
+		}
+	}
+	ret = platform_driver_register(&vha_driver);
+	if (ret) {
+		pr_err("failed to register platform driver!\n");
+		goto _err;
+	}
+	return ret;
+_err:
+	for (cluster = 0; cluster < num_clusters; ++cluster) {
+		uint8_t *nna_regs;
+
+		/* skip clusters that were never successfully registered */
+		if (IS_ERR_OR_NULL(pd[cluster]))
+			continue;
+		nna_regs = *(uint8_t **)pd[cluster]->dev.platform_data;
+		vfree(nna_regs);
+		platform_device_unregister(pd[cluster]);
+	}
+	kfree(pd);
+	return ret;
+}

+ 641 - 0
driver/vha/platform/vha_plat_emu.c

@@ -0,0 +1,641 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_emu.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#define DEVICE_NAME "vha"
+
+/*
+ * Spec:
+ * Emulator PCIe In-Circuit Interface Card.Technical
+ *   Reference Manual.1.0.24.External PSTDRW.External
+ */
+
+/* Emulator address range 0x4000-0x4FFF */
+#define PCI_EMU_SYS_CTRL_REGS_BAR (0)
+/* Offset of INTERRUPT_ENABLE */
+#define PCI_EMU_INTERRUPT_ENABLE_OFS (0x4048)
+/* master interrupt enable - default high */
+#define PCI_EMU_IRQ_ENABLE (1<<0)
+#define PCI_EMU_IRQ_HIGH (1<<1)
+
+/* Emulator reset offset */
+#define PCI_EMU_RESET_OFS (0x4000)
+/* Emulator reset bits */
+#define PCI_EMU_RESET_LOGIC (1<<0)
+#define PCI_EMU_RESET_DUT   (1<<1)
+
+#define PCI_EMU_VENDOR_ID (0x1010)
+#define PCI_EMU_DEVICE_ID (0x1CE3)
+
+#define NUM_EMU_BARS      3
+#define EMU_REG_BAR       PCI_EMU_SYS_CTRL_REGS_BAR
+#define NNA_REG_BAR       1
+#define NNA_MEM_BAR       2
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "Physical size in bytes. When 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "Offset from PCI bar start (default: 0)");
+
+static unsigned short pool_alloc_order;
+module_param(pool_alloc_order, ushort, 0444);
+MODULE_PARM_DESC(pool_alloc_order,
+		"Carveout pool allocation order, depends on PAGE_SIZE, "
+		"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");
+
+static unsigned long poll_interrupts = 1;   /* Enabled by default */
+module_param(poll_interrupts, ulong, 0444);
+MODULE_PARM_DESC(poll_interrupts, "Poll for interrupts? 0: No, 1: Yes");
+
+static unsigned long irq_poll_delay_us = 10000; /* 10 ms */
+module_param(irq_poll_delay_us, ulong, 0444);
+MODULE_PARM_DESC(irq_poll_delay_us, "Delay in us between each interrupt poll");
+
+static bool mem_static_kptr = true;
+module_param(mem_static_kptr, bool, 0444);
+MODULE_PARM_DESC(mem_static_kptr,
+		"Creates static kernel mapping for fpga memory");
+
+static struct heap_config vha_plat_emu_heap_configs[] = {
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+		/* .to_host_addr to be filled at run time */
+	},
+#else
+#error CONFIG_GENERIC_ALLOCATOR was not defined
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+		.options.dmabuf = {
+				.use_sg_dma = true,
+		},
+	},
+#else
+#warning "Memory importing not supported!"
+#endif
+};
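+
+/* The carveout options above are filled in from the PCI BAR layout in
+ * vha_plat_probe() below. */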
+
+static const int vha_plat_emu_heaps = ARRAY_SIZE(vha_plat_emu_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(PCI_EMU_VENDOR_ID, PCI_EMU_DEVICE_ID) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+struct imgpci_prvdata {
+	int irq;
+	struct {
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} memmap[NUM_EMU_BARS];
+	struct pci_dev *pci_dev;
+	int irq_poll;
+	struct delayed_work irq_work;
+};
+
+
+struct img_pci_driver {
+	struct pci_dev *pci_dev;
+	struct pci_driver pci_driver;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
+		vha_plat_suspend, vha_plat_resume);
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA EMU driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_pci",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.groups = drv_groups,
+			.pm = &vha_pm_plat_ops,
+		}
+	},
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;
+
+#if 0
+static unsigned int emu_readreg32(struct imgpci_prvdata *data,
+		int bar, unsigned long offset
+)
+{
+	void __iomem *reg =
+		(void __iomem *)(data->memmap[bar].km_addr + offset);
+	return ioread32(reg);
+}
+#endif
+
+static void emu_writereg32(struct imgpci_prvdata *data,
+		int bar, unsigned long offset, int val)
+{
+	void __iomem *reg =
+		(void __iomem *)(data->memmap[bar].km_addr + offset);
+	iowrite32(val, reg);
+}
+
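+/*
+ * reset_emu - pulses the (seemingly active-low) emulator reset lines:
+ * the LOGIC and DUT reset bits are driven low, held for 100 ms, then
+ * driven high again.
+ */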
+static void reset_emu(struct pci_dev *dev,
+		struct imgpci_prvdata *data)
+{
+	if (!dev)
+		return;
+
+	emu_writereg32(data, PCI_EMU_SYS_CTRL_REGS_BAR,
+			PCI_EMU_RESET_OFS,
+			~(PCI_EMU_RESET_LOGIC|PCI_EMU_RESET_DUT));
+	mdelay(100);
+	emu_writereg32(data, PCI_EMU_SYS_CTRL_REGS_BAR,
+			PCI_EMU_RESET_OFS,
+			PCI_EMU_RESET_LOGIC|PCI_EMU_RESET_DUT);
+}
+
+static irqreturn_t pci_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+static irqreturn_t pci_handle_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	struct imgpci_prvdata *data;
+	irqreturn_t ret = IRQ_NONE;
+
+	/* Spurious interrupt: not yet initialised. */
+	if (dev_id == NULL)
+		goto exit;
+
+	data = vha_get_plat_data(&dev->dev);
+	if (data == NULL)
+		goto exit;
+
+	ret = vha_handle_irq(&dev->dev);
+exit:
+	return ret;
+}
+
+/* Interrupt polling function */
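+/*
+ * Note: this mirrors the real IRQ path - vha_handle_irq() runs with
+ * preemption disabled and, when it returns IRQ_WAKE_THREAD, the thread
+ * handler is called synchronously from the work item.
+ */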
+static void pci_poll_interrupt(struct work_struct *work)
+{
+	struct imgpci_prvdata *data = container_of(work,
+			struct imgpci_prvdata, irq_work.work);
+	struct pci_dev *dev = data->pci_dev;
+	int ret;
+
+	if (!data->irq_poll)
+		return;
+
+	preempt_disable();
+	ret = vha_handle_irq(&dev->dev);
+	preempt_enable();
+	if (ret == IRQ_WAKE_THREAD)
+		vha_handle_thread_irq(&dev->dev);
+
+	/* retrigger */
+	schedule_delayed_work(&data->irq_work,
+			usecs_to_jiffies(irq_poll_delay_us));
+}
+
+/*
+ * IO hooks.
+ * NOTE: customer may want to use spinlock to avoid
+ * problems with multi threaded IO access
+ */
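+/*
+ * 64-bit registers are accessed as two 32-bit transactions (low word
+ * first); the IRQ-safe spinlock keeps each pair from interleaving with a
+ * concurrent access, though the pair is still not a single bus transaction.
+ */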
+static DEFINE_SPINLOCK(io_irq_lock);
+
+uint64_t vha_plat_read64(void *addr)
+{
+	u64 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	val = (uint64_t)readl((const volatile void __iomem *)addr) |
+			((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+	return val;
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
+	writel((uint32_t)(val >> 32),        (volatile void __iomem *)addr + 4);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+}
+
+int vha_plat_deinit(void)
+{
+	struct pci_dev *dev = vha_pci_drv.pci_dev;
+	int ret;
+
+	if (dev) {
+		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+		if (data) {
+			if (poll_interrupts) {
+				data->irq_poll = 0;
+				cancel_delayed_work_sync(&data->irq_work);
+			}
+			/* reset the emulator */
+			reset_emu(data->pci_dev, data);
+			emu_writereg32(data,
+					PCI_EMU_SYS_CTRL_REGS_BAR,
+					PCI_EMU_INTERRUPT_ENABLE_OFS,
+					~PCI_EMU_IRQ_ENABLE);
+		} else {
+			dev_dbg(&dev->dev,
+				"%s: prv data not found, HW reset omitted\n",
+				__func__);
+		}
+	} else {
+		pr_debug("%s: dev missing, HW reset omitted\n", __func__);
+	}
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
+				 __func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
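+
+/* Illustrative example: with carveout phys 0x80000000, size 0x10000000 and
+ * offs 0, host address 0x80001000 translates to device address 0x1000 and
+ * back again. */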
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and as a rule, it cannot
+	 * be dereferenced safely without memory barriers; that
+	 * is why it is guarded by __iomem (return of ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to
+	 * a regular void * pointer. We cast it here, assuming
+	 * the FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+	iounmap((volatile void __iomem *)addr);
+	return 0;
+}
+#endif
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int bar, ret = 0;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	struct device *dev = &pci_dev->dev;
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	int heap;
+#endif
+
+	dev_dbg(dev, "probing device, pci_dev: %p\n", pci_dev);
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+			 __func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+	ret = dma_set_mask(dev, dma_get_mask(dev));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
+	for (bar = 0; bar < NUM_EMU_BARS; bar++) {
+		data->memmap[bar].addr = pci_resource_start(pci_dev, bar);
+		data->memmap[bar].size = pci_resource_len(pci_dev, bar);
+		if (data->memmap[bar].size > maxmapsize) {
+			/*
+			 * We avoid mapping too big regions: we do not need
+			 * such a big amount of memory and some times we do
+			 * not have enough contiguous 'vmallocable' memory.
+			 */
+			dev_warn(dev, "not mapping all mem for bar %u\n", bar);
+			data->memmap[bar].size = maxmapsize;
+		}
+
+		if (bar == NNA_MEM_BAR) {
+			/* Change memory size according to module parameter */
+			if (pci_size)
+				data->memmap[bar].size = pci_size;
+
+			/* ioremap fpga memory only when static mode is used */
+			if (!mem_static_kptr)
+				continue;
+		}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		data->memmap[bar].km_addr = devm_ioremap_nocache(dev,
+				pci_resource_start(pci_dev, bar),
+				data->memmap[bar].size);
+#else
+		data->memmap[bar].km_addr = devm_ioremap(dev,
+				pci_resource_start(pci_dev, bar),
+				data->memmap[bar].size);
+#endif
+
+		dev_dbg(dev, "[bar %u] addr: 0x%lx size: 0x%lx km: 0x%p\n",
+				bar, data->memmap[bar].addr,
+				data->memmap[bar].size,
+				data->memmap[bar].km_addr);
+	}
+
+	/* Get the IRQ...*/
+	data->irq = pci_dev->irq;
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	reset_emu(pci_dev, data);
+
+	if (!poll_interrupts) {
+		/* Enable interrupts */
+		emu_writereg32(data, PCI_EMU_SYS_CTRL_REGS_BAR,
+				PCI_EMU_INTERRUPT_ENABLE_OFS,
+				PCI_EMU_IRQ_ENABLE | PCI_EMU_IRQ_HIGH);
+	}
+
+	/* patch heap config with PCI memory addresses */
+	for (heap = 0; heap < vha_plat_emu_heaps; heap++) {
+		struct heap_config *cfg = &vha_plat_emu_heap_configs[heap];
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+			cfg->options.carveout.phys = data->memmap[NNA_MEM_BAR].addr;
+			if (mem_static_kptr)
+				cfg->options.carveout.kptr =
+						data->memmap[NNA_MEM_BAR].km_addr;
+			cfg->options.carveout.size = data->memmap[NNA_MEM_BAR].size;
+			cfg->options.carveout.offs = pci_offset;
+			cfg->to_dev_addr = carveout_to_dev_addr;
+			cfg->to_host_addr = carveout_to_host_addr;
+
+			/* IO memory access callbacks */
+			if (!mem_static_kptr) {
+				/* Dynamic kernel memory mapping */
+				cfg->options.carveout.get_kptr = carveout_get_kptr;
+				cfg->options.carveout.put_kptr = carveout_put_kptr;
+			}
+			/* Allocation order */
+			cfg->options.carveout.pool_order = pool_alloc_order;
+			break;
+		}
+#endif
+	}
+
+	ret = vha_add_dev(dev, vha_plat_emu_heap_configs,
+			vha_plat_emu_heaps, data,
+			data->memmap[NNA_REG_BAR].km_addr, data->memmap[NNA_REG_BAR].size);
+	if (ret) {
+		dev_err(dev, "failed to initialize driver core!\n");
+		goto out_release;
+	}
+
+	if (!poll_interrupts) {
+		/* Install the ISR callback...*/
+		ret = devm_request_threaded_irq(dev, data->irq, &pci_handle_irq,
+				&pci_thread_irq, IRQF_SHARED, DEVICE_NAME,
+				(void *)pci_dev);
+		if (ret) {
+			dev_err(dev, "failed to request irq!\n");
+			goto out_rm_dev;
+		}
+		dev_dbg(dev, "registered irq %d\n", data->irq);
+	} else {
+		INIT_DELAYED_WORK(&data->irq_work, pci_poll_interrupt);
+		data->irq_poll = 1;
+		/* Start the interrupt poll */
+		schedule_delayed_work(&data->irq_work,
+				usecs_to_jiffies(irq_poll_delay_us));
+	}
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+out_release:
+	pci_release_regions(pci_dev);
+out_disable:
+	pci_disable_device(pci_dev);
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	dev_dbg(&dev->dev, "removing device\n");
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 1004 - 0
driver/vha/platform/vha_plat_frost.c

@@ -0,0 +1,1004 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/nn_sys_cr_vagus.h>
+#endif
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/vagus_system.h>
+#elif defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#elif defined(CFG_SYS_MAGNA)
+#include <hwdefs/magna_system.h>
+#endif
+
+
+#define DEVICE_NAME "vha"
+
+#define IS_FROST_DEVICE(devid) ((devid) == PCI_FROST_DEVICE_ID)
+
+/*
+ * from ICE2 card Frost.Technical Reference Manual.docx
+ */
+
+#define PCI_FROST_VENDOR_ID (0x1AEE)
+#define PCI_FROST_DEVICE_ID (0x1030)
+
+/* Frost - System control register bar */
+#define PCI_FROST_SYS_CTRL_REGS_BAR (0)
+
+#define PCI_FROST_SYS_CTRL_BASE_OFFSET           (0x0000)
+/* props */
+#define PCI_FROST_CORE_ID                        (0x0000)
+#define PCI_FROST_CORE_REVISION                  (0x0004)
+#define PCI_FROST_CORE_CHANGE_SET                (0x0008)
+#define PCI_FROST_CORE_USER_ID                   (0x000C)
+#define PCI_FROST_CORE_USER_BUILD                (0x0010)
+#define PCI_FROST_CORE_SW_IF_VERSION             (0x0014)
+#define PCI_FROST_CORE_UC_IF_VERSION             (0x0018)
+/* Interrupt mode */
+#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL        (0x0048)
+/* Resets */
+#define PCI_FROST_CORE_INTERNAL_RESETN           (0x0080)
+#define PCI_FROST_CORE_EXTERNAL_RESETN           (0x0084)
+#define PCI_FROST_CORE_INTERNAL_AUTO_RESETN      (0x008C)
+/* Interrupts */
+#define PCI_FROST_CORE_INTERRUPT_STATUS          (0x0100)
+#define PCI_FROST_CORE_INTERRUPT_ENABLE          (0x0104)
+#define PCI_FROST_CORE_INTERRUPT_CLR             (0x010C)
+#define PCI_FROST_CORE_INTERRUPT_TEST            (0x0110)
+#define PCI_FROST_CORE_INTERRUPT_TIMEOUT_CLR     (0x0114)
+#define PCI_FROST_CORE_INTERRUPT_TIMEOUT         (0x0118)
+/* MISC */
+#define PCI_FROST_CORE_SYSTEM_ID                 (0x0120)
+/* LEDs! */
+#define PCI_FROST_CORE_DASH_LEDS                 (0x01A8)
+/* Core stuff */
+#define PCI_FROST_CORE_PCIE_TO_EMU_ADDR_OFFSET   (0x0204)
+#define PCI_FROST_CORE_EMU_TO_PCIE_ADDR_OFFSET   (0x0208)
+#define PCI_FROST_CORE_CORE_CONTROL              (0x0210)
+#define PCI_FROST_CORE_EMU_CLK_CNT               (0x0214)
+
+/* Interrupt bits */
+#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL_ENABLE (1 << 0)
+#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL_SENSE  (1 << 1)
+
+/* core bits definitions */
+#define INTERNAL_RESET_INTERNAL_RESETN_CMDA      (1 << 0)
+#define INTERNAL_RESET_INTERNAL_RESETN_GIST      (1 << 1)
+#define EXTERNAL_RESET_EXTERNAL_RESETN_EMU       (1 << 0)
+#define INTERNAL_AUTO_RESETN_AUX                 (1 << 0)
+
+/* interrupt bits definitions */
+#define INT_INTERRUPT_MASTER_ENABLE              (0)  /*(1 << 31) - disabled */
+#define INT_INTERRUPT_IRQ_TEST                   (1 << 30)
+#define INT_INTERRUPT_CDMA                       (1 << 1)
+#define INT_INTERRUPT_EMU                        (1 << 0)
+
+#define INT_TEST_INTERRUPT_TEST                  (1 << 0)
+#define INTERRUPT_MST_TIMEOUT_CLR                (1 << 1)
+#define INTERRUPT_MST_TIMEOUT                    (1 << 0)
+
+#define PCI_FROST_CORE_REG_SIZE                  (0x1000)
+
+/* Frost - Device Under Test (DUT) register bar */
+#define PCI_FROST_DUT_REG_BAR (2)
+#define PCI_FROST_DUT_MEM_BAR (4)
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "Physical size in bytes. When 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "Offset from PCI bar start (default: 0)");
+
+static unsigned short pool_alloc_order;
+module_param(pool_alloc_order, ushort, 0444);
+MODULE_PARM_DESC(pool_alloc_order,
+		"Carveout pool allocation order, depends on PAGE_SIZE, "
+		"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");
+
+static unsigned long poll_interrupts = 1;
+module_param(poll_interrupts, ulong, 0444);
+MODULE_PARM_DESC(poll_interrupts, "Poll for interrupts? 0: No, 1: Yes");
+
+static unsigned long irq_poll_delay_us = 10000; /* 10 ms */
+module_param(irq_poll_delay_us, ulong, 0444);
+MODULE_PARM_DESC(irq_poll_delay_us, "Delay in us between each interrupt poll");
+
+static bool irq_self_test;
+module_param(irq_self_test, bool, 0444);
+MODULE_PARM_DESC(irq_self_test, "Enable self irq test board feature");
+
+static struct heap_config vha_dev_frost_heap_configs[] = {
+	/* Primary heap used for internal allocations */
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+	},
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+		.options.dmabuf = {
+				.use_sg_dma = true,
+		},
+	},
+#else
+#warning "Memory importing not supported!"
+#endif
+};
+
+static const int vha_dev_frost_heaps = ARRAY_SIZE(vha_dev_frost_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(PCI_FROST_VENDOR_ID, PCI_FROST_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+enum { CORE_REG_BANK = 0,
+	NNA_REG_BANK, MEM_REG_BANK,
+	REG_BANK_COUNT /* Must be the last */};
+
+struct imgpci_prvdata {
+	int irq;
+
+	struct {
+		int bar;
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} reg_bank[REG_BANK_COUNT];
+
+	struct pci_dev *pci_dev;
+	int irq_poll;
+	struct delayed_work irq_work;
+};
+
+struct img_pci_driver {
+	struct pci_dev *pci_dev;
+	struct pci_driver pci_driver;
+	struct delayed_work irq_work;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
+		vha_plat_suspend, vha_plat_resume);
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA Frost driver version : " VERSION_STRING "\n");
+}
+
+static inline uint64_t __readreg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset) __maybe_unused;
+static inline void __writereg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, uint64_t val) __maybe_unused;
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_pci",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.groups = drv_groups,
+			.pm = &vha_pm_plat_ops,
+		}
+	},
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 2048;
+
+/**
+ * __readreg32 - Generic PCI bar read functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ */
+static inline unsigned int __readreg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	return ioread32(reg);
+}
+
+/**
+ * __writereg32 - Generic PCI bar write functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void __writereg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, int val)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	iowrite32(val, reg);
+}
+
+/**
+ * __readreg64 - Generic PCI bar read functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ */
+static inline uint64_t __readreg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr + offset);
+
+	return (uint64_t)ioread32(reg) | ((uint64_t)ioread32(reg + 4) << 32);
+}
+
+/**
+ * __writereg64 - Generic PCI bar write functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void __writereg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, uint64_t val)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr + offset);
+
+	iowrite32(val & 0xFFFFFFFF, reg);
+	iowrite32(val >> 32, reg + 4);
+}
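+
+/* Note: the 64-bit helpers above issue two 32-bit transactions (low word
+ * first), so a 64-bit access is not atomic with respect to the device. */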
+
+/**
+ * frost_core_writereg32 - Write to Frost control registers
+ * @data: pointer to the data
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void frost_core_writereg32(struct imgpci_prvdata *data,
+		unsigned long offset, int val)
+{
+	__writereg32(data, CORE_REG_BANK, offset, val);
+}
+
+/**
+ * frost_core_readreg32 - Read Frost control registers
+ * @data: pointer to the data
+ * @offset: offset within bank
+ */
+static inline unsigned int frost_core_readreg32(struct imgpci_prvdata *data,
+		unsigned long offset)
+{
+	return __readreg32(data, CORE_REG_BANK, offset);
+}
+
+
+static inline void frost_reset_int(struct imgpci_prvdata *data)
+{
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE, 0);
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_CLR, 0xFFFFFFFF);
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TIMEOUT, 0xFFFFFFFF);
+	/* SENSE shall be low, because polarity is reversed */
+	frost_core_writereg32(data, PCI_FROST_CORE_EMU_INTERRUPT_CTRL,
+			PCI_FROST_CORE_EMU_INTERRUPT_CTRL_ENABLE);
+}
+
+/**
+ * frost_enable_int - Enable an interrupt
+ * @data: pointer to the data
+ * @intmask: interrupt mask
+ */
+static inline void frost_enable_int(struct imgpci_prvdata *data,
+		uint32_t intmask)
+{
+	uint32_t irq_enabled = frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE);
+
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE, irq_enabled | intmask | INT_INTERRUPT_MASTER_ENABLE);
+}
+
+/**
+ * frost_disable_int - Disable an interrupt
+ * @data: pointer to the data
+ * @intmask: interrupt mask
+ */
+static inline void frost_disable_int(struct imgpci_prvdata *data,
+		uint32_t intmask)
+{
+	uint32_t irq_enabled = frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE);
+
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE,
+		irq_enabled & ~intmask);
+}
+
+/**
+ * frost_test_int - Test an interrupt
+ * @data: pointer to the data
+ */
+static inline void frost_test_int(struct imgpci_prvdata *data)
+{
+	frost_enable_int(data, INT_INTERRUPT_IRQ_TEST);
+	pr_warn("%s: trigger interrupt!\n", __func__);
+	/* SENSE shall be high */
+	frost_core_writereg32(data, PCI_FROST_CORE_EMU_INTERRUPT_CTRL,
+			PCI_FROST_CORE_EMU_INTERRUPT_CTRL_SENSE);
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TEST, INT_TEST_INTERRUPT_TEST);
+}
+
+/**
+ * reset_dut - Reset the Device Under Test
+ * @data: pointer to the data
+ */
+static void reset_dut(struct imgpci_prvdata *data)
+{
+	uint32_t internal_rst = frost_core_readreg32(data, PCI_FROST_CORE_INTERNAL_RESETN);
+	uint32_t external_rst = frost_core_readreg32(data, PCI_FROST_CORE_EXTERNAL_RESETN);
+
+	dev_dbg(&data->pci_dev->dev, "going to reset DUT frost!\n");
+
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERNAL_RESETN,
+		internal_rst & ~(INTERNAL_RESET_INTERNAL_RESETN_GIST|
+			INTERNAL_RESET_INTERNAL_RESETN_CMDA));
+	frost_core_writereg32(data, PCI_FROST_CORE_EXTERNAL_RESETN,
+		external_rst & ~(EXTERNAL_RESET_EXTERNAL_RESETN_EMU));
+
+	udelay(100); /* arbitrary delays, just in case! */
+
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERNAL_RESETN, internal_rst);
+	frost_core_writereg32(data, PCI_FROST_CORE_EXTERNAL_RESETN, external_rst);
+
+	msleep(100);
+
+	dev_dbg(&data->pci_dev->dev, "DUT frost reset done!\n");
+}
+
+/**
+ * frost_thread_irq - High latency interrupt handler
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ */
+static irqreturn_t frost_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+/**
+ * frost_isr_clear - Clear an interrupt
+ * @data: pointer to the data
+ * @intstatus: interrupt status
+ *
+ * note: the purpose of this loop is unclear; it is taken from Apollo/Atlas
+ * code, which uses the same interrupt handler as Frost, and may work around
+ * a hardware quirk.
+ */
+static void frost_isr_clear(struct imgpci_prvdata *data, unsigned int intstatus)
+{
+	unsigned int max_retries = 1000;
+
+	while ((frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_STATUS) & intstatus) && max_retries--) {
+		frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_CLR,
+			(INT_INTERRUPT_MASTER_ENABLE | intstatus));
+	}
+
+	if (!max_retries) {
+		pr_warn("Can't clear irq ! disabling interrupts!\n");
+		frost_reset_int(data);
+	}
+}
+
+
+/**
+ * frost_isr_cb - Low latency interrupt handler
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ */
+static irqreturn_t frost_isr_cb(int irq, void *dev_id)
+{
+	uint32_t intstatus;
+
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	struct imgpci_prvdata *data;
+
+	irqreturn_t ret = IRQ_NONE;
+
+	if (dev_id == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Spurious interrupt data/dev_id not initialised!\n");
+		goto exit;
+	}
+
+	data = vha_get_plat_data(&dev->dev);
+
+	if (data == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Invalid driver private data!\n");
+		goto exit;
+	}
+
+	/* Read interrupt status register */
+	intstatus = frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_STATUS);
+
+	/* Clear timeout bit just for sanity */
+	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TIMEOUT_CLR,
+			INTERRUPT_MST_TIMEOUT_CLR);
+
+	if (intstatus & INT_INTERRUPT_IRQ_TEST) {
+		/* Handle test int */
+		pr_warn("Test interrupt OK! Switch back to normal mode!\n");
+		frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TEST, 0);
+		/* Disable irqs */
+		frost_reset_int(data);
+		ret = IRQ_HANDLED;
+	}
+
+	if (intstatus & INT_INTERRUPT_EMU) {
+		/* call real irq handler */
+		ret = vha_handle_irq(&dev->dev);
+	}
+
+	if (unlikely(intstatus == 0)) {
+		/* most likely this is a shared interrupt line */
+		dev_dbg(&dev->dev,
+				"%s: unexpected or spurious interrupt [%x] (shared IRQ?)!\n",
+			__func__, intstatus);
+		goto exit;
+	}
+
+	/* Ack the ints */
+	frost_isr_clear(data, intstatus);
+exit:
+	return ret;
+}
+
+/* Interrupt polling function */
+static void frost_poll_interrupt(struct work_struct *work)
+{
+	struct imgpci_prvdata *data = container_of(work,
+			struct imgpci_prvdata, irq_work.work);
+	struct pci_dev *dev = data->pci_dev;
+	int ret;
+
+	if (!data->irq_poll)
+		return;
+
+	preempt_disable();
+	ret = vha_handle_irq(&dev->dev);
+	preempt_enable();
+	if (ret == IRQ_WAKE_THREAD)
+		vha_handle_thread_irq(&dev->dev);
+
+#if 0
+	{
+		uint32_t clk_cnt = frost_core_readreg32(data, PCI_FROST_CORE_EMU_CLK_CNT);
+		pr_debug("%s: EMU clk_cnt: %u\n", __func__, clk_cnt);
+	}
+#endif
+	/* retrigger */
+	schedule_delayed_work(&data->irq_work,
+			usecs_to_jiffies(irq_poll_delay_us));
+}
+
+/**
+ * frost_allocate_registers - Allocate memory for a register (or memory) bank
+ * @pci_dev: pointer to pci device
+ * @data: pointer to the data
+ * @bank: bank to set
+ * @bar: BAR where the register are
+ * @base: base address in the BAR
+ * @size: size of the register set
+ */
+static inline int frost_allocate_registers(struct pci_dev *pci_dev,
+		struct imgpci_prvdata *data, int bank,
+		int bar, unsigned long base, unsigned long size)
+{
+	unsigned long bar_size = pci_resource_len(pci_dev, bar);
+	unsigned long bar_addr = pci_resource_start(pci_dev, bar);
+	unsigned long bar_max_size = bar_size - base;
+	BUG_ON((base > bar_size) || ((base+size) > bar_size));
+
+	data->reg_bank[bank].bar = bar;
+	data->reg_bank[bank].addr = bar_addr + base;
+	data->reg_bank[bank].size = min(size, bar_max_size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	data->reg_bank[bank].km_addr = devm_ioremap_nocache(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#else
+	data->reg_bank[bank].km_addr = devm_ioremap(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#endif
+
+	pr_debug("[bank %u] bar:%d addr:0x%lx size:0x%lx km:0x%px\n",
+			bank, bar, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size,
+			data->reg_bank[bank].km_addr);
+
+	return data->reg_bank[bank].km_addr == NULL;
+}
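+
+/* Example (as used in vha_plat_probe() below):
+ *   frost_allocate_registers(pci_dev, data, CORE_REG_BANK,
+ *           PCI_FROST_SYS_CTRL_REGS_BAR, PCI_FROST_SYS_CTRL_BASE_OFFSET,
+ *           PCI_FROST_CORE_REG_SIZE)
+ * maps the 4 kB core control bank from BAR 0.
+ */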
+
+int vha_plat_deinit(void)
+{
+	struct pci_dev *dev = vha_pci_drv.pci_dev;
+	int ret;
+
+	if (dev) {
+		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+		if (data) {
+			if (poll_interrupts) {
+				data->irq_poll = 0;
+				cancel_delayed_work_sync(&data->irq_work);
+			}
+
+			/* reset the hardware */
+			reset_dut(data);
+		} else {
+			dev_dbg(&dev->dev,
+					"%s: prv data not found, HW reset omitted\n",
+					__func__);
+		}
+	} else {
+		/*pr_debug("%s: dev missing, HW reset omitted\n", __func__);*/
+	}
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#define NNA_REG_BAR (PCI_FROST_DUT_REG_BAR)
+#ifdef CFG_SYS_VAGUS
+#define NNA_REG_SIZE (_REG_SIZE + _REG_NNSYS_SIZE)
+#else
+#define NNA_REG_SIZE (_REG_SIZE)
+#endif
+
+#define NNA_REG_OFFSET (_REG_START)
+
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
+				 __func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and as a rule, it cannot
+	 * be dereferenced safely without memory barriers; that
+	 * is why it is guarded by __iomem (return of ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to
+	 * a regular void * pointer. We cast it here, assuming
+	 * the FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	/*pr_debug(
+		"Mapping %zu bytes into kernel memory (Phys:%08llX, Kptr:%p)\n",
+		size, addr, kptr);
+	pr_debug("[%c%c%c]\n",
+			 (mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
+			 (mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
+			 (mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');*/
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+/*	pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);*/
+	iounmap((volatile void __iomem *)addr);
+	return 0;
+}
+#endif
+
+/*
+ * IO hooks.
+ * NOTE: using a spinlock to avoid
+ * problems with multi threaded IO access
+ */
+static DEFINE_SPINLOCK(io_irq_lock);
+
+uint64_t vha_plat_read64(void *addr)
+{
+	u64 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	val = (uint64_t)readl((const volatile void __iomem *)addr) |
+			((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+	return val;
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
+	writel((uint32_t)(val >> 32),        (volatile void __iomem *)addr + 4);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+}
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int ret = 0;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	unsigned long vha_base_mem, vha_mem_size;
+	struct device *dev = &pci_dev->dev;
+	int heap;
+
+	dev_dbg(dev, "probing device, pci_dev: %p\n", pci_dev);
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+				__func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("Memory allocation error, aborting.\n");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
+	memset(data, 0, sizeof(*data));
+
+	/* Allocate frost core registers */
+	ret = frost_allocate_registers(pci_dev, data,
+				CORE_REG_BANK, PCI_FROST_SYS_CTRL_REGS_BAR,
+				PCI_FROST_SYS_CTRL_BASE_OFFSET,
+				PCI_FROST_CORE_REG_SIZE);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for frost regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	/* Display some info */
+	{
+		uint32_t frost_id  = frost_core_readreg32(data, PCI_FROST_CORE_ID);
+		uint32_t frost_rev = frost_core_readreg32(data, PCI_FROST_CORE_REVISION);
+		uint32_t frost_cs  = frost_core_readreg32(data, PCI_FROST_CORE_CHANGE_SET);
+		uint32_t frost_ui  = frost_core_readreg32(data, PCI_FROST_CORE_USER_ID);
+		uint32_t frost_ub  = frost_core_readreg32(data, PCI_FROST_CORE_USER_BUILD);
+		uint32_t frost_swif = frost_core_readreg32(data, PCI_FROST_CORE_SW_IF_VERSION);
+		uint32_t frost_ucif = frost_core_readreg32(data, PCI_FROST_CORE_UC_IF_VERSION);
+
+		pr_info("Found Frost board v%d.%d (ID:%X CS:%X UI:%X UB:%X SWIF:%X UCIF:%X)",
+			(frost_rev >> 16) & 0xFFFF, frost_rev & 0xFFFF,
+			frost_id, frost_cs, frost_ui, frost_ub, frost_swif, frost_ucif);
+	}
+
+	/* Allocate NNA register space */
+	ret = frost_allocate_registers(pci_dev, data,
+				NNA_REG_BANK, NNA_REG_BAR,
+				NNA_REG_OFFSET,
+				NNA_REG_SIZE);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for vha regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	/* Allocate DUT memory space */
+	vha_mem_size = pci_resource_len(pci_dev, PCI_FROST_DUT_MEM_BAR);
+	if (vha_mem_size > maxmapsize)
+		vha_mem_size = maxmapsize;
+
+	vha_base_mem = pci_resource_start(pci_dev, PCI_FROST_DUT_MEM_BAR);
+
+	/* change alloc size according to module parameter */
+	if (pci_size)
+		vha_mem_size = pci_size;
+
+	/* We are not really allocating memory for that reg bank,
+	 * so set the values by hand here: */
+	data->reg_bank[MEM_REG_BANK].bar = PCI_FROST_DUT_MEM_BAR;
+	data->reg_bank[MEM_REG_BANK].addr = vha_base_mem;
+	data->reg_bank[MEM_REG_BANK].size = vha_mem_size;
+	pr_debug("[bank %u] bar:%d addr: 0x%lx size: 0x%lx\n",
+			MEM_REG_BANK, PCI_FROST_DUT_MEM_BAR,
+				data->reg_bank[MEM_REG_BANK].addr,
+				data->reg_bank[MEM_REG_BANK].size);
+
+	/* Get the IRQ...*/
+	data->irq = pci_dev->irq;
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	reset_dut(data);
+
+	for (heap = 0; heap < vha_dev_frost_heaps; heap++) {
+		struct heap_config *cfg = &vha_dev_frost_heap_configs[heap];
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+			cfg->options.carveout.phys =
+				data->reg_bank[MEM_REG_BANK].addr;
+			cfg->options.carveout.size =
+				data->reg_bank[MEM_REG_BANK].size;
+			cfg->options.carveout.offs = pci_offset;
+			cfg->to_dev_addr = carveout_to_dev_addr;
+			cfg->to_host_addr = carveout_to_host_addr;
+			/* IO memory access callbacks */
+			cfg->options.carveout.get_kptr = carveout_get_kptr;
+			cfg->options.carveout.put_kptr = carveout_put_kptr;
+			/* Allocation order */
+			cfg->options.carveout.pool_order = pool_alloc_order;
+			break;
+		}
+#endif
+	}
+
+	ret = vha_add_dev(dev,
+			vha_dev_frost_heap_configs,
+			vha_dev_frost_heaps,
+			data,
+			data->reg_bank[NNA_REG_BANK].km_addr,
+			data->reg_bank[NNA_REG_BANK].size);
+	if (ret) {
+		dev_err(dev, "failed to initialize driver core!\n");
+		goto out_deinit;
+	}
+
+	if (!poll_interrupts) {
+		/* Reset irqs at first */
+		frost_reset_int(data);
+
+		/* Install the ISR callback...*/
+		ret = devm_request_threaded_irq(dev, data->irq, &frost_isr_cb,
+				&frost_thread_irq, IRQF_SHARED, DEVICE_NAME,
+				(void *)pci_dev);
+		if (ret) {
+			dev_err(dev, "failed to request irq!\n");
+			goto out_rm_dev;
+		}
+		dev_dbg(dev, "registered irq %d\n", data->irq);
+
+		if (irq_self_test) {
+			/* Trigger Test interrupt */
+			frost_test_int(data);
+			/* Give some time to trigger test IRQ */
+			msleep(10);
+		} else {
+			frost_enable_int(data, INT_INTERRUPT_EMU);
+		}
+	} else {
+		INIT_DELAYED_WORK(&data->irq_work, frost_poll_interrupt);
+		data->irq_poll = 1;
+		/* Start the interrupt poll */
+		schedule_delayed_work(&data->irq_work,
+				usecs_to_jiffies(irq_poll_delay_us));
+	}
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+
+out_deinit:
+	/* Make sure int are no longer enabled */
+	frost_disable_int(data, INT_INTERRUPT_EMU);
+out_release:
+	pci_release_regions(pci_dev);
+out_disable:
+	pci_disable_device(pci_dev);
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+	dev_dbg(&dev->dev, "removing device\n");
+
+
+	if (data == NULL) {
+		dev_err(&dev->dev, "PCI priv data missing!\n");
+	} else if (!poll_interrupts) {
+		/*
+		 * We  need to disable interrupts for the
+		 * embedded device via the frost interrupt controller...
+		 */
+		frost_disable_int(data, INT_INTERRUPT_EMU);
+
+		/* Unregister int */
+		devm_free_irq(&dev->dev, data->irq, dev);
+	}
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}

+ 491 - 0
driver/vha/platform/vha_plat_nexef.c

@@ -0,0 +1,491 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
+#include <linux/of_platform.h>
+
+#include "vha_common.h"
+#include "vha_plat.h"
+
+
+#include <nexef_plat.h>
+/* NNPU TC exports we need*/
+#include <tc_drv.h>
+
+#define DEVICE_NAME "vha"
+
+/*
+ * Special handling (not implemented) is required for the VHA device
+ * to be able to access both carveout buffers (internal memory) and
+ * dmabuf buffers (system memory). The latter have to go through
+ * the system bus to be accessed, whereas the former do not.
+ */
+static struct heap_config vha_plat_fpga_heap_configs[] = {
+        /* Primary heap used for internal allocations */
+#ifdef FPGA_BUS_MASTERING
+#error Bus mastering not supported on this platform
+#elif defined(CONFIG_GENERIC_ALLOCATOR)
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+	},
+#else
+#error Neither FPGA_BUS_MASTERING or CONFIG_GENERIC_ALLOCATOR was defined
+#endif
+
+        /* Secondary heap used for importing an external memory */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+	},
+#else
+#warning "Memory importing not supported!"
+#endif
+};
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+static const int vha_plat_fpga_heaps = ARRAY_SIZE(vha_plat_fpga_heap_configs);
+
+static int vha_plat_probe(struct platform_device *pdev);
+static int vha_plat_remove(struct platform_device *pdev);
+
+static int vha_plat_suspend(struct platform_device *pdev, pm_message_t state);
+static int vha_plat_resume(struct platform_device *pdev);
+
+enum {
+    PLATFORM_IS_NEXEF = 1,
+};
+
+static struct platform_device_id nna_platform_device_id_table[] = {
+        { .name = NEXEF_NNA_DEVICE_NAME, .driver_data = PLATFORM_IS_NEXEF },
+        { },
+};
+
+static struct platform_driver vha_platform_drv = {
+        .probe = vha_plat_probe,
+        .remove = vha_plat_remove,
+        .suspend = vha_plat_suspend,
+        .resume = vha_plat_resume,
+        .driver = {
+            .owner = THIS_MODULE,
+            .name = DEVICE_NAME,
+        },
+        .id_table = nna_platform_device_id_table,
+};
+
+struct nna_driver_priv {
+    struct platform_device *pdev;
+
+    void __iomem *nna_regs;
+    uint32_t      nna_size;
+
+    /* Work for the threaded interrupt. */
+    struct work_struct work;
+};
+
+/*
+ * reset_dut - Reset the Device Under Test
+ */
+static void reset_dut(struct device *dev)
+{
+    /* Nothing yet until Odin baseboard is updated to support that */
+    //tc_dut2_reset(dev);
+}
+
+/*
+ * nna_soft_isr_cb - High latency interrupt handler
+ */
+static void nna_soft_isr_cb(struct work_struct *work)
+{
+    struct nna_driver_priv *priv = container_of(work, struct nna_driver_priv, work);
+    struct platform_device *pdev = priv->pdev;
+    struct device *dev = &pdev->dev;
+
+    vha_handle_thread_irq(dev);
+}
+
+/*
+ * nna_hard_isr_cb - Low latency interrupt handler
+ */
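+/*
+ * Note: this is registered via tc_set_interrupt_handler() as a plain
+ * callback rather than a threaded IRQ, so IRQ_WAKE_THREAD is emulated by
+ * deferring the thread-level handling to the work item above.
+ */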
+static void nna_hard_isr_cb(void *pdev_id)
+{
+    struct platform_device *pdev = (struct platform_device *)pdev_id;
+    struct device *dev = &pdev->dev;
+    struct nna_driver_priv *priv = (struct nna_driver_priv *)vha_get_plat_data(dev);
+
+    irqreturn_t ret = IRQ_NONE;
+
+    ret = vha_handle_irq(dev);
+
+    if (ret == IRQ_WAKE_THREAD) {
+        schedule_work(&priv->work);
+    }
+}
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
+				 __func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and as a rule, it cannot
+	 * be dereferenced safely without memory barriers; that
+	 * is why it is guarded by __iomem (return of ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to
+	 * a regular void * pointer. We cast it here, assuming
+	 * the FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	/*pr_debug(
+		"Mapping %zu bytes into kernel memory (Phys:%08llX, Kptr:%p)\n",
+		size, addr, kptr);
+	pr_debug("[%c%c%c]\n",
+			 (mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
+			 (mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
+			 (mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');*/
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+/*	pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);*/
+	iounmap((volatile void __iomem *)addr);
+	return 0;
+}
+#endif
+
+static int vha_plat_probe(struct platform_device *pdev)
+{
+    int ret = 0;
+    struct nna_driver_priv *priv;
+    struct device *dev = &pdev->dev;
+    struct nexef_nna_platform_data *platdata;
+    struct resource *nna_registers;
+    uint64_t vha_mem_base, vha_mem_size;
+    uint64_t vha_mem_phys_offset = 0;
+    int heap;
+
+    dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+
+    priv = devm_kzalloc(dev, sizeof(struct nna_driver_priv), GFP_KERNEL);
+    if (!priv) {
+        ret = -ENOMEM;
+        goto out_no_free;
+    }
+
+    priv->pdev = pdev;
+
+    if (dev->dma_mask) {
+        dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+                 __func__, dev->dma_mask, *dev->dma_mask);
+    } else {
+        dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+        dev->dma_mask = &dev->coherent_dma_mask;
+    }
+
+    dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+    ret = dma_set_mask(dev, dma_get_mask(dev));
+
+    if (ret) {
+        dev_err(dev, "%s failed to set dma mask\n", __func__);
+        goto out_dma_free;
+    }
+
+
+    /* Map the NNA registers */
+    nna_registers = platform_get_resource_byname(pdev,
+            IORESOURCE_MEM, "nna-regs");
+    if (!nna_registers) {
+        ret = -EIO;
+        goto out_dma_free;
+    }
+
+    priv->nna_regs = devm_ioremap_resource(dev, nna_registers);
+    if (IS_ERR(priv->nna_regs)) {
+        ret = PTR_ERR(priv->nna_regs);
+        goto out_dma_free;
+    }
+    priv->nna_size = resource_size(nna_registers);
+
+    /* Get infos for DUT memory */
+    platdata = dev_get_platdata(dev);
+
+    /* Get out mem specs */
+    vha_mem_size = platdata->nna_memory_size;
+    vha_mem_base = platdata->nna_memory_base;
+    vha_mem_phys_offset = platdata->nna_memory_offset;
+
+    dev_dbg(dev, "PCI memory: base: %#llX - size: %#llX - offset: %#llX",
+            vha_mem_base, vha_mem_size, vha_mem_phys_offset);
+
+    /* patch heap config with PCI memory addresses */
+    for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
+        struct heap_config *cfg = &vha_plat_fpga_heap_configs[heap];
+
+        switch (cfg->type) {
+        case IMG_MEM_HEAP_TYPE_CARVEOUT:
+            cfg->options.carveout.phys = vha_mem_base;
+            cfg->options.carveout.size = vha_mem_size;
+            cfg->options.carveout.offs = vha_mem_phys_offset;
+
+            cfg->to_dev_addr = carveout_to_dev_addr;
+            cfg->to_host_addr = carveout_to_host_addr;
+
+            /* IO memory access callbacks */
+            cfg->options.carveout.get_kptr = carveout_get_kptr;
+            cfg->options.carveout.put_kptr = carveout_put_kptr;
+            break;
+
+        case IMG_MEM_HEAP_TYPE_DMABUF: /* Nothing to do here */
+            break;
+
+        default:
+            dev_err(dev, "Unsupported heap type %d!\n", cfg->type);
+            break;
+        }
+    }
+
+    reset_dut(dev->parent);
+
+    ret = vha_add_dev(dev,
+                      vha_plat_fpga_heap_configs,
+                      vha_plat_fpga_heaps,
+                      priv,
+                      priv->nna_regs,
+                      priv->nna_size);
+    if (ret) {
+        dev_err(dev, "failed to initialize driver core!\n");
+        goto out_dma_free;
+    }
+
+    /*
+     * Reset FPGA DUT only after disabling clocks in
+     * vha_add_dev()-> get properties.
+     * This workaround is required to ensure that
+     * clocks (on daughter board) are enabled for test slave scripts to
+     * read FPGA build version register.
+     */
+    reset_dut(dev->parent);
+
+    /* Install the ISR callback...*/
+    INIT_WORK(&priv->work, nna_soft_isr_cb);
+
+    ret = tc_set_interrupt_handler(dev->parent, TC_INTERRUPT_NNA, nna_hard_isr_cb, pdev);
+
+    ret |= tc_enable_interrupt(dev->parent, TC_INTERRUPT_NNA);
+    if (ret) {
+        dev_err(dev, "failed to request irq!\n");
+        goto out_rm_dev;
+    }
+
+    /* Try to calibrate the core if needed */
+    ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+    if (ret) {
+        dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+        goto out_rm_dev;
+    }
+    return ret;
+
+out_rm_dev:
+    /* Disable the interrupt handler in case it was enabled before a later step failed */
+    tc_set_interrupt_handler(dev->parent, TC_INTERRUPT_NNA, NULL, NULL);
+
+    vha_rm_dev(dev);
+
+out_dma_free:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
+    /* Release any declared mem regions */
+    dma_release_declared_memory(dev);
+#endif
+
+out_no_free:
+    return ret;
+}
+
+static int vha_plat_remove(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    struct nna_driver_priv *priv =
+            (struct nna_driver_priv *)vha_get_plat_data(dev);
+
+    dev_dbg(dev, "removing device\n");
+
+    /* Disable interrupts */
+    tc_disable_interrupt(dev->parent, TC_INTERRUPT_NNA);
+    tc_set_interrupt_handler(dev->parent, TC_INTERRUPT_NNA, NULL, NULL);
+    /* Make sure there is no work in the queue */
+    if (priv)
+        cancel_work_sync(&priv->work);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+    /* Release any declared mem regions */
+    dma_release_declared_memory(dev);
+#endif
+
+    vha_rm_dev(dev);
+
+    return 0;
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct platform_device *pdev, pm_message_t state)
+{
+    struct device *dev = &pdev->dev;
+    return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct platform_device *pdev)
+{
+    struct device *dev = &pdev->dev;
+    return vha_resume_dev(dev);
+}
+#endif
+
+/* Functions called by vha_core */
+int vha_plat_init(void)
+{
+    int ret;
+
+    ret = platform_driver_register(&vha_platform_drv);
+    if (ret) {
+        pr_err("failed to register platform driver!\n");
+        return ret;
+    }
+
+    return 0;
+}
+
+int vha_plat_deinit(void)
+{
+    int ret;
+
+    //reset_dut();
+
+    platform_driver_unregister(&vha_platform_drv);
+
+    ret = vha_deinit();
+    if (ret)
+        pr_err("VHA driver deinit failed\n");
+
+    return ret;
+}
+
+/*
+ * NOTE: a customer may want to use a spinlock to avoid
+ * problems with multi-threaded IO access.
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+    return (uint64_t)readl(addr) | ((uint64_t)readl(addr + 4) << 32);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+    writel(val & 0xffffffff, addr);
+    writel(((uint64_t)val >> 32), addr + 4);
+}
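+
+/*
+ * A minimal sketch of the spinlock-guarded variant suggested in the note
+ * above (the vha_plat_odin.c file later in this patch uses the same
+ * pattern):
+ *
+ *   static DEFINE_SPINLOCK(io_lock);
+ *
+ *   uint64_t vha_plat_read64(void *addr)
+ *   {
+ *       unsigned long flags;
+ *       u64 val;
+ *
+ *       spin_lock_irqsave(&io_lock, flags);
+ *       val = (uint64_t)readl(addr) |
+ *             ((uint64_t)readl(addr + 4) << 32);
+ *       spin_unlock_irqrestore(&io_lock, flags);
+ *       return val;
+ *   }
+ */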

+ 1152 - 0
driver/vha/platform/vha_plat_odin.c

@@ -0,0 +1,1152 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
+#include <linux/dma-mapping.h>
+#else
+#include <linux/dma-map-ops.h>
+#endif
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/workqueue.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/nn_sys_cr_vagus.h>
+#endif
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/vagus_system.h>
+#elif defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#endif
+
+#define DEVICE_NAME "vha"
+
+#define IS_ODIN_DEVICE(devid) ((devid) == PCI_ODIN_DEVICE_ID)
+
+/*
+ * from Odin Lite TRM rev 1.0.88
+ */
+
+#define PCI_ODIN_VENDOR_ID (0x1AEE)
+#define PCI_ODIN_DEVICE_ID (0x1010)
+
+/* Odin - System control register bar */
+#define PCI_ODIN_SYS_CTRL_REGS_BAR (0)
+
+#define PCI_ODIN_SYS_CTRL_BASE_OFFSET (0x0000)
+/* srs_core */
+#define PCI_ODIN_CORE_ID                        (0x0000)
+#define PCI_ODIN_CORE_REVISION                  (0x0004)
+#define PCI_ODIN_CORE_CHANGE_SET                (0x0008)
+#define PCI_ODIN_CORE_USER_ID                   (0x000C)
+#define PCI_ODIN_CORE_USER_BUILD                (0x0010)
+/* Resets */
+#define PCI_ODIN_CORE_INTERNAL_RESETN           (0x0080)
+#define PCI_ODIN_CORE_EXTERNAL_RESETN           (0x0084)
+#define PCI_ODIN_CORE_EXTERNAL_RESET            (0x0088)
+#define PCI_ODIN_CORE_INTERNAL_AUTO_RESETN      (0x008C)
+/* Clock */
+#define PCI_ODIN_CORE_CLK_GEN_RESET             (0x0090)
+/* Interrupts */
+#define PCI_ODIN_CORE_INTERRUPT_STATUS          (0x0100)
+#define PCI_ODIN_CORE_INTERRUPT_ENABLE          (0x0104)
+#define PCI_ODIN_CORE_INTERRUPT_CLR             (0x010C)
+#define PCI_ODIN_CORE_INTERRUPT_TEST            (0x0110)
+/* GPIOs */
+#define PCI_ODIN_CORE_NUM_GPIO                  (0x0180)
+#define PCI_ODIN_CORE_GPIO_EN                   (0x0184)
+#define PCI_ODIN_CORE_GPIO                      (0x0188)
+/* DUT Ctrl */
+#define PCI_ODIN_CORE_NUM_DUT_CTRL              (0x0190)
+#define PCI_ODIN_CORE_DUT_CTRL1                 (0x0194)
+#define PCI_ODIN_CORE_DUT_CTRL2                 (0x0198)
+#define PCI_ODIN_CORE_NUM_DUT_STAT              (0x019C)
+#define PCI_ODIN_CORE_DUT_STAT1                 (0x01A0)
+#define PCI_ODIN_CORE_DUT_STAT2                 (0x01A4)
+/* LEDs! */
+#define PCI_ODIN_CORE_DASH_LEDS                 (0x01A8)
+/* Core stuff */
+#define PCI_ODIN_CORE_CORE_STATUS               (0x0200)
+#define PCI_ODIN_CORE_CORE_CONTROL              (0x0204)
+#define PCI_ODIN_CORE_REG_BANK_STATUS           (0x0208)
+#define PCI_ODIN_CORE_MMCM_LOCK_STATUS          (0x020C)
+#define PCI_ODIN_CORE_GIST_STATUS               (0x0210)
+
+/* core bits definitions */
+#define INTERNAL_RESET_INTERNAL_RESETN_PIKE     (1 << 7)
+#define EXTERNAL_RESET_EXTERNAL_RESETN_DUT      (1 << 0)
+
+#define DUT_CTRL1_DUT_MST_OFFSET                (1 << 31)
+#define ODIN_CORE_CONTROL_DUT_OFFSET_SHIFT      (24)
+#define ODIN_CORE_CONTROL_DUT_OFFSET_MASK       (0x7 << ODIN_CORE_CONTROL_DUT_OFFSET_SHIFT)
+
+/* interrupt bits definitions */
+#define INT_INTERRUPT_MASTER_ENABLE             (1 << 31)
+#define INT_INTERRUPT_DUT0                      (1 << 0)
+#define INT_INTERRUPT_DUT1                      (1 << 9)
+
+/* srs_clk_blk */
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV1  (0x0020)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV2  (0x0024)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_OUT_DIV3  (0x001C)
+#define PCI_ODIN_CLK_BLK_DUT_REG_CLK_OUT_DIV1   (0x0028)
+#define PCI_ODIN_CLK_BLK_DUT_REG_CLK_OUT_DIV2   (0x002C)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT1     (0x0050)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT2     (0x0054)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_CLK_MULT3     (0x004C)
+#define PCI_ODIN_CLK_BLK_DUT_CORE_VLK_IN_DIV    (0x0058)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV1   (0x0220)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV2   (0x0224)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_OUT_DIV3   (0x021C)
+#define PCI_ODIN_CLK_BLK_DUT_MEM_CLK_OUT_DIV1   (0x0228)
+#define PCI_ODIN_CLK_BLK_DUT_MEM_CLK_OUT_DIV2   (0x022C)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT1      (0x0250)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT2      (0x0254)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_MULT3      (0x024C)
+#define PCI_ODIN_CLK_BLK_DUT_SYS_CLK_IN_DIV     (0x0258)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV1 (0x0620)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV2 (0x0624)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV3 (0x061C)
+#define PCI_ODIN_CLK_BLK_PDP_MEM_CLK_OUT_DIV1   (0x0628)
+#define PCI_ODIN_CLK_BLK_PDP_MEM_CLK_OUT_DIV2   (0x062C)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT1    (0x0650)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT2    (0x0654)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_MULT3    (0x064C)
+#define PCI_ODIN_CLK_BLK_PDP_PIXEL_CLK_IN_DIV   (0x0658)
+
+#define PCI_ODIN_CORE_REG_SIZE                  (0x1000)
+
+/* Odin - Device Under Test (DUT) register bar */
+#define PCI_ODIN_DUT_REGS_BAR (2)
+#define PCI_ODIN_DUT_MEM_BAR  (4)
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+/* Parameters applicable when using bus master mode */
+static unsigned long contig_phys_start;
+module_param(contig_phys_start, ulong, 0444);
+MODULE_PARM_DESC(contig_phys_start, "Physical address of start of contiguous region");
+
+static uint32_t contig_size;
+module_param(contig_size, uint, 0444);
+MODULE_PARM_DESC(contig_size, "Size of contiguous region: takes precedence over any PCI based memory");
+
+static uint32_t fpga_heap_type = IMG_MEM_HEAP_TYPE_UNIFIED;
+module_param(fpga_heap_type, uint, 0444);
+MODULE_PARM_DESC(fpga_heap_type, "Fpga primary heap type");
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "physical size in bytes. when 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "offset from PCI bar start. (default: 0)");
+
+static unsigned short pool_alloc_order;
+module_param(pool_alloc_order, ushort, 0444);
+MODULE_PARM_DESC(pool_alloc_order,
+		"Carveout pool allocation order, depends on PAGE_SIZE, "
+		"for CPU PAGE_SIZE=4kB, 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");
+
+/* Newer versions of Odin allow for dual DUT devices; this parameter selects at load time which DUT to use */
+static unsigned long dut_id = 0;
+module_param(dut_id, ulong, 0444);
+MODULE_PARM_DESC(dut_id, "DUT the driver tries to address. valid: {0, 1} (default: 0)");
+
+static bool mem_static_kptr = true;
+module_param(mem_static_kptr, bool, 0444);
+MODULE_PARM_DESC(mem_static_kptr,
+		"Creates static kernel mapping for fpga memory");
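+
+/*
+ * Illustrative module load line (module name and parameter values are
+ * examples only, not defaults):
+ *
+ *   insmod vha.ko contig_phys_start=0x40000000 contig_size=0x10000000 \
+ *          pool_alloc_order=2 dut_id=0 mem_static_kptr=1
+ *
+ * pool_alloc_order selects the carveout chunk size as PAGE_SIZE << order,
+ * so order 2 gives 16kB chunks with 4kB pages.
+ */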
+
+/* Maximum DUT_ID allowed */
+#define MAX_DUT_ID (1)
+
+static uint32_t odin_dut_register_offset[] = {
+				0x00000000, /* DUT 0 */
+				0x02000000, /* DUT 1 */
+};
+
+static uint32_t odin_dut_interrupt_bit[] = {
+				INT_INTERRUPT_DUT0, /* DUT 0 */
+				INT_INTERRUPT_DUT1, /* DUT 1 */
+};
+
+/*
+ * Special handling (not implemented) is required for the VHA device
+ * to be able to access both carveout buffers (internal memory) and
+ * dmabuf buffers (system memory). The latter have to go through
+ * the system bus to be accessed whereas the former do not.
+ */
+
+#if !defined(FPGA_BUS_MASTERING) && !defined(CONFIG_GENERIC_ALLOCATOR)
+#error Neither FPGA_BUS_MASTERING nor CONFIG_GENERIC_ALLOCATOR is defined
+#endif
+
+static struct heap_config vha_dev_fpga_heap_configs[] = {
+	/* Primary heap used for internal allocations */
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+	},
+#endif
+};
+static const int vha_dev_fpga_heaps = sizeof(vha_dev_fpga_heap_configs)/
+	sizeof(*vha_dev_fpga_heap_configs);
+
+static struct heap_config vha_plat_fpga_heap_configs[] = {
+	/* Secondary heap used for importing an external memory */
+#if defined(FPGA_BUS_MASTERING)
+#error Bus mastering not supported for now.
+	{
+		.type = -1, /* selected with fpga_heap_type */
+		.options = {
+			.unified.gfp_type = GFP_DMA32 | __GFP_ZERO,
+			.coherent.gfp_flags = GFP_DMA32 | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+	},
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+#if !defined(FPGA_BUS_MASTERING)
+		.options.dmabuf = {
+				.use_sg_dma = true,
+		},
+#endif
+	},
+#else
+#warning "Memory importing not supported!"
+#endif
+};
+
+static const int vha_plat_fpga_heaps = sizeof(vha_plat_fpga_heap_configs)/
+	sizeof(*vha_plat_fpga_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(PCI_ODIN_VENDOR_ID, PCI_ODIN_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+enum { CORE_REG_BANK = 0,
+	NNA_REG_BANK, MEM_REG_BANK,
+	REG_BANK_COUNT /* Must be the last */};
+
+struct imgpci_prvdata {
+	int irq;
+
+	struct {
+		int bar;
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} reg_bank[REG_BANK_COUNT];
+
+	struct pci_dev *pci_dev;
+};
+
+
+struct img_pci_driver {
+	struct pci_dev *pci_dev;
+	struct pci_driver pci_driver;
+	struct delayed_work irq_work;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
+		vha_plat_suspend, vha_plat_resume);
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA Odin driver version : " VERSION_STRING "\n");
+}
+
+static inline uint64_t __readreg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset) __maybe_unused;
+static inline void __writereg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, uint64_t val) __maybe_unused;
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_pci",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.groups = drv_groups,
+			.pm = &vha_pm_plat_ops,
+		}
+	},
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 4096;
+
+/**
+ * __readreg32 - Generic PCI bar read functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ */
+static inline unsigned int __readreg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	return ioread32(reg);
+}
+
+/**
+ * __writereg32 - Generic PCI bar write functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void __writereg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, int val)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	iowrite32(val, reg);
+}
+
+/*
+ * __readreg64 - Generic PCI bar read functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ */
+static inline uint64_t __readreg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	return (uint64_t)ioread32(reg) | ((uint64_t)ioread32(reg + 4) << 32);
+}
+
+/*
+ * __writereg64 - Generic PCI bar write functions
+ * @data: pointer to the data
+ * @bank: register bank
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void __writereg64(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, uint64_t val)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	iowrite32(val & 0xFFFFFFFF, reg);
+	iowrite32(val >> 32, reg + 4);
+}
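+
+/*
+ * Note: each 64-bit access above is made of two independent 32-bit
+ * accesses, so it is not atomic with respect to other CPUs or the device;
+ * callers that need atomicity must provide their own locking (see
+ * vha_plat_read64()/vha_plat_write64() below, which take a spinlock).
+ */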
+
+/**
+ * odin_core_writereg32 - Write to Odin control registers
+ * @data: pointer to the data
+ * @offset: offset within bank
+ * @val: value to be written
+ */
+static inline void odin_core_writereg32(struct imgpci_prvdata *data,
+		unsigned long offset, int val)
+{
+	__writereg32(data, CORE_REG_BANK, offset, val);
+}
+
+/**
+ * odin_core_readreg32 - Read Odin control registers
+ * @data: pointer to the data
+ * @offset: offset within bank
+ */
+static inline unsigned int odin_core_readreg32(struct imgpci_prvdata *data,
+		unsigned long offset)
+{
+	return __readreg32(data, CORE_REG_BANK, offset);
+}
+
+/**
+ * reset_dut - Reset the Device Under Test
+ * @data: pointer to the data
+ */
+static void reset_dut(struct imgpci_prvdata *data)
+{
+	uint32_t internal_rst = odin_core_readreg32(data, PCI_ODIN_CORE_INTERNAL_RESETN);
+	uint32_t external_rst = odin_core_readreg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN);
+
+	dev_dbg(&data->pci_dev->dev, "going to reset DUT fpga!\n");
+
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERNAL_RESETN,
+		internal_rst & ~(INTERNAL_RESET_INTERNAL_RESETN_PIKE));
+	odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN,
+		external_rst & ~(EXTERNAL_RESET_EXTERNAL_RESETN_DUT));
+
+	udelay(100); /* arbitrary delays, just in case! */
+
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERNAL_RESETN, internal_rst);
+	odin_core_writereg32(data, PCI_ODIN_CORE_EXTERNAL_RESETN, external_rst);
+
+	msleep(100);
+
+	dev_dbg(&data->pci_dev->dev, "DUT fpga reset done!\n");
+}
+
+/**
+ * pci_thread_irq - High latency interrupt handler
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ */
+static irqreturn_t pci_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+/**
+ * odin_isr_clear - Clear an interrupt
+ * @data: pointer to the data
+ * @intstatus: interrupt status
+ *
+ * Note: the reason for this function is unclear; it is taken from the
+ * Apollo/Atlas code, which has the same interrupt handler as Odin.
+ * It may be working around a hardware bug.
+static void odin_isr_clear(struct imgpci_prvdata *data, unsigned int intstatus)
+{
+	unsigned int max_retries = 1000;
+
+	while ((odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_STATUS) & intstatus) && max_retries--)
+		odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_CLR,
+			(INT_INTERRUPT_MASTER_ENABLE | intstatus));
+}
+
+
+/**
+ * pci_isr_cb - Low latency interrupt handler
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ */
+static irqreturn_t pci_isr_cb(int irq, void *dev_id)
+{
+	uint32_t intstatus;
+
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	struct imgpci_prvdata *data;
+
+	irqreturn_t ret = IRQ_NONE;
+
+	if (dev_id == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Spurious interrupt data/dev_id not initialised!\n");
+		goto exit;
+	}
+
+	data = vha_get_plat_data(&dev->dev);
+
+	if (data == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Invalid driver private data!\n");
+		goto exit;
+	}
+
+	/* Read interrupt status register */
+	intstatus = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_STATUS);
+
+	/* Now handle the ints */
+	if (intstatus & odin_dut_interrupt_bit[dut_id]) {
+		/* call real irq handler */
+		ret = vha_handle_irq(&dev->dev);
+	} else {
+		/* most likely this is a shared interrupt line */
+		dev_dbg(&dev->dev,
+			"%s: unexpected or spurious interrupt [%x] (shared IRQ?)!\n",
+			__func__, intstatus);
+		/* WARN_ON(1); */
+
+		goto exit;
+	}
+
+	/* Ack the ints */
+	odin_isr_clear(data, intstatus);
+
+exit:
+	return ret;
+}
+
+static inline void odin_reset_int(struct imgpci_prvdata *data)
+{
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE, 0);
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_CLR, 0xFFFFFFFF);
+}
+
+/**
+ * odin_enable_int - Enable an interrupt
+ * @data: pointer to the data
+ * @intmask: interrupt mask
+ */
+static inline void odin_enable_int(struct imgpci_prvdata *data,
+		uint32_t intmask)
+{
+	uint32_t irq_enabled = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE);
+	intmask &= odin_dut_interrupt_bit[dut_id];
+
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE, irq_enabled | intmask | INT_INTERRUPT_MASTER_ENABLE);
+}
+
+/**
+ * odin_disable_int - Disable an interrupt
+ * @data: pointer to the data
+ * @intmask: interrupt mask
+ */
+static inline void odin_disable_int(struct imgpci_prvdata *data,
+		uint32_t intmask)
+{
+	uint32_t irq_enabled = odin_core_readreg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE);
+	intmask &= odin_dut_interrupt_bit[dut_id];
+
+	odin_core_writereg32(data, PCI_ODIN_CORE_INTERRUPT_ENABLE,
+		irq_enabled & ~intmask);
+}
+
+/**
+ * odin_allocate_registers - Allocate memory for a register (or memory) bank
+ * @pci_dev: pointer to pci device
+ * @data: pointer to the data
+ * @bank: bank to set
+ * @bar: BAR where the register are
+ * @base: base address in the BAR
+ * @size: size of the register set
+ */
+static inline int odin_allocate_registers(struct pci_dev *pci_dev,
+		struct imgpci_prvdata *data, int bank,
+		int bar, unsigned long base, unsigned long size)
+{
+	unsigned long bar_size = pci_resource_len(pci_dev, bar);
+	unsigned long bar_addr = pci_resource_start(pci_dev, bar);
+	unsigned long bar_max_size = bar_size - base;
+	BUG_ON((base > bar_size) || ((base+size) > bar_size));
+
+	data->reg_bank[bank].bar = bar;
+	data->reg_bank[bank].addr = bar_addr + base;
+	data->reg_bank[bank].size = min(size, bar_max_size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	data->reg_bank[bank].km_addr = devm_ioremap_nocache(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#else
+	data->reg_bank[bank].km_addr = devm_ioremap(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#endif
+
+	pr_debug("[bank %u] bar:%d addr:0x%lx size:0x%lx km:0x%px\n",
+			bank, bar, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size,
+			data->reg_bank[bank].km_addr);
+
+	return data->reg_bank[bank].km_addr == NULL;
+}
+
+int vha_plat_deinit(void)
+{
+	struct pci_dev *dev = vha_pci_drv.pci_dev;
+	int ret;
+
+	if (dev) {
+		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+		if (data) {
+			/* reset the hardware */
+			reset_dut(data);
+		} else {
+			dev_dbg(&dev->dev,
+					"%s: prv data not found, HW reset omitted\n",
+					__func__);
+		}
+	} else {
+		/*pr_debug("%s: dev missing, HW reset omitted\n", __func__);*/
+	}
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#define NNA_REG_BAR (PCI_ODIN_DUT_REGS_BAR)
+#ifdef CFG_SYS_VAGUS
+#define NNA_REG_SIZE (_REG_SIZE + _REG_NNSYS_SIZE)
+#else
+#define NNA_REG_SIZE (_REG_SIZE)
+#endif
+
+#define NNA_REG_OFFSET (_REG_START)
+
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
+				 __func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
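+
+/*
+ * Worked example (made-up numbers): with phys = 0x80000000,
+ * size = 0x10000000 and offs = 0, carveout_to_dev_addr() translates the
+ * host physical address 0x80001000 to the device address
+ * 0x80001000 - 0x80000000 = 0x1000, and carveout_to_host_addr() maps the
+ * device address 0x1000 back to 0x80000000 + 0x1000 = 0x80001000.
+ */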
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and as a rule, it cannot
+	 * be dereferenced safely without memory barriers, that
+	 * is why it is guarded by __iomem (return of ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to
+	 * a regular void * pointer. We cast it here assuming
+	 * the FPGA host is x86 and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	/*pr_debug(
+		"Mapping %zu bytes into kernel memory (Phys:%08llX, Kptr:%p)\n",
+		size, addr, kptr);
+	pr_debug("[%c%c%c]\n",
+			 (mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
+			 (mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
+			 (mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');*/
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+/*	pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);*/
+	iounmap(addr);
+	return 0;
+}
+#endif
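+
+/*
+ * Illustrative only (made-up address/size): the memory manager is expected
+ * to pair the two callbacks above around accesses when no static kernel
+ * mapping exists:
+ *
+ *   void *kptr = carveout_get_kptr(0x80000000, 0x1000,
+ *                                  IMG_MEM_ATTR_WRITECOMBINE);
+ *   if (kptr) {
+ *       memset(kptr, 0, 0x1000);  // direct access assumes x86, see above
+ *       carveout_put_kptr(kptr);
+ *   }
+ */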
+
+/*
+ * IO hooks.
+ * NOTE: a spinlock is used to avoid
+ * problems with multi-threaded IO access.
+ */
+static DEFINE_SPINLOCK(io_irq_lock);
+
+uint64_t vha_plat_read64(void *addr)
+{
+	u64 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	val = (uint64_t)readl((const volatile void __iomem *)addr) |
+			((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+	return val;
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&io_irq_lock, flags);
+	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
+	writel((uint32_t)(val >> 32),        (volatile void __iomem *)addr + 4);
+	spin_unlock_irqrestore(&io_irq_lock, flags);
+}
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int ret = 0;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	unsigned long vha_base_mem, vha_mem_size;
+	struct device *dev = &pci_dev->dev;
+	int heap;
+	uint32_t tmp;
+
+	dev_dbg(dev, "probing device, pci_dev: %p\n", pci_dev);
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+				__func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(36));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("Memory allocation error, aborting.\n");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
+
+	/* Allocate odin core registers */
+	ret = odin_allocate_registers(pci_dev, data,
+				CORE_REG_BANK, PCI_ODIN_SYS_CTRL_REGS_BAR,
+				PCI_ODIN_SYS_CTRL_BASE_OFFSET,
+				PCI_ODIN_CORE_REG_SIZE);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for odin regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	/* Display some infos */
+	{
+		uint32_t odin_id  = odin_core_readreg32(data, PCI_ODIN_CORE_ID);
+		uint32_t odin_rev = odin_core_readreg32(data, PCI_ODIN_CORE_REVISION);
+		uint32_t odin_cs  = odin_core_readreg32(data, PCI_ODIN_CORE_CHANGE_SET);
+		uint32_t odin_ui  = odin_core_readreg32(data, PCI_ODIN_CORE_USER_ID);
+		uint32_t odin_ub  = odin_core_readreg32(data, PCI_ODIN_CORE_USER_BUILD);
+
+		pr_info("Found Odin lite board v%d.%d (ID:%X CS:%X UI:%X UB:%X)\n",
+			(odin_rev >> 8) & 0xF, odin_rev & 0xF, odin_id & 0x7, odin_cs, odin_ui, odin_ub);
+	}
+
+	if (dut_id > MAX_DUT_ID) {
+		dev_err(dev, "Invalid DUT number (%lu), setting it to 0\n", dut_id);
+		dut_id = 0;
+	}
+
+	/* Allocate NNA register space */
+	ret = odin_allocate_registers(pci_dev, data,
+				NNA_REG_BANK, NNA_REG_BAR,
+				NNA_REG_OFFSET + odin_dut_register_offset[dut_id],
+				NNA_REG_SIZE);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for vha regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	/* Allocate DUT memory space */
+	vha_mem_size = pci_resource_len(pci_dev, PCI_ODIN_DUT_MEM_BAR);
+	if (vha_mem_size > maxmapsize)
+		vha_mem_size = maxmapsize;
+
+	vha_base_mem = pci_resource_start(pci_dev, PCI_ODIN_DUT_MEM_BAR);
+
+	/* change alloc size according to module parameter */
+	if (pci_size)
+		vha_mem_size = pci_size;
+
+	/* allocating memory only when static kernel mapping is requested,
+	 * so hand set values here: */
+	data->reg_bank[MEM_REG_BANK].bar = PCI_ODIN_DUT_MEM_BAR;
+	data->reg_bank[MEM_REG_BANK].addr = vha_base_mem;
+	data->reg_bank[MEM_REG_BANK].size = vha_mem_size;
+	if (mem_static_kptr) {
+		data->reg_bank[MEM_REG_BANK].km_addr = devm_ioremap(
+			&pci_dev->dev, data->reg_bank[MEM_REG_BANK].addr,
+			data->reg_bank[MEM_REG_BANK].size);
+		if (data->reg_bank[MEM_REG_BANK].km_addr == NULL) {
+			dev_err(dev, "Can't allocate memory for vha regs!");
+			ret = -ENOMEM;
+			goto out_release;
+		}
+	}
+
+	pr_debug("[bank %u] bar: %d addr: 0x%lx (kptr:%p) size: 0x%lx\n",
+			MEM_REG_BANK, PCI_ODIN_DUT_MEM_BAR,
+				data->reg_bank[MEM_REG_BANK].addr,
+				data->reg_bank[MEM_REG_BANK].km_addr,
+				data->reg_bank[MEM_REG_BANK].size);
+
+#ifdef FPGA_BUS_MASTERING
+	tmp = odin_core_readreg32(data, PCI_ODIN_CORE_DUT_CTRL1);
+	tmp &= ~DUT_CTRL1_DUT_MST_OFFSET;
+	odin_core_writereg32(data, PCI_ODIN_CORE_DUT_CTRL1, tmp);
+
+	tmp = odin_core_readreg32(data, PCI_ODIN_CORE_CORE_CONTROL);
+	tmp &= ODIN_CORE_CONTROL_DUT_OFFSET_MASK;
+	odin_core_writereg32(data, PCI_ODIN_CORE_CORE_CONTROL, tmp);
+#else
+	/* Set the Odin board in a similar way as the Apollo is,
+	 * DUT memory starting at 0x0 instead of 0x4_0000_0000
+	 */
+	tmp = odin_core_readreg32(data, PCI_ODIN_CORE_DUT_CTRL1);
+	tmp |= DUT_CTRL1_DUT_MST_OFFSET;
+	odin_core_writereg32(data, PCI_ODIN_CORE_DUT_CTRL1, tmp);
+
+	tmp = odin_core_readreg32(data, PCI_ODIN_CORE_CORE_CONTROL);
+	tmp &= ODIN_CORE_CONTROL_DUT_OFFSET_MASK;
+	tmp |= (4 << ODIN_CORE_CONTROL_DUT_OFFSET_SHIFT);
+	odin_core_writereg32(data, PCI_ODIN_CORE_CORE_CONTROL, tmp);
+#endif
+
+	/* Get the IRQ...*/
+	data->irq = pci_dev->irq;
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	reset_dut(data);
+
+	odin_reset_int(data);
+	odin_enable_int(data, odin_dut_interrupt_bit[dut_id]);
+
+	for (heap = 0; heap < vha_dev_fpga_heaps; heap++) {
+		struct heap_config *cfg = &vha_dev_fpga_heap_configs[heap];
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+			if (contig_size && contig_phys_start) {
+				/*
+				 * Two types of carveout memory are
+				 * supported:
+				 * 1) memory carved out of the main DDR
+				 *    region, e.g. with the linux boot
+				 *    option memmap=512M$0x5CAFFFFF,
+				 *    configured with the module params
+				 *    contig_phys_start and contig_size;
+				 * 2) DDR populated on the actual PCI
+				 *    card, in BAR 4.
+				 * The module parameters take precedence
+				 * over PCI memory.
+				 */
+				cfg->options.carveout.phys = contig_phys_start;
+				cfg->options.carveout.size = contig_size;
+				cfg->to_dev_addr = NULL;
+				cfg->to_host_addr = NULL;
+				/*dev_info(dev, "using %dMB CARVEOUT at x%lx\n",
+					 contig_size/1024/1024,
+					 contig_phys_start);*/
+			} else {
+				cfg->options.carveout.phys =
+						data->reg_bank[MEM_REG_BANK].addr;
+				if (mem_static_kptr)
+					cfg->options.carveout.kptr =
+							data->reg_bank[MEM_REG_BANK].km_addr;
+				cfg->options.carveout.size =
+						data->reg_bank[MEM_REG_BANK].size;
+				cfg->options.carveout.offs = pci_offset;
+				cfg->to_dev_addr = carveout_to_dev_addr;
+				cfg->to_host_addr = carveout_to_host_addr;
+			/*  dev_info(dev,
+					"using %zuMB CARVEOUT from PCI at 0x%x\n",
+					cfg->options.carveout.size/1024/1024,
+					cfg->options.carveout.phys);*/
+			}
+			/* IO memory access callbacks */
+			if (!mem_static_kptr) {
+				/* Dynamic kernel memory mapping */
+				cfg->options.carveout.get_kptr = carveout_get_kptr;
+				cfg->options.carveout.put_kptr = carveout_put_kptr;
+			}
+			/* Allocation order */
+			cfg->options.carveout.pool_order = pool_alloc_order;
+
+			break;
+		}
+#endif
+
+		if (cfg->type == IMG_MEM_HEAP_TYPE_COHERENT) {
+			ret = dma_declare_coherent_memory(dev,
+					contig_phys_start,
+					contig_phys_start,
+					contig_size
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+					, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE
+#else
+					, DMA_MEMORY_EXCLUSIVE
+#endif
+#endif
+					);
+			if (ret == 0) {
+				dev_err(dev, "failed to initialize coherent memory!\n");
+				/*
+				 * We will fall back to the default pool
+				 * anyway, so do not bail out.
+				 * goto out_release;
+				 */
+			}
+			break;
+		}
+	}
+
+	ret = vha_add_dev(dev,
+			vha_dev_fpga_heap_configs,
+			vha_dev_fpga_heaps,
+			data,
+			data->reg_bank[NNA_REG_BANK].km_addr,
+			data->reg_bank[NNA_REG_BANK].size);
+	if (ret) {
+		dev_err(dev, "failed to initialize driver core!\n");
+		goto out_heap_deinit;
+	}
+
+	/*
+	 * Reset FPGA DUT only after disabling clocks in
+	 * vha_add_dev()-> get properties.
+	 * This workaround is required to ensure that
+	 * clocks (on daughter board) are enabled for test slave scripts to
+	 * read FPGA build version register.
+	 * NOTE: Asserting other bits like DDR reset bit cause problems
+	 * with bus mastering feature, thus results in memory failures.
+	 */
+	reset_dut(data);
+
+	/*uint32_t fpga_rev = odin_readreg32(data, 1,
+			FPGA_IMAGE_REV_OFFSET) & FPGA_IMAGE_REV_MASK;
+	dev_dbg(dev, "fpga image revision: 0x%x\n", fpga_rev);
+	if (!fpga_rev || fpga_rev == 0xdead1) {
+		dev_err(dev, "fpga revision incorrect (0x%x)!\n",
+				fpga_rev);
+		goto out_rm_dev;
+	}*/
+
+	/* Install the ISR callback...*/
+	ret = devm_request_threaded_irq(dev, data->irq, &pci_isr_cb,
+			&pci_thread_irq, IRQF_SHARED, DEVICE_NAME,
+			(void *)pci_dev);
+	if (ret) {
+		dev_err(dev, "failed to request irq!\n");
+		goto out_rm_dev;
+	}
+	dev_dbg(dev, "registered irq %d\n", data->irq);
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+
+out_heap_deinit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(dev);
+#endif
+
+	/* Make sure int are no longer enabled */
+	odin_disable_int(data, odin_dut_interrupt_bit[dut_id]);
+
+out_release:
+	pci_release_regions(pci_dev);
+
+out_disable:
+	pci_disable_device(pci_dev);
+
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+	dev_dbg(&dev->dev, "removing device\n");
+
+	if (data == NULL) {
+		dev_err(&dev->dev, "PCI priv data missing!\n");
+	} else {
+		/*
+		 * We need to disable interrupts for the
+		 * embedded device via the fpga interrupt controller...
+		 */
+		odin_disable_int(data, odin_dut_interrupt_bit[dut_id]);
+
+		/* Unregister int */
+		devm_free_irq(&dev->dev, data->irq, dev);
+
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(&dev->dev);
+#endif
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+#if 0
+#ifdef FPGA_BUS_MASTERING
+	vha_plat_fpga_heap_configs[0].type = fpga_heap_type;
+#endif
+#endif
+
+	ret = vha_init_plat_heaps(vha_plat_fpga_heap_configs, vha_plat_fpga_heaps);
+	if (ret) {
+		pr_err("failed to initialize global heaps\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}

+ 1065 - 0
driver/vha/platform/vha_plat_orion.c

@@ -0,0 +1,1065 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+/*
+ * Things left to be done at a later point as of 28/02/2019:
+ *
+ * - Maybe add code to set the DUT clock
+ * FIXME: Find a way to get DUT register size from .def files
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
+#include <linux/dma-mapping.h>
+#else
+#include <linux/dma-map-ops.h>
+#endif
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/workqueue.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#if defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#else
+#error System configuration not supported!
+#endif
+
+#define DEVICE_NAME "vha"
+
+#define IS_SIRIUS_DEVICE(devid) ((devid) == PCI_SIRIUS_DEVICE_ID)
+
+/*
+ * from Sirius TRM rev 1.0.3
+ */
+
+#define PCI_SIRIUS_VENDOR_ID (0x1AEE)
+#define PCI_SIRIUS_DEVICE_ID (0x1020)
+
+/* Sirius - System control register bar */
+#define PCI_SIRIUS_SYS_CTRL_REGS_BAR (0)
+
+#define PCI_SIRIUS_SYS_CTRL_BASE_OFFSET (0x0000)
+/* srs_core */
+#define PCI_SIRIUS_SRS_CORE_ID                        (0x0000)
+#define PCI_SIRIUS_SRS_CORE_REVISION                  (0x0004)
+#define PCI_SIRIUS_SRS_CORE_CHANGE_SET                (0x0008)
+#define PCI_SIRIUS_SRS_CORE_USER_ID                   (0x000C)
+#define PCI_SIRIUS_SRS_CORE_USER_BUILD                (0x0010)
+#define PCI_SIRIUS_SRS_CORE_SOFT_RESETN               (0x0080)
+#define PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN           (0x0084)
+#define PCI_SIRIUS_SRS_CORE_SOFT_AUTO_RESETN          (0x0088)
+#define PCI_SIRIUS_SRS_CORE_CLK_GEN_RESET             (0x0090)
+#define PCI_SIRIUS_SRS_CORE_NUM_GPIO                  (0x0180)
+#define PCI_SIRIUS_SRS_CORE_GPIO_EN                   (0x0184)
+#define PCI_SIRIUS_SRS_CORE_GPIO                      (0x0188)
+#define PCI_SIRIUS_SRS_CORE_SPI_MASTER_IFACE          (0x018C)
+#define PCI_SIRIUS_SRS_CORE_SYS_IP_STATUS             (0x0200)
+#define PCI_SIRIUS_SRS_CORE_CORE_CONTROL              (0x020D)
+#define PCI_SIRIUS_SRS_CORE_REG_BANK_STATUS           (0x0208)
+#define PCI_SIRIUS_SRS_CORE_MMCM_LOCK_STATUS          (0x020C)
+#define PCI_SIRIUS_SRS_CORE_GIST_STATUS               (0x0210)
+#define PCI_SIRIUS_SRS_CORE_SENSOR_BOARD              (0x0214)
+
+/* srs_core bits definitions */
+#define DUT_SOFT_RESETN_DUT_SOFT_RESETN_EXTERNAL      (1 << 0)
+
+/* srs_clk_blk */
+#define PCI_SIRIUS_CLOCK_CTRL_BASE_OFFSET (0x2000)
+
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV1  (0x0020)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV2  (0x0024)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV3  (0x001C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_REG_CLK_OUT_DIV1   (0x0028)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_REG_CLK_OUT_DIV2   (0x002C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT1     (0x0050)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT2     (0x0054)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT3     (0x004C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_VLK_IN_DIV    (0x0058)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV1   (0x0220)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV2   (0x0224)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV3   (0x021C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_MEM_CLK_OUT_DIV1   (0x0228)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_MEM_CLK_OUT_DIV2   (0x022C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT1      (0x0250)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT2      (0x0254)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT3      (0x024C)
+#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_IN_DIV     (0x0258)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV1 (0x0620)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV2 (0x0624)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV3 (0x061C)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_MEM_CLK_OUT_DIV1   (0x0628)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_MEM_CLK_OUT_DIV2   (0x062C)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT1    (0x0650)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT2    (0x0654)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT3    (0x064C)
+#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_IN_DIV   (0x0658)
+
+#define PCI_SIRIUS_SRS_REG_SIZE                       (0x1000)
+
+/* Interrupts are part of CORE */
+#define PCI_SIRIUS_CORE_INTERRUPT_STATUS                (0x0218)
+#define PCI_SIRIUS_CORE_INTERRUPT_ENABLE                (0x021C)
+#define PCI_SIRIUS_CORE_INTERRUPT_CLR                   (0x0220)
+#define PCI_SIRIUS_CORE_INTERRUPT_TEST                  (0x0224)
+#define PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR           (0x0228)
+
+#define PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR_CLR       (1 << 1)
+
+/* interrupt bits definitions */
+#define SIRIUS_INTERRUPT_MASTER_ENABLE                  (1 << 31)
+
+#define SIRIUS_INTERRUPT_DUT0                           (1 << 0)
+#define SIRIUS_INTERRUPT_DUT1                           (1 << 1)
+#define SIRIUS_INTERRUPT_I2C                            (1 << 2)
+#define SIRIUS_INTERRUPT_SPI                            (1 << 3)
+#define SIRIUS_INTERRUPT_PDP                            (1 << 1)
+#define SIRIUS_INTERRUPT_APM                            (1 << 4)
+#define SIRIUS_INTERRUPT_ALL (SIRIUS_INTERRUPT_DUT0 | SIRIUS_INTERRUPT_DUT1 | SIRIUS_INTERRUPT_I2C | \
+			SIRIUS_INTERRUPT_SPI | SIRIUS_INTERRUPT_PDP | SIRIUS_INTERRUPT_APM)
+
+
+/* Sirius - Device Under Test (DUT) register bar */
+#define PCI_SIRIUS_DUT_REGS_BAR (2)
+#define PCI_SIRIUS_DUT_MEM_BAR  (4)
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+/* Parameters applicable when using bus master mode */
+static unsigned long contig_phys_start;
+module_param(contig_phys_start, ulong, 0444);
+MODULE_PARM_DESC(contig_phys_start, "Physical address of start of contiguous region");
+
+static uint32_t contig_size;
+module_param(contig_size, uint, 0444);
+MODULE_PARM_DESC(contig_size, "Size of contiguous region: takes precedence over any PCI based memory");
+
+static uint32_t fpga_heap_type = IMG_MEM_HEAP_TYPE_UNIFIED;
+module_param(fpga_heap_type, uint, 0444);
+MODULE_PARM_DESC(fpga_heap_type, "Fpga primary heap type");
+
+static unsigned long pci_size;
+module_param(pci_size, ulong, 0444);
+MODULE_PARM_DESC(pci_size, "physical size in bytes. when 0 (the default), use all memory in the PCI bar");
+
+static unsigned long pci_offset;
+module_param(pci_offset, ulong, 0444);
+MODULE_PARM_DESC(pci_offset, "offset from PCI bar start. (default: 0)");
+
+enum pci_irq_type {
+	IRQ_TYPE_AUTO = 0,
+	IRQ_TYPE_INTA = 1,
+	IRQ_TYPE_MSI  = 2,
+};
+
+static unsigned long pci_irq_type = IRQ_TYPE_AUTO;
+module_param(pci_irq_type, ulong, 0444);
+MODULE_PARM_DESC(pci_irq_type, "Type of IRQ: 0: Auto, 1: INTA, 2: MSI");
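+
+/*
+ * Example (illustrative; the module name is an assumption): force MSI
+ * interrupts at load time:
+ *
+ *   insmod vha.ko pci_irq_type=2
+ */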
+
+/* Some Orion DUT images include two DUTs, so we need to allow selecting which one to use at load time */
+static unsigned long dut_id = 0;
+module_param(dut_id, ulong, 0444);
+MODULE_PARM_DESC(dut_id, "DUT the driver tries to address. valid: {0, 1} (default: 0)");
+
+/* Maximum DUT_ID allowed */
+#define MAX_DUT_ID (1)
+
+static uint32_t sirius_dut_register_offset[] = {
+	0x00000000, /* DUT 0 */
+	0x20000000, /* DUT 1 */ 
+};
+
+static uint32_t sirius_dut_interrupt_bit[] = {
+	SIRIUS_INTERRUPT_DUT0, /* DUT 0 */
+	SIRIUS_INTERRUPT_DUT1, /* DUT 1 */
+};
+
+
+/*
+ * Special handling (not implemented) is required for the VHA device
+ * to be able to access both carveout buffers (internal memory) and
+ * dmabuf buffers (system memory). The latter have to go through
+ * the system bus to be accessed whereas the former do not.
+ */
+static struct heap_config vha_plat_fpga_heap_configs[] = {
+	/* Primary heap used for internal allocations */
+#ifdef FPGA_BUS_MASTERING
+#error Bus mastering not supported
+	{
+		.type = -1, /* selected with fpga_heap_type */
+		.options = {
+			.unified.gfp_type = GFP_DMA32 | __GFP_ZERO,
+			.coherent.gfp_flags = GFP_DMA32 | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+		.to_host_addr = NULL,
+	},
+#elif defined(CONFIG_GENERIC_ALLOCATOR)
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+		/* .to_host_addr to be filled at run time */
+		.cache_attr = IMG_MEM_ATTR_WRITECOMBINE,
+	},
+#else
+#error Neither FPGA_BUS_MASTERING nor CONFIG_GENERIC_ALLOCATOR was defined
+#endif
+
+	/* Secondary heap used for importing an external memory */
+#ifdef CONFIG_DMA_SHARED_BUFFER
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+		.to_host_addr = NULL,
+	},
+#else
+#warning "Memory importing not supported!"
+#endif
+};
+
+static const int vha_plat_fpga_heaps = sizeof(vha_plat_fpga_heap_configs)/
+	sizeof(*vha_plat_fpga_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(PCI_SIRIUS_VENDOR_ID, PCI_SIRIUS_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+enum { SRS_REG_BANK, INTC_REG_BANK, DUT_REG_BANK, DUT_MEM_BANK };
+
+struct imgpci_prvdata {
+	int irq;
+
+	struct {
+		int bar;
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} reg_bank[4];
+
+	struct pci_dev *pci_dev;
+};
+
+
+struct img_pci_driver {
+	struct pci_dev *pci_dev;
+	struct pci_driver pci_driver;
+	struct delayed_work irq_work;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
+		vha_plat_suspend, vha_plat_resume);
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA Orion driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_orion",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.groups = drv_groups,
+			.pm = &vha_pm_plat_ops,
+		}
+	},
+};
+
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;
+
+/*
+ * __readreg32 - Generic PCI bar read function
+ */
+static inline unsigned int __readreg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	return ioread32(reg);
+}
+
+/*
+ * __writereg32 - Generic PCI bar write functions
+ */
+static inline void __writereg32(struct imgpci_prvdata *data,
+		int bank, unsigned long offset, int val)
+{
+	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
+			offset);
+	/*pr_err(">>> Writing to bank %d, offset 0x%04X value 0x%08X\n",
+	 * bank, offset, val);*/
+	iowrite32(val, reg);
+}
+
+/*
+ * sirius_core_writereg32 - Write to Sirius control registers
+ */
+static inline void sirius_core_writereg32(struct imgpci_prvdata *data,
+		unsigned long offset, int val)
+{
+	__writereg32(data, SRS_REG_BANK, offset, val);
+}
+
+/*
+ * sirius_core_readreg32 - Read Sirius control registers
+ */
+static inline unsigned int sirius_core_readreg32(struct imgpci_prvdata *data,
+		unsigned long offset)
+{
+	return __readreg32(data, SRS_REG_BANK, offset);
+}
+
+/*
+ * sirius_intc_writereg32 - Write to Sirius control registers
+ */
+static inline void sirius_intc_writereg32(struct imgpci_prvdata *data,
+		unsigned long offset, int val)
+{
+	__writereg32(data, INTC_REG_BANK, offset, val);
+}
+
+/*
+ * sirius_intc_readreg32 - Read Sirius control registers
+ */
+static inline unsigned int sirius_intc_readreg32(struct imgpci_prvdata *data,
+		unsigned long offset)
+{
+	return __readreg32(data, INTC_REG_BANK, offset);
+}
+
+/*
+ * reset_dut - Reset the Device Under Test
+ */
+static void reset_dut(struct imgpci_prvdata *data)
+{
+	dev_dbg(&data->pci_dev->dev, "going to reset DUT fpga!\n");
+
+	sirius_core_writereg32(data, PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN, 0);
+
+	udelay(100); /* arbitrary delays, just in case! */
+
+	sirius_core_writereg32(data,
+			PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN,
+			DUT_SOFT_RESETN_DUT_SOFT_RESETN_EXTERNAL);
+
+	msleep(500);
+
+	dev_dbg(&data->pci_dev->dev, "DUT fpga reset done!\n");
+}
+
+/*
+ * sirius_enable_int - Enable an interrupt
+ */
+static inline void sirius_enable_int(struct imgpci_prvdata *data, uint32_t intmask)
+{
+	uint32_t irq_enabled = sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE);
+
+	/* Only accept to enable DUT interrupt */
+	intmask &= sirius_dut_interrupt_bit[dut_id];
+
+	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE,
+							 irq_enabled | intmask | SIRIUS_INTERRUPT_MASTER_ENABLE);
+}
+
+/*
+ * sirius_disable_int - Disable an interrupt
+ */
+static inline void sirius_disable_int(struct imgpci_prvdata *data, uint32_t intmask)
+{
+	uint32_t irq_enabled = sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE);
+
+	/* Only accept to disable DUT interrupt */
+	intmask &= sirius_dut_interrupt_bit[dut_id];
+
+	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE,
+							 irq_enabled & ~intmask);
+}
+
+/*
+ * sirius_read_int_status - Read interrupt status
+ */
+static inline uint32_t sirius_read_int_status(struct imgpci_prvdata *data)
+{
+	return sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_STATUS);
+}
+
+/*
+ * sirius_ack_int - Ack interrupts
+ */
+static inline void sirius_ack_int(struct imgpci_prvdata *data, uint32_t intstatus)
+{
+	unsigned int max_retries = 1000;
+
+	while ((sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_STATUS) & intstatus) && max_retries--)
+		sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_CLR,
+								 (SIRIUS_INTERRUPT_MASTER_ENABLE | intstatus));
+
+	/*
+	 * Temporary until the FPGA is updated:
+	 * clear the "timeout" regardless of its status to work around
+	 * some bugs in there.
+	 */
+	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR, PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR_CLR);
+}
+
+/*
+ * pci_thread_irq - High latency interrupt handler
+ */
+static irqreturn_t pci_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+/*
+ * pci_isr_cb - Low latency interrupt handler
+ */
+static irqreturn_t pci_isr_cb(int irq, void *dev_id)
+{
+	uint32_t intstatus;
+
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	struct imgpci_prvdata *data;
+
+	irqreturn_t ret = IRQ_NONE;
+
+	if (dev_id == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Spurious interrupt data/dev_id not initialised!\n");
+		goto exit;
+	}
+
+	data = vha_get_plat_data(&dev->dev);
+
+	if (data == NULL) {
+		/* Spurious interrupt: not yet initialised. */
+		pr_warn("Invalid driver private data!\n");
+		goto exit;
+	}
+
+	/* Read interrupt status register */
+	intstatus = sirius_read_int_status(data);
+
+	/* Now handle the ints */
+	if (intstatus & sirius_dut_interrupt_bit[dut_id]) {
+		/* call real irq handler */
+		ret = vha_handle_irq(&dev->dev);
+	} else {
+		/* This is intentional: on this target the interrupt line
+		 * cannot be shared because we are using MSI, so any
+		 * interrupt that is not from the DUT is clearly spurious
+		 * and means that one device on Sirius is not properly
+		 * configured.
+		 */
+		dev_warn(&dev->dev,
+				"%s: unexpected or spurious interrupt [%x]!\n",
+				__func__, intstatus);
+		WARN_ON(1);
+	}
+
+	/* Ack the ints */
+	sirius_ack_int(data, intstatus);
+
+exit:
+	return ret;
+}
+
+/**
+ * sirius_allocate_registers - Allocate memory for a register (or memory) bank
+ * @pci_dev: the pci device
+ * @data: pointer to the data
+ * @bank: bank to set
+ * @bar: BAR where the register are
+ * @base: base address in the BAR
+ * @size: size of the register set
+ */
+static inline int sirius_allocate_registers(struct pci_dev *pci_dev,
+		struct imgpci_prvdata *data, int bank,
+		int bar, unsigned long base, unsigned long size)
+{
+	unsigned long bar_size = pci_resource_len(pci_dev, bar);
+	unsigned long bar_addr = pci_resource_start(pci_dev, bar);
+	unsigned long bar_max_size = bar_size - base;
+	BUG_ON((base > bar_size) || ((base+size) > bar_size));
+
+	data->reg_bank[bank].bar = bar;
+	data->reg_bank[bank].addr = bar_addr + base;
+	data->reg_bank[bank].size = min(size, bar_max_size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	data->reg_bank[bank].km_addr = devm_ioremap_nocache(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#else
+	data->reg_bank[bank].km_addr = devm_ioremap(
+			&pci_dev->dev, data->reg_bank[bank].addr,
+			data->reg_bank[bank].size);
+#endif
+
+	pr_debug("[bank %u] bar:%d addr:%pa size:0x%lx km:0x%p\n",
+			bank, bar, &data->reg_bank[bank].addr,
+			data->reg_bank[bank].size,
+			data->reg_bank[bank].km_addr);
+
+	return data->reg_bank[bank].km_addr == NULL;
+}
+
+int vha_plat_deinit(void)
+{
+	struct pci_dev *dev = vha_pci_drv.pci_dev;
+	int ret;
+
+	if (dev) {
+		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+		if (data) {
+			/* reset the hardware */
+			reset_dut(data);
+		} else {
+			dev_dbg(&dev->dev,
+					"%s: prv data not found, HW reset omitted\n",
+					__func__);
+		}
+	} else {
+		pr_debug("%s: dev missing, HW reset omitted\n",
+				__func__);
+	}
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#define VHA_REGISTERS_START                        (_REG_START)
+#define VHA_REGISTERS_END                          (_REG_SIZE)
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr - offset >= base && addr < base + size - offset)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
+			__func__, base, size, offset, addr);
+	WARN_ON(1);
+
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+	unsigned long offset = options->carveout.offs;
+
+	if (addr < size - offset)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
+				 __func__, base, size, offset, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and, as a rule, it cannot
+	 * be dereferenced safely without memory barriers; that
+	 * is why it is guarded by __iomem (returned by ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to a
+	 * regular void * pointer. We cast it here, assuming the
+	 * FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	pr_debug("Mapping %zu bytes into kernel memory (Phys:%pa, Kptr:%p)\n", size, &addr, &kptr);
+	pr_debug("[%c%c%c]\n",
+			 (mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
+			 (mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
+			 (mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');
+
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+	pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);
+	iounmap(addr);
+	return 0;
+}
+#endif
+
+/*
+ * IO hooks: we are on a 32-bit system, so only 32-bit accesses are
+ * available.
+ * NOTE: customers may want to use a spinlock to avoid problems with
+ * multi-threaded IO access.
+ */
+uint64_t vha_plat_read64(void *addr)
+{
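+	/* Compose the 64-bit value from two 32-bit reads, low word first;
+	 * note the pair of accesses is not atomic. */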
+	return (uint64_t)readl(addr) | ((uint64_t)readl(addr + 4) << 32);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writel(val & 0xffffffff, addr);
+	writel(((uint64_t)val >> 32), addr + 4);
+}
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int ret = 0;
+	unsigned int int_type;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	unsigned long vha_base_mem, vha_mem_size;
+	struct device *dev = &pci_dev->dev;
+	int heap;
+
+	dev_dbg(dev, "probing device, pci_dev: %p\n", dev);
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	dev_info(dev, "%s dma_get_mask : %#llx\n",
+			__func__, dma_get_mask(dev));
+
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+				__func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n",
+				__func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	dev_info(dev, "%s dma_set_mask %#llx\n",
+			__func__, dma_get_mask(dev));
+	ret = dma_set_mask(dev, dma_get_mask(dev));
+
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("Memory allocation error, aborting.\n");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
+	memset(data, 0, sizeof(*data));
+
+	/* Allocate sirius base registers */
+	ret = sirius_allocate_registers(pci_dev, data,
+			SRS_REG_BANK, PCI_SIRIUS_SYS_CTRL_REGS_BAR,
+			PCI_SIRIUS_SYS_CTRL_BASE_OFFSET,
+			PCI_SIRIUS_SRS_REG_SIZE);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for sirius regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+
+	/* FIXME: Check if there is any way to know how many DUTs are on the system */
+	if (dut_id > MAX_DUT_ID) {
+		dev_err(dev, "Invalid DUT number (%ld), setting it to 0\n", dut_id);
+		dut_id = 0;
+	}
+
+	/* Allocate DUT register space */
+	ret = sirius_allocate_registers(pci_dev, data,
+			DUT_REG_BANK, PCI_SIRIUS_DUT_REGS_BAR,
+			VHA_REGISTERS_START + sirius_dut_register_offset[dut_id],
+			VHA_REGISTERS_END);
+	if (ret) {
+		dev_err(dev, "Can't allocate memory for vha regs!");
+		ret = -ENOMEM;
+		goto out_release;
+	}
+
+	/* Allocate DUT memory space */
+	vha_mem_size = pci_resource_len(pci_dev, PCI_SIRIUS_DUT_MEM_BAR);
+	if (vha_mem_size > maxmapsize)
+		vha_mem_size = maxmapsize;
+
+	vha_base_mem = pci_resource_start(pci_dev, PCI_SIRIUS_DUT_MEM_BAR);
+
+	/* change alloc size according to module parameter */
+	if (pci_size)
+		vha_mem_size = pci_size;
+
+	/* We are not really allocating memory for this reg bank,
+	 * so set the values by hand here: */
+	data->reg_bank[DUT_MEM_BANK].bar = PCI_SIRIUS_DUT_MEM_BAR;
+	data->reg_bank[DUT_MEM_BANK].addr = vha_base_mem;
+	data->reg_bank[DUT_MEM_BANK].size = vha_mem_size;
+	pr_debug("[bank %u] bar: %d addr: %pa size: 0x%lx\n",
+			DUT_MEM_BANK, PCI_SIRIUS_DUT_MEM_BAR,
+			&data->reg_bank[DUT_MEM_BANK].addr,
+			data->reg_bank[DUT_MEM_BANK].size);
+
+
+	/* Allocate MSI IRQ if any */
+	switch (pci_irq_type) {
+	default:
+		int_type = PCI_IRQ_ALL_TYPES;
+		break;
+	case IRQ_TYPE_INTA:
+		int_type = PCI_IRQ_LEGACY;
+		break;
+	case IRQ_TYPE_MSI:
+		int_type = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+		break;
+	}
+
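+	/* Request exactly one vector (min == max == 1) of the selected type. */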
+	ret = pci_alloc_irq_vectors(pci_dev, 1, 1, int_type);
+	if (ret < 0) {
+		dev_err(dev, "Can't reserve requested interrupt!");
+		goto out_release;
+	}
+
+	/* Get the proper IRQ */
+	data->irq  = pci_irq_vector(pci_dev, 0);
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	reset_dut(data);
+
+	/*
+	 * We need to enable interrupts for the embedded device
+	 * via the fpga interrupt controller...
+	 */
+	sirius_enable_int(data, sirius_dut_interrupt_bit[dut_id]);
+
+#if 0
+	/* Sirius does not seem to be able to do bus mastering;
+	 * at least there is no configuration for it. */
+
+#ifdef FPGA_BUS_MASTERING
+	dev_dbg(dev, "enabling FPGA bus mastering\n");
+	sirius_core_writereg32(data, test_ctrl_reg, 0x0);
+#else
+	/* Route to internal RAM - this is reset value */
+	dev_dbg(dev, "disabling FPGA bus mastering\n");
+	sirius_core_writereg32(data, test_ctrl_reg, 0x1);
+#endif
+
+#endif
+
+	/* patch heap config with PCI memory addresses */
+	for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
+		struct heap_config *cfg = &vha_plat_fpga_heap_configs[heap];
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+			if (contig_size && contig_phys_start) {
+				/*
+				 * 2 types of carveout memory are supported:
+				 * 1) memory carved out of the main DDR
+				 *    memory region,
+				 *    eg: linux boot option memmap=512M$0x5CAFFFFF,
+				 *    configured using the module parameters
+				 *    contig_phys_start and contig_size;
+				 * 2) DDR populated in the actual PCI card,
+				 *    in BAR 4.
+				 * The module parameters take precedence
+				 * over PCI memory.
+				 */
+				cfg->options.carveout.phys = contig_phys_start;
+				cfg->options.carveout.size = contig_size;
+				cfg->to_dev_addr = NULL;
+				cfg->to_host_addr = NULL;
+				dev_info(dev, "using %dMB CARVEOUT at %pa\n",
+					 contig_size/1024/1024,
+					 &contig_phys_start);
+			} else {
+				cfg->options.carveout.phys =
+					data->reg_bank[DUT_MEM_BANK].addr;
+				cfg->options.carveout.size =
+					data->reg_bank[DUT_MEM_BANK].size;
+				cfg->options.carveout.offs = pci_offset;
+				cfg->to_dev_addr = carveout_to_dev_addr;
+				cfg->to_host_addr = carveout_to_host_addr;
+				dev_info(dev, "using %zuMB CARVEOUT from PCI at %pa\n",
+					 cfg->options.carveout.size/1024/1024,
+					 &cfg->options.carveout.phys);
+			}
+			/* IO memory access callbacks */
+			cfg->options.carveout.get_kptr = carveout_get_kptr;
+			cfg->options.carveout.put_kptr = carveout_put_kptr;
+
+			break;
+		}
+#endif
+
+		if (cfg->type == IMG_MEM_HEAP_TYPE_COHERENT) {
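+			/* The flags argument of dma_declare_coherent_memory()
+			 * changed in kernel 4.14 and was removed entirely in
+			 * 5.1, hence the version ladder below. */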
+			ret = dma_declare_coherent_memory(dev,
+					contig_phys_start,
+					contig_phys_start,
+					contig_size
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+					, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE
+#else
+					, DMA_MEMORY_EXCLUSIVE
+#endif
+#endif
+					);
+			if (ret == 0) {
+				dev_err(dev, "failed to initialize coherent memory!\n");
+			/* We will fall back to the default pool anyway,
+			 * so no goto out_release here. */
+			}
+			break;
+		}
+	}
+
+	ret = vha_add_dev(dev, vha_plat_fpga_heap_configs,
+			vha_plat_fpga_heaps, data,
+			data->reg_bank[DUT_REG_BANK].km_addr,
+			data->reg_bank[DUT_REG_BANK].size);
+	if (ret) {
+		dev_err(dev, "failed to initialize driver core!\n");
+		goto out_heap_deinit;
+	}
+
+	/*
+	 * Reset the FPGA DUT only after disabling clocks in
+	 * vha_add_dev() -> get properties.
+	 * This workaround is required to ensure that clocks
+	 * (on the daughter board) are enabled for test slave scripts to
+	 * read the FPGA build version register.
+	 * NOTE: Asserting other bits, like the DDR reset bit, causes problems
+	 * with the bus mastering feature and thus results in memory failures.
+	 */
+	reset_dut(data);
+
+	/* Install the ISR callback...*/
+	ret = devm_request_threaded_irq(dev,
+			data->irq, &pci_isr_cb,
+			&pci_thread_irq, IRQF_SHARED,
+			DEVICE_NAME, (void *)pci_dev);
+	if (ret) {
+		dev_err(dev, "failed to request irq!\n");
+		goto out_rm_dev;
+	}
+
+	dev_dbg(dev, "registered irq %d\n", data->irq);
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+
+out_heap_deinit:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(dev);
+#endif
+
+	/* Make sure int are no longer enabled */
+	sirius_disable_int(data, sirius_dut_interrupt_bit[dut_id]);
+
+out_release:
+	pci_release_regions(pci_dev);
+
+out_disable:
+	pci_disable_device(pci_dev);
+
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
+
+	dev_dbg(&dev->dev, "removing device\n");
+
+	if (data == NULL) {
+		dev_err(&dev->dev, "PCI priv data missing!\n");
+	} else {
+		/*
+		 * We  need to disable interrupts for the
+		 * embedded device via the fpga interrupt controller...
+		 */
+		sirius_disable_int(data, sirius_dut_interrupt_bit[dut_id]);
+
+		/* Unregister int */
+		devm_free_irq(&dev->dev, data->irq, dev);
+
+		pci_free_irq_vectors(dev);
+#if 0
+#ifdef FPGA_BUS_MASTERING
+		/* Route to internal RAM - this is reset value */
+		dev_dbg(&dev->dev, "disabling FPGA bus mastering\n");
+		sirius_core_writereg32(data, PCI_SIRIUS_SYS_CTRL_REGS_BAR,
+				test_ctrl_reg, 0x1);
+#endif
+#endif
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+	/* Release any declared mem regions */
+	dma_release_declared_memory(&dev->dev);
+#endif
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+#if 0
+#ifdef FPGA_BUS_MASTERING
+	vha_plat_fpga_heap_configs[0].type = fpga_heap_type;
+#endif
+#endif
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}

+ 36 - 0
driver/vha/platform/vha_plat_param_thead_light_fpga_c910.h

@@ -0,0 +1,36 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_param_thead_light_fpga_c910.h
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 Alibaba Group Holding Limited
+ *
+ *****************************************************************************/
+
+#ifndef VHA_PLAT_PARAM_THEAD_LIGHT_FPGA_C910_H
+#define VHA_PLAT_PARAM_THEAD_LIGHT_FPGA_C910_H
+
+/* Core clock frequency in MHz */
+#define VHA_CORE_CLOCK_MHZ 1000
+
+/* Core watchdog cycles default value */
+/* The MMM can transfer any number of bytes at the cost of more cycles; this sets the timeout to ~100ms @800MHz, scaled by the core clock */
+#define VHA_CORE_WDT_CYCLES (0x4ffffffULL*VHA_CORE_CLOCK_MHZ/800)
+/* The memory watchdog is set to ~1ms @800MHz, a very safe value that avoids any false interrupts */
+#define VHA_MEM_WDT_CYCLES (0xfffffULL*VHA_CORE_CLOCK_MHZ/800)
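+/* For example, with VHA_CORE_CLOCK_MHZ=1000 the core WDT evaluates to
+ * 0x4ffffff*1000/800 ~= 104.9M cycles, i.e. ~105ms at 1GHz; the ULL
+ * suffix keeps the intermediate product in 64-bit arithmetic */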
+
+/* Memory burst size */
+#define VHA_CORE_MH_MAX_BURST_LENGTH 128
+/* SLC cache policy type (0-use cache, 1-bypass cache) */
+#define VHA_CORE_MH_SLC_CACHE_POLICY_TYPE 1
+/* GPU pipe coherent type */
+#define VHA_CORE_MH_GPU_PIPE_COHERENT_TYPE 1
+/* Persistence priority 0-lowest,3-highest */
+#define VHA_CORE_MH_PERSISTENCE_PRIO 0
+
+/* Suspend delay in ms after which the
+ * runtime suspend callback is called */
+#define VHA_CORE_SUSPEND_DELAY 10
+
+#endif /* VHA_PLAT_PARAM_THEAD_LIGHT_FPGA_C910_H */

+ 483 - 0
driver/vha/platform/vha_plat_pci.c

@@ -0,0 +1,483 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/mod_devicetable.h>
+
+#include "uapi/version.h"
+#include "vha_common.h"
+#include "vha_plat.h"
+
+#define DEVICE_NAME "vha"
+
+static unsigned long carveout_phys_start;
+module_param(carveout_phys_start, ulong, 0444);
+MODULE_PARM_DESC(carveout_phys_start,
+		"Physical address of start of carveout region");
+static uint32_t carveout_size;
+module_param(carveout_size, uint, 0444);
+MODULE_PARM_DESC(carveout_size,
+		"Size of carveout region: takes precedence over any PCI based memory");
+
+#define IMG_PCI_VENDOR_ID 0x1010
+#define IMG_PCI_DEVICE_ID 0x1002
+#define PCI_BAR_DEV       0
+/* A 64-bit BAR consumes two BAR slots, so BAR1 is unavailable and the RAM sits in BAR2 */
+#define PCI_BAR_RAM       2
+#define NUM_PCI_BARS      3
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0x7fffff
+
+static struct heap_config vha_plat_pci_heap_configs[] = {
+	/* first entry is the default heap */
+	{
+		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
+		/* .options.carveout to be filled at run time */
+		/* .to_dev_addr to be filled at run time */
+		/* .to_host_addr to be filled at run time */
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		/*.to_dev_addr = NULL,*/
+		/*.to_host_addr = NULL,*/
+	},
+};
+
+static const int vha_plat_fpga_heaps =
+	sizeof(vha_plat_pci_heap_configs)/sizeof(*vha_plat_pci_heap_configs);
+
+static const struct pci_device_id pci_pci_ids[] = {
+	{ PCI_DEVICE(IMG_PCI_VENDOR_ID, IMG_PCI_DEVICE_ID), },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_pci_ids);
+
+struct imgpci_prvdata {
+	int irq;
+	struct {
+		unsigned long addr;
+		unsigned long size;
+		void __iomem *km_addr;
+	} memmap[NUM_PCI_BARS];
+	struct pci_dev *pci_dev;
+};
+
+
+struct img_pci_driver {
+	struct pci_dev   *pci_dev;
+	struct pci_driver pci_driver;
+};
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id);
+static void vha_plat_remove(struct pci_dev *dev);
+
+static int vha_plat_suspend(struct device *dev);
+static int vha_plat_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
+		vha_plat_suspend, vha_plat_resume);
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA dummy driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct img_pci_driver vha_pci_drv = {
+	.pci_driver = {
+		.name = "vha_pci",
+		.id_table = pci_pci_ids,
+		.probe = vha_plat_probe,
+		.remove = vha_plat_remove,
+		.driver = {
+			.groups = drv_groups,
+			.pm = &vha_pm_plat_ops,
+		}
+	},
+};
+
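+/* Cap BAR mappings: 400 MB on 32-bit kernels, 1 GB on 64-bit ones. */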
+static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;
+
+static irqreturn_t pci_thread_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+
+	return vha_handle_thread_irq(&dev->dev);
+}
+
+static irqreturn_t pci_handle_irq(int irq, void *dev_id)
+{
+	struct pci_dev *dev = (struct pci_dev *)dev_id;
+	irqreturn_t ret = IRQ_NONE;
+
+	ret = vha_handle_irq(&dev->dev);
+
+	return ret;
+}
+
+/*
+ * IO hooks.
+ * NOTE: customer may want to use spinlock to avoid
+ * problems with multi threaded IO access
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+	return readq(addr);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writeq(val, addr);
+}
+
+int vha_plat_deinit(void)
+{
+	int ret;
+
+	/* Unregister the driver from the OS */
+	pci_unregister_driver(&(vha_pci_drv.pci_driver));
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+static phys_addr_t carveout_to_dev_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+
+	if (addr >= base && addr < base + size)
+		return addr - base;
+
+	pr_err("%s: unexpected addr! base %llx size %zu addr %#llx\n",
+				 __func__, base, size, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static phys_addr_t carveout_to_host_addr(union heap_options *options,
+					phys_addr_t addr)
+{
+	phys_addr_t base = options->carveout.phys;
+	size_t size = options->carveout.size;
+
+	if (addr < size)
+		return base + addr;
+
+	pr_err("%s: unexpected addr! base %llx size %zu addr %#llx\n",
+				 __func__, base, size, addr);
+	WARN_ON(1);
+	return addr;
+}
+
+static void *carveout_get_kptr(phys_addr_t addr,
+		size_t size, enum img_mem_attr mattr)
+{
+	/*
+	 * Device memory is I/O memory and, as a rule, it cannot
+	 * be dereferenced safely without memory barriers; that
+	 * is why it is guarded by __iomem (returned by ioremap)
+	 * and checked by sparse. It is accessed only through
+	 * ioread32(), iowrite32(), etc.
+	 *
+	 * On x86 this memory can be dereferenced and safely
+	 * accessed, i.e. an __iomem pointer can be cast to a
+	 * regular void * pointer. We cast it here, assuming the
+	 * FPGA host is x86, and add __force to silence the
+	 * sparse warning.
+	 *
+	 * Note: a system memory carveout can be used with caching turned on.
+	 */
+	void *kptr = NULL;
+
+	if (mattr & IMG_MEM_ATTR_UNCACHED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+		kptr = (void __force *)ioremap_nocache(addr, size);
+#else
+		kptr = (void __force *)ioremap(addr, size);
+#endif
+	else if (mattr & IMG_MEM_ATTR_CACHED)
+		kptr = (void __force *)ioremap_cache(addr, size);
+	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
+		kptr = (void __force *)ioremap_wc(addr, size);
+
+	return kptr;
+}
+
+static int carveout_put_kptr(void *addr)
+{
+	iounmap(addr);
+	return 0;
+}
+#endif
+
+static int vha_plat_probe(struct pci_dev *pci_dev,
+		const struct pci_device_id *id)
+{
+	int bar, ret = 0;
+	struct imgpci_prvdata *data;
+	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
+	struct device *dev = &pci_dev->dev;
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	int heap;
+#endif
+
+	dev_info(dev, "probed a VHA device, pci_dev: %x:%x\n",
+		 pci_dev->vendor, pci_dev->device);
+
+	/* Enable the device */
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		goto out_free;
+
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %#llx\n",
+			 __func__, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
+	ret = dma_set_mask(dev, dma_get_mask(dev));
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		goto out_disable;
+	}
+
+	/* Reserve PCI I/O and memory resources */
+	ret = pci_request_regions(pci_dev, "imgpci");
+	if (ret)
+		goto out_disable;
+
+	/* Create a kernel space mapping for each of the bars */
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_release;
+	}
+	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
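+	/* Visit BAR0 and BAR2 only: a 64-bit BAR consumes two slots, so the
+	 * loop steps by PCI_BAR_RAM - PCI_BAR_DEV (= 2). */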
+	for (bar = 0; bar < NUM_PCI_BARS; bar += PCI_BAR_RAM-PCI_BAR_DEV) {
+		data->memmap[bar].addr = pci_resource_start(pci_dev, bar);
+		data->memmap[bar].size = pci_resource_len(pci_dev, bar);
+		if (bar == PCI_BAR_RAM) {
+			/* Don't ioremap pci memory - it is mapped on demand */
+			continue;
+		}
+		if (data->memmap[bar].size > maxmapsize) {
+			/*
+			 * We avoid mapping too big regions: we do not need
+			 * such a big amount of memory and some times we do
+			 * not have enough contiguous 'vmallocable' memory.
+			 */
+			dev_warn(dev, "not mapping all mem for bar %u\n", bar);
+			data->memmap[bar].size = maxmapsize;
+		}
+		data->memmap[bar].km_addr = devm_ioremap(dev,
+				pci_resource_start(pci_dev, bar),
+				data->memmap[bar].size);
+		dev_info(dev, "[bar %u] addr: 0x%lx size: 0x%lx km: 0x%p\n",
+				bar, data->memmap[bar].addr,
+				data->memmap[bar].size,
+				data->memmap[bar].km_addr);
+	}
+
+	/* Get the IRQ...*/
+	data->irq = pci_dev->irq;
+	data->pci_dev = pci_dev;
+	vha_pci_drv.pci_dev = pci_dev;
+
+	/* patch heap config with PCI memory addresses */
+	for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
+		struct heap_config *cfg = &vha_plat_pci_heap_configs[heap];
+
+#ifdef CONFIG_GENERIC_ALLOCATOR
+		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
+
+			if (carveout_size && carveout_phys_start) {
+				/*
+				 * 2 types of carveout memory are supported:
+				 * 1) memory carved out of the main DDR
+				 *    memory region,
+				 *    eg: linux boot option memmap=512M$0x5CAFFFFF,
+				 *    configured using the module parameters
+				 *    carveout_phys_start and carveout_size;
+				 * 2) DDR populated in the actual PCI card,
+				 *    in BAR 2.
+				 * The module parameters take precedence
+				 * over PCI memory.
+				 */
+				cfg->options.carveout.phys =
+					carveout_phys_start;
+				cfg->options.carveout.size =
+					carveout_size;
+				cfg->to_dev_addr = carveout_to_dev_addr;
+				cfg->to_host_addr = carveout_to_host_addr;
+				dev_info(dev, "using %dMB CARVEOUT at x%lx\n",
+					 carveout_size/1024/1024,
+					 carveout_phys_start);
+			} else if (data->memmap[PCI_BAR_RAM].size) {
+				cfg->options.carveout.phys =
+					data->memmap[PCI_BAR_RAM].addr;
+				if (carveout_size)
+					cfg->options.carveout.size =
+						carveout_size;
+				else
+					cfg->options.carveout.size =
+						data->memmap[PCI_BAR_RAM].size;
+				cfg->to_dev_addr = carveout_to_dev_addr;
+				cfg->to_host_addr = carveout_to_host_addr;
+				dev_info(dev, "using %ldMB CARVEOUT from PCI bar %d\n",
+					 cfg->options.carveout.size/1024/1024,
+					 PCI_BAR_RAM);
+			}
+			/* IO memory access callbacks */
+			cfg->options.carveout.get_kptr = carveout_get_kptr;
+			cfg->options.carveout.put_kptr = carveout_put_kptr;
+			break;
+		}
+#endif
+		/* THIS IS HACKY - just for testing dmabuf importing on qemu.
+		 * Dma-buf config: using the carveout type for the dmabuf
+		 * exporter, assuming the memory starts just after
+		 * carveout_phys_start + carveout_size and is carveout_size
+		 * bytes long. */
+		if (cfg->type == IMG_MEM_HEAP_TYPE_DMABUF) {
+			cfg->options.carveout.phys =
+				carveout_phys_start + carveout_size;
+			cfg->options.carveout.size = carveout_size;
+			cfg->to_dev_addr = carveout_to_dev_addr;
+		}
+	}
+
+	ret = vha_add_dev(dev, vha_plat_pci_heap_configs,
+			vha_plat_fpga_heaps, data,
+			data->memmap[PCI_BAR_DEV].km_addr,
+			data->memmap[PCI_BAR_DEV].size);
+	if (ret) {
+		dev_err(dev, "failed to intialize driver core!\n");
+		goto out_release;
+	}
+
+	/* Install the ISR callback...*/
+	ret = devm_request_threaded_irq(dev, data->irq, &pci_handle_irq,
+			&pci_thread_irq, IRQF_SHARED, DEVICE_NAME,
+			(void *)pci_dev);
+	if (ret) {
+		dev_err(dev, "failed to request irq!\n");
+		goto out_rm_dev;
+	}
+	dev_dbg(dev, "registerd irq %d\n", data->irq);
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_rm_dev;
+	}
+	return ret;
+
+out_rm_dev:
+	vha_rm_dev(dev);
+out_release:
+	pci_release_regions(pci_dev);
+out_disable:
+	pci_disable_device(pci_dev);
+out_free:
+	return ret;
+}
+
+static void vha_plat_remove(struct pci_dev *dev)
+{
+	dev_dbg(&dev->dev, "removing device\n");
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+
+	vha_rm_dev(&dev->dev);
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	return vha_suspend_dev(dev);
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	return vha_resume_dev(dev);
+}
+#endif
+
+int vha_plat_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&vha_pci_drv.pci_driver);
+	if (ret) {
+		pr_err("failed to register PCI driver!\n");
+		return ret;
+	}
+
+	/* pci_dev should be set in probe */
+	if (!vha_pci_drv.pci_dev) {
+		pr_err("failed to find VHA PCI dev!\n");
+		pci_unregister_driver(&vha_pci_drv.pci_driver);
+		return -ENODEV;
+	}
+
+	return 0;
+}

+ 371 - 0
driver/vha/platform/vha_plat_thead.c

@@ -0,0 +1,371 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_thead.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 Alibaba Group Holding Limited
+ *
+ *****************************************************************************/
+#define DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include <img_mem_man.h>
+#include "vha_common.h"
+#include "uapi/version.h"
+#include "vha_plat.h"
+#include "vha_plat_dt.h"
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/vagus_system.h>
+#elif defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#endif
+
+#define DEVICE_NAME "vha"
+
+/* Number of core cycles used to measure the core clock frequency */
+#define FREQ_MEASURE_CYCLES 0xfffffff
+
+static bool poll_interrupts;   /* Disabled by default */
+module_param(poll_interrupts, bool, 0444);
+MODULE_PARM_DESC(poll_interrupts, "Poll for interrupts? 0: No, 1: Yes");
+
+static unsigned int irq_poll_interval_ms = 100; /* 100 ms */
+module_param(irq_poll_interval_ms, uint, 0444);
+MODULE_PARM_DESC(irq_poll_interval_ms, "Time in ms between each interrupt poll");
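+/* Usage sketch (module file name assumed): loading with
+ * "insmod vha.ko poll_interrupts=1 irq_poll_interval_ms=50" polls the
+ * core for interrupts every 50 ms instead of using the IRQ line. */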
+
+/* Global timer used when irq poll mode is switched on.
+ * NOTE: only single core instance is supported in polling mode */
+static struct poll_timer {
+	struct platform_device *pdev;
+	struct timer_list tmr;
+	bool enabled;
+
+} irq_poll_timer;
+
+static ssize_t info_show(struct device_driver *drv, char *buf);
+
+static irqreturn_t dt_plat_thread_irq(int irq, void *dev_id)
+{
+	struct platform_device *ofdev = (struct platform_device *)dev_id;
+
+	return vha_handle_thread_irq(&ofdev->dev);
+}
+
+static irqreturn_t dt_plat_isrcb(int irq, void *dev_id)
+{
+	struct platform_device *ofdev = (struct platform_device *)dev_id;
+
+	if (!ofdev)
+		return IRQ_NONE;
+
+	return vha_handle_irq(&ofdev->dev);
+}
+
+/* Interrupt polling function */
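+/* Kernel 4.15 changed the timer callback to take a struct timer_list *:
+ * timer_setup()/from_timer() replace setup_timer() and its unsigned long
+ * context argument, hence the version split below. */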
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
+static void dt_plat_poll_interrupt(struct timer_list *t)
+{
+	struct poll_timer *poll_timer = from_timer(poll_timer, t, tmr);
+#else
+static void dt_plat_poll_interrupt(unsigned long ctx)
+{
+	struct poll_timer *poll_timer = (struct poll_timer *)ctx;
+#endif
+	struct platform_device *ofdev = poll_timer->pdev;
+	int ret;
+
+	if (!poll_timer->enabled)
+		return;
+
+	preempt_disable();
+	ret = vha_handle_irq(&ofdev->dev);
+	preempt_enable();
+	if (ret == IRQ_WAKE_THREAD)
+		vha_handle_thread_irq(&ofdev->dev);
+
+	/* retrigger */
+	mod_timer(&poll_timer->tmr,
+			jiffies + msecs_to_jiffies(irq_poll_interval_ms));
+}
+
+static int vha_plat_probe(struct platform_device *ofdev)
+{
+	int ret, module_irq;
+	struct resource res;
+	void __iomem *reg_addr;
+	uint32_t reg_size, core_size;
+	char info[256];
+
+	info_show(ofdev->dev.driver, info);
+	pr_info("%s: Version: %s\n", __func__, info);
+
+	ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+	if (ret) {
+		dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
+		return ret;
+	}
+	pr_info("%s: registers %#llx-%#llx\n", __func__,
+		(unsigned long long)res.start, (unsigned long long)res.end);
+
+	module_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+	if (module_irq == 0) {
+		dev_err(&ofdev->dev, "could not map IRQ\n");
+		return -ENXIO;
+	}
+
+	/* Assuming the DT holds a single register space entry that covers all
+	 * regions, we need to do the split accordingly. */
+	reg_size = res.end - res.start + 1;
+
+#ifdef CFG_SYS_VAGUS
+	core_size = _REG_SIZE + _REG_NNSYS_SIZE;
+#else
+	core_size = _REG_SIZE;
+#endif
+	if ((res.start + _REG_START) > res.end) {
+		dev_err(&ofdev->dev, "wrong system conf for core region!\n");
+		return -ENXIO;
+	}
+
+	if ((res.start + _REG_START + core_size) > res.end) {
+		dev_warn(&ofdev->dev, "trimming system conf for core region!\n");
+		core_size = reg_size - _REG_START;
+	}
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+	reg_addr = devm_ioremap_nocache(&ofdev->dev, res.start +
+			_REG_START, core_size);
+#else
+	reg_addr = devm_ioremap(&ofdev->dev, res.start +
+			_REG_START, core_size);
+#endif
+	if (!reg_addr) {
+		dev_err(&ofdev->dev, "failed to map core registers\n");
+		return -ENXIO;
+	}
+
+	ret = vha_plat_dt_hw_init(ofdev);
+	if (ret) {
+		dev_err(&ofdev->dev, "failed to init platform-specific hw!\n");
+		goto out_add_dev;
+	}
+
+	/* no 'per device' memory heaps used */
+	ret = vha_add_dev(&ofdev->dev, NULL, 0,
+			  NULL /* plat priv data */, reg_addr, core_size);
+	if (ret) {
+		dev_err(&ofdev->dev, "failed to intialize driver core!\n");
+		goto out_add_dev;
+	}
+
+	if (!poll_interrupts) {
+		ret = devm_request_threaded_irq(&ofdev->dev, module_irq, &dt_plat_isrcb,
+				&dt_plat_thread_irq, IRQF_SHARED, DEVICE_NAME, ofdev);
+		if (ret) {
+			dev_err(&ofdev->dev, "failed to request irq\n");
+			goto out_irq;
+		}
+	} else {
+		irq_poll_timer.pdev = ofdev;
+		irq_poll_timer.enabled = true;
+		/* Setup and start poll timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)
+		timer_setup(&irq_poll_timer.tmr, dt_plat_poll_interrupt, 0);
+#else
+		setup_timer(&irq_poll_timer.tmr, dt_plat_poll_interrupt,
+				(uintptr_t)&irq_poll_timer);
+#endif
+		mod_timer(&irq_poll_timer.tmr,
+				jiffies + msecs_to_jiffies(irq_poll_interval_ms));
+	}
+
+	/* Try to calibrate the core if needed */
+	ret = vha_dev_calibrate(&ofdev->dev, FREQ_MEASURE_CYCLES);
+	if (ret) {
+		dev_err(&ofdev->dev, "%s: Failed to start clock calibration!\n", __func__);
+		goto out_irq;
+	}
+	return ret;
+
+out_irq:
+	vha_rm_dev(&ofdev->dev);
+out_add_dev:
+	devm_iounmap(&ofdev->dev, reg_addr);
+	return ret;
+}
+
+static int vha_plat_remove(struct platform_device *ofdev)
+{
+	vha_rm_dev(&ofdev->dev);
+
+	vha_plat_dt_hw_destroy(ofdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int vha_plat_suspend(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_suspend_dev(dev);
+	if (ret)
+		dev_err(dev, "failed to suspend the core!\n");
+	else {
+		ret = vha_plat_dt_hw_suspend(ofdev);
+		if (ret)
+			dev_err(dev, "failed to suspend platform-specific hw!\n");
+	}
+
+	return ret;
+}
+
+static int vha_plat_resume(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_plat_dt_hw_resume(ofdev);
+	if (ret)
+		dev_err(dev, "failed to resume platform-specific hw!\n");
+	else {
+		ret = vha_resume_dev(dev);
+		if (ret)
+			dev_err(dev, "failed to resume the core!\n");
+	}
+
+	return ret;
+}
+
+static int vha_plat_runtime_idle(struct device *dev)
+{
+	return 0;
+}
+
+static int vha_plat_runtime_suspend(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_plat_dt_hw_suspend(ofdev);
+	if (ret)
+		dev_err(dev, "failed to suspend platform-specific hw!\n");
+
+	return ret;
+}
+
+static int vha_plat_runtime_resume(struct device *dev)
+{
+	struct platform_device *ofdev =
+		container_of(dev, struct platform_device, dev);
+	int ret = 0;
+
+	ret = vha_plat_dt_hw_resume(ofdev);
+	if (ret)
+		dev_err(dev, "failed to resume platform-specific hw!\n");
+
+	return ret;
+}
+
+#endif
+
+static const struct dev_pm_ops vha_pm_plat_ops = {
+	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
+			vha_plat_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
+};
+
+static ssize_t info_show(struct device_driver *drv, char *buf)
+{
+	return sprintf(buf, "VHA DT driver version : " VERSION_STRING "\n");
+}
+
+static DRIVER_ATTR_RO(info);
+static struct attribute *drv_attrs[] = {
+	&driver_attr_info.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(drv);
+
+static struct platform_driver vha_plat_drv = {
+	.probe  = vha_plat_probe,
+	.remove = vha_plat_remove,
+	.driver = {
+		.name = "ax3xxx-nna",
+		.groups = drv_groups,
+		.owner = THIS_MODULE,
+		.of_match_table = vha_plat_dt_of_ids,
+		.pm = &vha_pm_plat_ops,
+	},
+};
+
+int vha_plat_init(void)
+{
+	int ret = 0;
+
+	struct heap_config *heap_configs;
+	int num_heaps;
+
+	vha_plat_dt_get_heaps(&heap_configs, &num_heaps);
+	ret = vha_init_plat_heaps(heap_configs, num_heaps);
+	if (ret) {
+		pr_err("failed to initialize global heaps\n");
+		return -ENOMEM;
+	}
+
+	ret = platform_driver_register(&vha_plat_drv);
+	if (ret) {
+		pr_err("failed to register VHA driver!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int vha_plat_deinit(void)
+{
+	int ret;
+
+	if (poll_interrupts) {
+		irq_poll_timer.enabled = false;
+		del_timer_sync(&irq_poll_timer.tmr);
+	}
+
+	/* Unregister the driver from the OS */
+	platform_driver_unregister(&vha_plat_drv);
+
+	ret = vha_deinit();
+	if (ret)
+		pr_err("VHA driver deinit failed\n");
+
+	return ret;
+}
+
+/*
+ * coding style for emacs
+ *
+ * Local variables:
+ * indent-tabs-mode: t
+ * tab-width: 8
+ * c-basic-offset: 8
+ * End:
+ */

+ 181 - 0
driver/vha/platform/vha_plat_thead_light.c

@@ -0,0 +1,181 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_thead_light.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 Alibaba Group Holding Limited
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <img_mem_man.h>
+#include "vha_plat.h"
+#include "vha_plat_dt.h"
+
+const struct of_device_id vha_plat_dt_of_ids[] = {
+	{ .compatible = "img,ax3386-nna" },
+	//{ .compatible = VHA_PLAT_DT_OF_ID },
+	{ }
+};
+
+static struct heap_config example_heap_configs[] = {
+	{
+		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
+		.options.unified = {
+			.gfp_type = GFP_KERNEL | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+		.to_dev_addr = NULL,
+	},
+};
+
+struct npu_plat_if {
+	struct clk *npu_pclk;
+	struct clk *npu_aclk;
+};
+static struct npu_plat_if *g_npi;
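+/* Both clocks are treated as optional: probe only warns and clears the
+ * handle when devm_clk_get() fails, and the enable/disable helpers skip
+ * NULL handles. */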
+
+/*
+ * IO hooks.
+ * NOTE: customer may want to use spinlock to avoid
+ * problems with multi threaded IO access
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+	return readq((volatile void __iomem *)addr);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writeq(val, (volatile void __iomem *)addr);
+}
+
+int vha_plat_dt_hw_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+	uint64_t dma_mask;
+
+	dev_dbg(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+			 __func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	/* Try alternative dma_mask setting from device tree */
+	if (!of_property_read_u64(pdev->dev.of_node, "dma-mask",
+				(uint64_t *)&dma_mask)) {
+		dev_info(dev, "%s forcing custom mask from DT : %#llx\n",
+				__func__, dma_mask);
+	} else {
+		/* If alternative mask not defined in
+		 * DT -> "dma-mask" property, use the default one (32bit) */
+		dma_mask = dma_get_mask(dev);
+	}
+	ret = dma_set_mask(dev, dma_mask);
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		return ret;
+	}
+
+	/* get clock domain, voltage regulator, set clock rate, etc */
+	g_npi = devm_kzalloc(&pdev->dev, sizeof(*g_npi), GFP_KERNEL);
+	if (!g_npi)
+		return -ENOMEM;
+
+	g_npi->npu_pclk = devm_clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(g_npi->npu_pclk)) {
+		dev_warn(&pdev->dev, "failed to get npu pclk");
+		g_npi->npu_pclk = NULL;
+	}
+
+	g_npi->npu_aclk = devm_clk_get(&pdev->dev, "aclk");
+	if (IS_ERR(g_npi->npu_aclk)) {
+		dev_warn(&pdev->dev, "failed to get npu aclk");
+		g_npi->npu_aclk = NULL;
+	}
+
+	return 0;
+}
+
+/* return platform global heaps */
+void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps)
+{
+	*heap_configs = example_heap_configs;
+	*num_heaps = sizeof(example_heap_configs)/sizeof(struct heap_config);
+}
+
+static int vha_plat_dt_clk_prepare_enable(struct npu_plat_if *npi)
+{
+	int ret;
+
+	if (npi->npu_pclk) {
+		ret = clk_prepare_enable(npi->npu_pclk);
+		if (ret)
+			return ret;
+	}
+
+	if (npi->npu_aclk) {
+		ret = clk_prepare_enable(npi->npu_aclk);
+		if (ret) {
+			clk_disable_unprepare(npi->npu_pclk);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void vha_plat_dt_clk_disable_unprepare(struct npu_plat_if *npi)
+{
+	if (npi->npu_aclk)
+		clk_disable_unprepare(npi->npu_aclk);
+	if (npi->npu_pclk)
+		clk_disable_unprepare(npi->npu_pclk);
+}
+
+void vha_plat_dt_hw_destroy(struct platform_device *pdev)
+{
+	/* Put any vendor related code:
+	 * put clock domain, voltage regulator, etc */
+}
+
+int vha_plat_dt_hw_suspend(struct platform_device *pdev)
+{
+	vha_plat_dt_clk_disable_unprepare(g_npi);
+
+	return 0;
+}
+
+int vha_plat_dt_hw_resume(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = vha_plat_dt_clk_prepare_enable(g_npi);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable npu clock(%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+//MODULE_DEVICE_TABLE(of, vha_plat_dt_of_ids);

+ 122 - 0
driver/vha/platform/vha_plat_thead_light_fpga_c910.c

@@ -0,0 +1,122 @@
+/*!
+ *****************************************************************************
+ *
+ * @File       vha_plat_thead_light_fpga_c910.c
+ * ---------------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 Alibaba Group Holding Limited
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/io.h>
+
+#include <img_mem_man.h>
+#include "vha_plat.h"
+#include "vha_plat_dt.h"
+
+const struct of_device_id vha_plat_dt_of_ids[] = {
+	{ .compatible = "img,ax3386-nna" },
+	//{ .compatible = VHA_PLAT_DT_OF_ID },
+	{ }
+};
+
+static struct heap_config example_heap_configs[] = {
+	{
+		.type = IMG_MEM_HEAP_TYPE_UNIFIED,
+		.options.unified = {
+			.gfp_type = GFP_KERNEL | __GFP_ZERO,
+		},
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_DMABUF,
+		.to_dev_addr = NULL,
+	},
+	{
+		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
+		.to_dev_addr = NULL,
+	},
+};
+/*
+ * IO hooks.
+ * NOTE: customer may want to use spinlock to avoid
+ * problems with multi threaded IO access
+ */
+uint64_t vha_plat_read64(void *addr)
+{
+	return readq((volatile void __iomem *)addr);
+}
+
+void vha_plat_write64(void *addr, uint64_t val)
+{
+	writeq(val, (volatile void __iomem *)addr);
+}
+
+int vha_plat_dt_hw_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+	uint64_t dma_mask;
+
+	dev_dbg(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));
+	if (dev->dma_mask) {
+		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
+			 __func__, dev->dma_mask, *dev->dma_mask);
+	} else {
+		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
+		dev->dma_mask = &dev->coherent_dma_mask;
+	}
+
+	/* Try alternative dma_mask setting from device tree */
+	if (!of_property_read_u64(pdev->dev.of_node, "dma-mask",
+				(uint64_t *)&dma_mask)) {
+		dev_info(dev, "%s forcing custom mask from DT : %#llx\n",
+				__func__, dma_mask);
+	} else {
+		/* If alternative mask not defined in
+		 * DT -> "dma-mask" property, use the default one (32bit) */
+		dma_mask = dma_get_mask(dev);
+	}
+	ret = dma_set_mask(dev, dma_mask);
+	if (ret) {
+		dev_err(dev, "%s failed to set dma mask\n", __func__);
+		return ret;
+	}
+
+	/* Put any vendor related code:
+	 * get clock domain, voltage regulator, set clock rate, etc */
+	return 0;
+}
+
+/* return platform global heaps */
+void vha_plat_dt_get_heaps(struct heap_config **heap_configs, int *num_heaps)
+{
+	*heap_configs = example_heap_configs;
+	*num_heaps = sizeof(example_heap_configs)/sizeof(struct heap_config);
+}
+
+void vha_plat_dt_hw_destroy(struct platform_device *pdev)
+{
+	/* Put any vendor related code:
+	 * put clock domain, voltage regulator, etc */
+}
+
+int vha_plat_dt_hw_suspend(struct platform_device *pdev)
+{
+	/* This is the place where vendor specific code shall be called:
+	 * eg. turn off voltage regulator/disable power domain */
+	return 0;
+}
+
+int vha_plat_dt_hw_resume(struct platform_device *pdev)
+{
+	/* This is the place where vendor specific code shall be called:
+	 * eg. turn on voltage regulator/enable power domain */
+	return 0;
+}
+
+//MODULE_DEVICE_TABLE(of, vha_plat_dt_of_ids);

+ 750 - 0
driver/vha/single/vha_cnn.c

@@ -0,0 +1,750 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL")in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include <uapi/vha.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+
+static uint32_t cnn_pdump_poll_count = 10000000;
+module_param(cnn_pdump_poll_count, uint, 0444);
+MODULE_PARM_DESC(cnn_pdump_poll_count,
+		"PDUMP: Number of times to poll for CNN status");
+
+static bool cnn_preloads_disable;
+module_param(cnn_preloads_disable, bool, 0444);
+MODULE_PARM_DESC(cnn_preloads_disable,
+		"Disables CNN preloads");
+
+/*
+ * submit a command stream to the CNN hardware
+ * input buffers:
+ *   command
+ *   input
+ *   coeff
+ * output buffers:
+ *   output
+ *   accum_load
+ * data:
+ *   none
+ */
+static int do_cmd_cnn_submit(struct vha_cmd *cmd)
+{
+	int i;
+	uint32_t val32;
+	const struct vha_user_cnn_submit_cmd *user_cmd =
+		(struct vha_user_cnn_submit_cmd *)&cmd->user_cmd;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	struct vha_buffer *buf = NULL;
+	struct vha_onchip_map *onchip_map = NULL;
+	int ret = -EINVAL;
+	uint64_t alt_addrs_used = 0;
+	size_t user_cmd_size;
+
+	if (vha->hw_bypass) {
+		ret = -EAGAIN;
+		dev_info(vha->dev, "%s skip\n", __func__);
+		goto out_error;
+	}
+
+	img_pdump_printf("-- CNN_SETUP_BEGIN\n");
+	/* Wait for the previous kick to be accepted */
+	if (vha->low_latency != VHA_LL_DISABLED) {
+		/* Sanity wait for the kick bit to be deasserted */
+		IOPOLL64_PDUMP(0, 1000, 10, (uint64_t)VHA_CR_OS(CNN_CONTROL_START_EN),
+							VHA_CR_OS(CNN_CONTROL));
+		if (cmd->queued &&
+				vha->low_latency == VHA_LL_SW_KICK)
+			goto hw_kick;
+	}
+
+	if (vha->pendcmd[VHA_CNN_CMD].cmd != NULL &&
+				vha->low_latency == VHA_LL_DISABLED) {
+		dev_err(vha->dev, "%s: trying to submit cnn cmd when hw busy!\n",
+			__func__);
+		goto out_error;
+	}
+
+	user_cmd_size = sizeof(*user_cmd);
+	if (user_cmd->subseg_num > 0)
+		user_cmd_size += (user_cmd->subseg_num - 1) * sizeof(struct vha_subseg_info);
+	if (cmd->size != user_cmd_size) {
+		dev_err(vha->dev, "%s: command buffer wrong size: %zu/%zu",
+			__func__, cmd->size, user_cmd_size);
+		goto out_error;
+	}
+
+	if (!vha_dev_check_hw_capab(vha, user_cmd->expected_ip_capab)) {
+		ret = -ENODEV;
+		goto out_error;
+	}
+
+	/* at least CMD and (IN or OUT)*/
+	if (user_cmd->msg.num_inbufs < 2 ||
+		/* and maybe TMP and others */
+		user_cmd->msg.num_bufs > VHA_CORE_MAX_ALT_ADDRS) {
+		dev_err(vha->dev, "%s: wrong number of bufs: %u,%u\n",
+			__func__,
+			user_cmd->msg.num_inbufs, user_cmd->msg.num_bufs);
+		goto out_error;
+	}
+
+	if (user_cmd->onchipram_map_id != 0) {
+		onchip_map = idr_find(&session->onchip_maps, user_cmd->onchipram_map_id);
+		if (!onchip_map) {
+			dev_warn(vha->dev, "%s: idr_find failed\n", __func__);
+		}
+	}
+
+	/*
+	 * write buffer address to each register,
+	 * and pdump LDB each of the input buffers
+	 */
+	img_pdump_printf("-- Load inputs\n");
+	for (i = 0; i < user_cmd->msg.num_bufs; i++) {
+		uint32_t offset;
+		uint32_t size;
+		uint32_t reg;
+
+		/* buffer id == 0 means no buffer */
+		if (user_cmd->msg.data[i] == 0)
+			continue;
+
+		buf = vha_find_bufid(session, user_cmd->msg.data[i]);
+		if (buf == NULL) {
+			dev_err(vha->dev, "%s: invalid buffer id:%d\n",
+				__func__, user_cmd->msg.data[i]);
+			goto out_error;
+		}
+		if (buf->id == user_cmd->cmdbuf) {
+			/* cmdstream always starts at offset 0 */
+			if (user_cmd->subseg_info[cmd->subseg_current].cmdbuf_size)
+				size = cmd->stream_size = user_cmd->subseg_info[cmd->subseg_current].cmdbuf_size;
+			else
+				size = cmd->stream_size = buf->size;
+
+			offset = user_cmd->subseg_info[cmd->subseg_current].cmdbuf_offset;
+			if (size == 0) {
+				dev_err(vha->dev,
+					"%s: invalid cmdstream size\n",
+					__func__);
+				goto out_error;
+			}
+			reg = VHA_CR_OS(CNN_CMD_BASE_ADDRESS);
+			img_pdump_printf("-- Setup command stream\n");
+		} else {
+			/*
+			 * offset can be specified for all
+			 * buffers except cmdstream buf
+			 */
+			offset = user_cmd->bufoffsets[i-1];
+			size = user_cmd->bufsizes[i-1];
+
+			if (size + offset > buf->size) {
+				dev_err(vha->dev,
+					"%s: invalid size+offset: %x+%x > %zx\n",
+					__func__, size, offset, buf->size);
+				goto out_error;
+			}
+
+			reg = VHA_CR_OS(CNN_ALT_ADDRESS0)
+				+ user_cmd->regidx[i-1]
+				* (VHA_CR_OS(CNN_ALT_ADDRESS1)
+				- VHA_CR_OS(CNN_ALT_ADDRESS0));
+			/* record what alt address is in use */
+			alt_addrs_used |= 1 << user_cmd->regidx[i-1];
+#if defined(HW_AX3)
+			/* Alternative addresses from 8 to 15 are
+			 * located in different place */
+			if (user_cmd->regidx[i-1] >= 8) {
+				reg = VHA_CR_OS(CNN_ALT_ADDRESS8)
+				+ (user_cmd->regidx[i-1] - 8)
+				* (VHA_CR_OS(CNN_ALT_ADDRESS1)
+				- VHA_CR_OS(CNN_ALT_ADDRESS0));
+			}
+			alt_addrs_used |= buf->req_type <<
+				(VHA_CR_OS(CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT) +
+				user_cmd->regidx[i-1]);
+#elif defined(HW_AX2)
+			if (user_cmd->regidx[i-1] > 8) {
+				dev_err(vha->dev,
+						"%s: extended alternative addresses not supported!\n",
+						__func__);
+				goto out_error;
+			}
+#endif
+		}
+		/* pdump the input buffers (not filled by the hw),
+		 * try to cache buffers filled by SW,
+		 * to avoid unnecessary LDBs */
+		if (i < user_cmd->msg.num_inbufs &&
+				!(buf->status == VHA_BUF_FILLED_BY_HW))
+			vha_pdump_ldb_buf(session, PDUMP_PRM,
+					buf, offset, size,
+					buf->status == VHA_BUF_FILLED_BY_SW);
+
+		vha_dump_digest(session, buf, cmd);
+		/*
+		 * write to all of the index registers.
+		 * in no-MMU mode, write phys address of a contig buffer.
+		 * in MMU mode, write virt address of buffer.
+		 * If onchip_map selected, use different virt address of buffer
+		 */
+		if (onchip_map != NULL && onchip_map->bufid == buf->id)
+			IOWRITE64_PDUMP(onchip_map->devvirt + offset, reg);
+		else
+			IOWRITE_PDUMP_BUFADDR(session, buf, offset, reg);
+
+		if (vha_buf_needs_flush(session, buf->id))
+			img_mem_sync_cpu_to_device(session->mem_ctx, buf->id);
+	}
+
+	if (!cnn_preloads_disable) {
+		/* Inform the hw what alt addresses are in use,
+		 * so the command decoder can prefetch */
+		img_pdump_printf("-- Setup CNN prefetch register\n");
+		IOWRITE64_PDUMP(alt_addrs_used, VHA_CR_OS(CNN_ALT_ADDRESS_USED));
+	}
+
+	/* write the stream size only */
+	ret = 0;
+	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
+		vha->queuedcmd[VHA_CNN_CMD].cmd = cmd;
+		cmd->queued = true;
+		vha->stats.cnn_kicks_queued++;
+		img_pdump_printf("-- CNN already kicked queueing!\n");
+		dev_dbg(vha->dev, "%s: -> kicked:%p queueing:%p\n",
+					__func__, vha->pendcmd[VHA_CNN_CMD].cmd, cmd);
+		if (vha->low_latency == VHA_LL_SW_KICK)
+			return ret;
+	}
+hw_kick:
+	/* Change mmu context */
+	ret = vha_mmu_setup(cmd->session);
+	if (ret) {
+		dev_err(vha->dev,
+			"%s: Error during MMU setup!\n", __func__);
+		goto out_error;
+	}
+	/* Setup memory stuff */
+	vha_dev_mh_setup(vha, session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id, NULL);
+
+	/* Prepare debug buffer registers */
+	vha_dbg_prepare_hwbufs(session, cmd, NULL);
+
+	/* Setup cnn hw watchdog before kicking the hw */
+	{
+		uint64_t cycles, mode;
+
+		ret = vha_dev_hwwdt_calculate(vha, cmd, &cycles, &mode);
+		if (!ret)
+			vha_dev_hwwdt_setup(session->vha, cycles, mode);
+		else if (ret != -EIO) {
+			dev_err(vha->dev,
+				"%s: can't obtain HWWDT info!\n",
+				__func__);
+			goto out_error;
+		}
+	}
+
+	if (CMD_EXEC_ON_HW(cmd)) {
+		cmd->in_hw = true;
+		if (!cmd->queued)
+			vha->pendcmd[VHA_CNN_CMD].cmd = cmd;
+	}
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+	/* Mark kick for dummy driver */
+	cmd->dummy_kicked = true;
+#endif
+
+	/* Consider this cmd as kicked. */
+	vha->pri_q_counters[cmd->user_cmd.priority]--;
+	cmd->subseg_current++;
+
+	ret = 0;
+	/* Setup kick info */
+	val32 = vha_dev_kick_prepare(vha, cmd,
+			session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id);
+
+	img_pdump_printf("-- CNN_SETUP_END\n");
+
+	/* Remember the time cnn is kicked */
+	GETNSTIMEOFDAY(&cmd->hw_proc_start);
+	vha->stats.hw_proc_start = cmd->hw_proc_start;
+	/* Need to generate proper pdump */
+	if (cmd->queued &&
+			vha->low_latency == VHA_LL_SW_KICK) {
+		/* Do not write to pdump;
+		 * this needs to be done after the irq POL */
+		IOWRITE64(vha->reg_base, VHA_CR_OS(CNN_CONTROL), val32);
+		dev_dbg(vha->dev, "%s: CNN kick queued (%p)!\n",
+					__func__, cmd);
+		cmd->queued = false;
+	} else {
+		img_pdump_printf("-- CNN_KICK_BEGIN\n");
+		img_pdump_printf("-- CNN kick!\n");
+		IOWRITE64_PDUMP(val32, VHA_CR_OS(CNN_CONTROL));
+		dev_dbg(vha->dev, "%s: CNN kick %s (%p)!\n",
+					__func__, cmd->queued ? "queued" : "", cmd);
+		img_pdump_printf("-- CNN_KICK_END\n");
+	}
+
+	vha->stats.cnn_kicks++;
+
+	/* notify any observers of the submit event */
+	if (vha_observers.submitted)
+		vha_observers.submitted(vha->id, session->id, cmd->user_cmd.cmd_id,
+								(cmd->subseg_current == VHA_CMD_SUBSEG_NUM(cmd)),
+								cmd->user_cmd.priority);
+
+out_error:
+	if (ret != 0) {
+		/* Consider this cmd as kicked for errors too. */
+		vha->pri_q_counters[cmd->user_cmd.priority]--;
+		cmd->subseg_current++;
+	}
+	return ret;
+}
+
+/*
+ * append a string to the pdump TXT file
+ * buffers:
+ *   none
+ * data:
+ *   string to be printed
+ */
+static int do_cmd_cnn_pdump_msg(const struct vha_cmd *cmd)
+{
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	int ret = 0;
+
+	if (user_cmd->num_inbufs != 0 || user_cmd->num_bufs != 0) {
+		dev_err(session->vha->dev, "pdump msg cmd must not reference buffers\n");
+		ret = -EINVAL;
+	}
+	/* remember the pdump message may not be NUL-terminated */
+	img_pdump_printf("%.*s\n", (int)cmd->size, (char *)user_cmd->data);
+	return ret;
+}
+
+/*
+ * Simple procedure that generates watchdog interrupt
+ */
+void vha_cnn_start_calib(struct vha_dev *vha)
+{
+	uint64_t clk;
+	uint32_t start;
+
+	/* Setup hw watchdog before kicking the hw */
+	vha_dev_hwwdt_setup(vha, vha->calibration_cycles, 0);
+
+	/* Disable the command decoder so we can generate a wdt
+	 * interrupt without providing any buffer address */
+	clk = IOREAD64(vha->reg_base, VHA_CR_CLK_CTRL0);
+	VHA_CR_CLEARBITS(clk, CLK_CTRL0, CNN_CMD);
+	IOWRITE64(vha->reg_base, VHA_CR_CLK_CTRL0, clk);
+
+	/* Make sure the cmd clock has switched off */
+	udelay(100);
+
+	/* Enable MMU bypass */
+	IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
+		VHA_CR_OS(MMU_CTRL));
+
+	/* Set minimal command stream size */
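+	/* CMD_SIZE_MIN1 appears to hold the stream size in 32-byte units,
+	 * minus one (assumption from the field name): 2048/32-1 encodes
+	 * a 2 kB stream. */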
+	start = (2048/32-1) << VHA_CR_OS(CNN_CONTROL_CMD_SIZE_MIN1_SHIFT);
+	start |= VHA_CR_OS(CNN_CONTROL_START_EN);
+	/* write the START bit */
+	IOWRITE64(vha->reg_base, VHA_CR_OS(CNN_CONTROL), start);
+	/* Remember the time cnn is kicked */
+	GETNSTIMEOFDAY(&vha->stats.hw_proc_start);
+}
+
+void vha_cnn_update_stats(struct vha_dev *vha)
+{
+	vha->stats.cnn_last_proc_us =
+		vha->stats.last_proc_us;
+	vha->stats.cnn_total_proc_us +=
+		vha->stats.last_proc_us;
+
+	if (vha->stats.cnn_kicks) {
+		uint64_t avg = vha->stats.cnn_total_proc_us;
+		do_div(avg, vha->stats.cnn_kicks);
+		vha->stats.cnn_avg_proc_us = avg;
+	}
+#if defined(HW_AX2)
+	vha->stats.cnn_last_cycles =
+			IOREAD64(vha->reg_base, VHA_CR_CNN_WDT_TIMER);
+#elif defined(HW_AX3)
+	vha->stats.cnn_last_cycles =
+			IOREAD64(vha->reg_base, VHA_CR_OS(CNN_PERFORMANCE));
+#endif
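+	/* Estimated processing time: est_us = cycles * 1000 / freq_khz,
+	 * e.g. 400000 cycles at 800000 kHz (800 MHz) gives 500 us. */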
+	if (vha->stats.cnn_last_cycles && vha->freq_khz) {
+		uint64_t est_proc_us = 1000UL * vha->stats.cnn_last_cycles;
+		do_div(est_proc_us, vha->freq_khz);
+		vha->stats.cnn_last_est_proc_us = est_proc_us;
+	}
+	vha->stats.cnn_total_cycles += vha->stats.cnn_last_cycles;
+	if (vha->stats.cnn_kicks &&
+			vha->stats.cnn_total_cycles && vha->freq_khz) {
+		uint64_t avg = 1000UL * vha->stats.cnn_total_cycles;
+		do_div(avg, vha->stats.cnn_kicks);
+		do_div(avg, vha->freq_khz);
+		vha->stats.cnn_avg_est_proc_us = avg;
+	}
+}
+
+/*
+ * a command has completed: send a notification to the user
+ */
+void vha_cnn_cmd_completed(struct vha_cmd *cmd, int status)
+{
+	struct vha_session *session = cmd->session;
+	struct vha_dev *vha = session->vha;
+	struct vha_rsp *rsp = NULL;
+	int i;
+	struct vha_user_cnn_submit_rsp *cnn_submit_rsp = NULL;
+
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+
+	switch (user_cmd->cmd_type) {
+	case VHA_CMD_CNN_SUBMIT:
+	{
+		size_t mem_usage;
+		/* allocate sufficient space for the response:
+		 * the generic vha_user_rsp embedded in vha_rsp is swapped
+		 * for the larger vha_user_cnn_submit_rsp */
+		size_t sz = sizeof(*rsp)
+			+ sizeof(struct vha_user_cnn_submit_rsp)
+			- sizeof(struct vha_user_rsp);
+		uint32_t status_mask;
+		uint32_t ready_mask;
+		uint32_t cmpl_val = VHA_CR_OS(VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_EN);
+#if defined(HW_AX2)
+		/* wait for any status change:
+		 * WDT, MMU_PF, ERROR, COMPLETE
+		 */
+		status_mask = 0xffffffff;
+		ready_mask = 0xffffffff;
+#elif defined(HW_AX3)
+		/* status mask: wait for a status change (either ERROR or COMPLETE);
+		 * note that, unlike the live driver, pdump will ignore the MMU_PF,
+		 * which will have to be detected by the WDT
+		 */
+		status_mask = VHA_CR_OS(VHA_EVENT_STATUS_VHA_ERROR_CLRMSK)
+				| VHA_CR_OS(VHA_EVENT_STATUS_VHA_CNN0_COMPLETE_CLRMSK);
+		ready_mask = VHA_CR_OS(VHA_EVENT_STATUS_VHA_READY_CLRMSK);
+
+		/* Ignore PARITY when waiting for status change */
+		status_mask &= VHA_CR_OS(VHA_EVENT_STATUS_PARITY_CLRMSK);
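+		/* (The _CLRMSK constants are inverted field masks - every bit
+		 *  set except the field - per the register header convention,
+		 *  so AND-ing one drops that field from the mask.) */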
+#ifdef VHA_SCF
+		if (session->vha->hw_props.supported.parity &&
+				!session->vha->parity_disable) {
+			/* If complete bit is set then parity bit must be set as well ! */
+			cmpl_val |= VHA_CR_OS(VHA_EVENT_STATUS_PARITY_EN);
+		}
+#else
+		/* Ignore PARITY, so that non-SCF pdump may work with SC CSIM */
+		ready_mask &= VHA_CR_OS(VHA_EVENT_STATUS_PARITY_CLRMSK);
+#endif
+#endif
+		rsp = kzalloc(sz, GFP_KERNEL);
+		if (rsp == NULL) {
+			session->oom = true;
+			return;
+		}
+
+		cnn_submit_rsp = (struct vha_user_cnn_submit_rsp*)&rsp->user_rsp;
+		rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
+
+		if (session->vha->hw_bypass) {
+			session->vha->hw_bypass--;
+			break;
+		}
+
+		img_pdump_printf("-- CNN_WAIT_BEGIN\n");
+		/* pdump POL for status change
+		 * count=cnn_pdump_poll_count, delay=1000 cycles
+		 */
+		img_pdump_printf("-- Wait for any CNN status\n"
+				"POL :REG:%#x 0 %#x 3 %u 1000\n",
+				VHA_CR_OS(VHA_EVENT_STATUS),
+				status_mask,
+				cnn_pdump_poll_count);
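+		/* (The POL arguments appear to be: register offset, expected
+		 *  value, mask, compare operator, poll count and delay in
+		 *  cycles - inferred from usage here, not from a pdump spec.) */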
+
+		/* quick pdump POL for the status complete flag only:
+		 * count=1, delay=10 cycles
+		 */
+		img_pdump_printf("-- Check for CNN_COMPLETE flag only\n"
+				"POL :REG:%#x %#x 0x%x 0 1 10\n",
+				VHA_CR_OS(VHA_EVENT_STATUS),
+				cmpl_val,
+				ready_mask);
+#ifdef VHA_SCF
+		if (session->vha->hw_props.supported.parity &&
+				!session->vha->parity_disable) {
+			/* Check CNN_STATUS parity */
+			uint32_t cnn_status = VHA_CR_SETBITS_OS(CNN_STATUS,
+					STREAM_COUNT, 1);
+			cnn_status |= VHA_CR_SETBITS_OS(CNN_STATUS,
+					PARITY, 1);
+			img_pdump_printf("-- Check for CNN_STATUS parity\n"
+					"POL :REG:%#x %#x 0xffffffff 0 1 10\n",
+					VHA_CR_OS(CNN_STATUS), cnn_status);
+		}
+#endif
+		/* quick pdump POL for AXI errors:
+		 * count=1, delay=10 cycles
+		 */
+		img_pdump_printf("-- Post check of AXI status\n"
+				"POL :REG:%#x 0 0xffffffff 0 1 10\n",
+				VHA_CR_ACE_STATUS);
+
+		/* We do clear interrupts in the irq handler,
+		 * but this is not recorded into pdump because
+		 * of the irq context, so do it here */
+		img_pdump_printf("-- Clear CNN events\n"
+				"WRW64 :REG:%#x %#x\n",
+				VHA_CR_OS(VHA_EVENT_CLEAR),
+				VHA_CR_OS(VHA_EVENT_CLEAR_VHA_CNN0_COMPLETE_EN) |
+				VHA_CNN_ERR_EVNTS);
+
+		/* Try to flush hw debug buffers first
+		 * - this does pdump SAB when proper checkpoint is set */
+		vha_dbg_flush_hwbufs(session, 1, 0);
+
+		/* pdump SAB for each of the output buffers */
+		img_pdump_printf("-- Save outputs\n");
+		for (i = user_cmd->num_inbufs; i < user_cmd->num_bufs; i++) {
+			struct vha_buffer *buf;
+			struct vha_user_cnn_submit_cmd *msg =
+				container_of(user_cmd,
+						struct vha_user_cnn_submit_cmd,
+						msg);
+			uint32_t offset;
+			uint32_t size;
+
+			buf = vha_find_bufid(session, user_cmd->data[i]);
+			if (buf == NULL) {
+				dev_err(session->vha->dev,
+						"%s: invalid buffer id:%d\n",
+						__func__, user_cmd->data[i]);
+				continue;
+			}
+			if (buf->id == msg->cmdbuf) {
+				offset = 0;
+				size = buf->size;
+			} else {
+				offset = msg->bufoffsets[i-1];
+				size = msg->bufsizes[i-1];
+			}
+
+			vha_pdump_sab_buf(session, PDUMP_RES,
+					buf, offset, size);
+
+			/* Update status; do not signal the fence yet,
+			 * it is done explicitly below, after cache invalidation */
+			vha_set_buf_status(session, buf->id, VHA_BUF_FILLED_BY_HW,
+					VHA_SYNC_NONE, false);
+
+			if (vha_buf_needs_inval(session, buf->id) && !status)
+				img_mem_sync_device_to_cpu(session->mem_ctx, buf->id);
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+			img_mem_signal_fence(session->mem_ctx, buf->id);
+#endif
+			vha_dump_digest(session, buf, cmd);
+		}
+
+		if (session->vha->low_latency == VHA_LL_SW_KICK) {
+			struct vha_cmd *cmd =
+				session->vha->queuedcmd[VHA_CNN_CMD].cmd;
+
+			if (cmd && cmd->queued) {
+				/* Setup kick info */
+				uint64_t val = vha_dev_kick_prepare(session->vha, cmd,
+						session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id);
+				img_pdump_printf("-- CNN kick (queued)!\n");
+				img_pdump_printf("WRW64 :REG:%#x %#llx\n",
+					VHA_CR_OS(CNN_CONTROL), val);
+			}
+		}
+		img_pdump_printf("-- CNN_WAIT_END\n");
+
+		img_mem_get_usage(session->mem_ctx, NULL, &mem_usage);
+		/* send out an event when submit is complete */
+		if (vha_observers.completed)
+			vha_observers.completed(
+				session->vha->id,
+				session->id,
+				user_cmd->cmd_id,
+				status,
+				session->vha->stats.cnn_last_cycles,
+				mem_usage,
+				user_cmd->priority);
+
+		/* post some metrics about the hw to user space */
+#ifdef MEM_USAGE_LAST_METRICS_ARE_AVAILABLE
+		cnn_submit_rsp->mem_usage = mem_usage;
+#else
+		cnn_submit_rsp->mem_usage = ~0;
+#endif
+		cnn_submit_rsp->last_proc_us = cmd->proc_us;
+		cnn_submit_rsp->hw_cycles = cmd->hw_cycles;
+		dev_dbg(session->vha->dev, "%s: %p, hw_cycles %llx\n", __func__,
+				cmd, session->vha->stats.cnn_last_cycles);
+
+		if (session->vha->stats.cnn_last_cycles > (uint32_t)~0)
+			dev_warn(session->vha->dev,
+				"%s: hw_cycles %llx exceeds 32bit limit\n",
+				__func__,
+				session->vha->stats.cnn_last_cycles);
+		break;
+	}
+	case VHA_CMD_CNN_PDUMP_MSG:
+	default:
+		/* allocate space for standard response */
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+		if (rsp == NULL) {
+			session->oom = true;
+			return;
+		}
+		rsp->size = sizeof(rsp->user_rsp);
+		break;
+	}
+
+	if (user_cmd->flags & VHA_CMDFLAG_NOTIFY) {
+		rsp->user_rsp.cmd_id = cmd->user_cmd.cmd_id;
+		rsp->user_rsp.err_no = session->vha->hw_bypass ? 0 : status;
+
+		cmd->rsp = rsp;
+	} else
+		kfree(rsp);
+}
+
+/*
+ * Perform a command, as requested by user.
+ * note: this function is called with vha_dev.lock == locked
+ */
+int vha_do_cnn_cmd(struct vha_cmd *cmd)
+{
+	struct vha_session *session = cmd->session;
+	const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
+	int status = -EINVAL;
+
+	dev_dbg(session->vha->dev,
+		"CNN command: id:%x type:%x nin:%x nbufs:%x\n",
+		user_cmd->cmd_id, user_cmd->cmd_type,
+		user_cmd->num_inbufs, user_cmd->num_bufs);
+#if 0
+	print_hex_dump_debug("VHA CMD: ", DUMP_PREFIX_NONE, 4, 4,
+				user_cmd, ALIGN(cmd->size, 4), false);
+#endif
+
+	switch (user_cmd->cmd_type) {
+	case VHA_CMD_CNN_SUBMIT:
+		status = do_cmd_cnn_submit(cmd);
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+		if (cmd->dummy_kicked) {
+			struct vha_dev *vha = cmd->session->vha;
+			const struct vha_user_cnn_submit_cmd *cnn_user_cmd =
+				(struct vha_user_cnn_submit_cmd *)&cmd->user_cmd;
+			uint32_t estimated_cycles = cnn_user_cmd->estimated_cycles;
+			if (estimated_cycles == 0)
+				estimated_cycles = VHA_DUMMY_HW_PROCESSING_TIME_CYCLES;
+			cmd->dummy_exec_time = (estimated_cycles / (vha->freq_khz / 1000));
+			schedule_delayed_work(&vha->dummy_dwork,
+														usecs_to_jiffies(cmd->dummy_exec_time));
+			cmd->dummy_kicked = false;
+		}
+#endif
+		break;
+	case VHA_CMD_CNN_PDUMP_MSG:
+		status = do_cmd_cnn_pdump_msg(cmd);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Immediately send notification to user if not using hw at all
+	 * or submitting failed.
+	 */
+	if (!CMD_EXEC_ON_HW(cmd) || status) {
+		vha_cnn_cmd_completed(cmd, status);
+		vha_cmd_notify(cmd);
+		return 1;
+	}
+
+	return 0;
+}
+
+void vha_cnn_dump_status(struct vha_dev *vha)
+{
+	struct device *dev = vha->dev;
+
+	dev_err(dev, " CNN_STATUS:%llx ",
+		IOREAD64(vha->reg_base,
+			VHA_CR_OS(CNN_STATUS)));
+#ifdef HW_AX2
+	dev_err(dev, " CNN_WDT_COMPAREMATCH:%llx ",
+		IOREAD64(vha->reg_base,
+			VHA_CR_CNN_WDT_COMPAREMATCH));
+	dev_err(dev, " CNN_WDT_TIMER:%llx ",
+		IOREAD64(vha->reg_base,
+			VHA_CR_CNN_WDT_TIMER));
+#endif
+	dev_err(dev, " CNN_MEM_WDT_COMPAREMATCH:%llx ",
+		IOREAD64(vha->reg_base,
+			VHA_CR_CNN_MEM_WDT_COMPAREMATCH));
+	dev_err(dev, " CNN_MEM_WDT_TIMER:%llx ",
+		IOREAD64(vha->reg_base,
+			VHA_CR_CNN_MEM_WDT_TIMER));
+	dev_err(dev, " BIF_OUTSTANDING_READ:%llx\n",
+		IOREAD64(vha->reg_base,
+			VHA_CR_BIF_OUTSTANDING_READ));
+}

+ 1581 - 0
driver/vha/single/vha_dev.c

@@ -0,0 +1,1581 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <uapi/vha.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+
+#if defined(CFG_SYS_VAGUS)
+#include <hwdefs/nn_sys_cr_vagus.h>
+#endif
+
+#define ERR_EVENT_DESC(b) VHA_CR_OS(VHA_EVENT_STATUS_VHA_##b##_EN), __stringify(b)
+
+static void vha_dev_disable_events(struct vha_dev *vha)
+{
+	img_pdump_printf("-- Clear CNN events\n");
+	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_CLEAR));
+	img_pdump_printf("-- Disable CNN events\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(VHA_EVENT_ENABLE));
+	/* Clear the START bit!
+	 * Note: it is stated that writing 0 to this bit has no effect;
+	 * however, in error cases some hw blocks may start to process
+	 * previous requests after the previously disabled clocks
+	 * are turned back on */
+	IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_CONTROL));
+
+	/* Disable core events */
+	img_pdump_printf("-- Disable CORE events\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(VHA_EVENT_ENABLE));
+}
+
+__maybe_unused
+static void vha_dev_enable_clocks(struct vha_dev *vha)
+{
+	uint64_t __maybe_unused sys_clks = 0;
+	uint64_t __maybe_unused main_clks = 0;
+
+	/* Always use AUTO gating so clocks run only when needed */
+	sys_clks = VHA_SYS_CLOCKS_DEFAULT(AUTO);
+	main_clks = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
+	/* Enable sys clocks ! */
+	img_pdump_printf("-- Enable SYS clocks\n");
+	IOWRITE64_PDUMP(sys_clks, VHA_CR_SYS_CLK_CTRL0);
+	/* Enable main clocks ! */
+	img_pdump_printf("-- Enable MAIN clocks\n");
+	IOWRITE64_PDUMP(main_clks, VHA_CR_CLK_CTRL0);
+#if defined(CFG_SYS_VAGUS)
+	img_pdump_printf("-- Enable NN_SYS clocks\n");
+	IOWRITE64_PDUMP_REGIO(NN_SYS_CR_CLK_CTRL_MODE_AUTO,
+			NN_SYS_CR_BASE, NN_SYS_CR_CLK_CTRL, "REG_NNSYS");
+#endif
+}
+
+static void vha_dev_ready(struct vha_dev *vha)
+{
+#ifndef CONFIG_VHA_DUMMY
+	if (!vha->is_ready)
+		return;
+#endif
+	dev_dbg(vha->dev, "%s\n", __func__);
+
+	vha_dev_wait(vha);
+
+	/* Finally enable ALL events */
+	img_pdump_printf("-- Enable ALL events\n");
+	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_ENABLE));
+	img_pdump_printf("-- Clear ALL events\n");
+	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_CLEAR));
+#ifdef HW_AX2
+	img_pdump_printf("-- Clear CNN status\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_STATUS));
+#endif
+	img_pdump_printf("-- Clear MMU fault status\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(MMU_FAULT_STATUS1));
+	img_pdump_printf("-- Clear SLC debug status\n");
+	IOWRITE64_PDUMP(0, VHA_CR_SLC_STATUS_DEBUG);
+	img_pdump_printf("-- Reset PERF counters\n");
+	IOWRITE64_PDUMP(0, VHA_CR_PERF_RESET_FULL);
+}
+
+__maybe_unused
+static int vha_dev_reset(struct vha_dev *vha)
+{
+	img_pdump_printf("-- Set RESET bits\n");
+#if defined(CFG_SYS_VAGUS)
+	IOWRITE64_PDUMP_REGIO(NN_SYS_CR_RESET_CTRL_NN_SYS_EN,
+			NN_SYS_CR_BASE, NN_SYS_CR_RESET_CTRL, "REG_NNSYS");
+#endif
+	/* Perform reset procedure */
+	IOWRITE64_PDUMP(VHA_RESET_DEFAULT, VHA_CR_RESET_CTRL);
+
+	/* poll for reset deassertion
+	 * count=16, delay=256cycles
+	 */
+	img_pdump_printf("-- Wait for RESET deassertion\n");
+#if defined(CFG_SYS_VAGUS)
+	IOPOLL64_PDUMP_REGIO(0, 16, 256, NN_SYS_CR_RESET_CTRL_MASKFULL,
+			NN_SYS_CR_BASE, NN_SYS_CR_RESET_CTRL, "REG_NNSYS");
+#endif
+	IOPOLL64_PDUMP(0, 16, 256, VHA_CR_RESET_CTRL_MASKFULL,
+					VHA_CR_RESET_CTRL);
+	return 0;
+}
+
+__maybe_unused
+static int vha_dev_disable_clocks(struct vha_dev *vha)
+{
+	/* If auto gating was turned on, wait for clocks idle state */
+	img_pdump_printf("-- Wait for clocks IDLE state\n");
+	IOPOLL64_PDUMP(0, 1000, 1000,
+			VHA_CR_CLK_STATUS0_MASKFULL,
+			VHA_CR_CLK_STATUS0);
+#if defined(CFG_SYS_VAGUS)
+	IOPOLL64_PDUMP_REGIO(0, 100, 1000, NN_SYS_CR_CLK_STATUS_MASKFULL,
+			NN_SYS_CR_BASE, NN_SYS_CR_CLK_STATUS, "REG_NNSYS");
+#endif
+	/* Wait for MMU,CCM,RDI,XBAR  IDLE state */
+	img_pdump_printf("-- Wait for memory bus interface IDLE state\n");
+	IOPOLL64_PDUMP(0xFFFF, 100, 1000, VHA_CR_SLC_IDLE_MASKFULL,
+			VHA_CR_SLC_IDLE);
+
+	/* Finally disable clocks */
+	img_pdump_printf("-- Disable MAIN clocks\n");
+	IOWRITE64_PDUMP(0, VHA_CR_CLK_CTRL0); /* main */
+	img_pdump_printf("-- Disable SYS clocks\n");
+	IOWRITE64_PDUMP(0, VHA_CR_SYS_CLK_CTRL0); /* sys */
+#if defined(CFG_SYS_VAGUS)
+	img_pdump_printf("-- NN_SYS clocks\n");
+	IOWRITE64_PDUMP_REGIO(0, NN_SYS_CR_BASE,
+			NN_SYS_CR_CLK_CTRL, "REG_NNSYS"); /* nn_sys */
+#endif
+	return 0;
+}
+
+/* start the device */
+int vha_dev_start(struct vha_dev *vha)
+{
+	int ret = 0;
+
+	/* Cancel APM request if new inference comes */
+	cancel_delayed_work(&vha->apm_dworks[0].dwork);
+
+	if (vha->state == VHA_STATE_ON)
+		return 0; /* not an error */
+
+	dev_dbg(vha->dev, "%s\n", __func__);
+
+/* Assuming OS0 is the privileged one */
+#if _OSID_ == 0 /* For HW_AX2 this is always true */
+	pm_runtime_get_sync(vha->dev);
+	/////////////// POWER ON //////////////////////////
+	img_pdump_printf("-- POWER_ON_BEGIN\n");
+
+	/* Prepare device ...  */
+	ret = vha_dev_prepare(vha);
+	if (ret) {
+		dev_err(vha->dev, "%s: Error preparing device!\n", __func__);
+		return ret;
+	}
+	/* Reset device */
+	ret = vha_dev_reset(vha);
+	if (ret) {
+		dev_err(vha->dev, "%s: Error resetting device!\n", __func__);
+		return ret;
+	}
+	/* Enable device clocks */
+	vha_dev_enable_clocks(vha);
+	img_pdump_printf("-- POWER_ON_END\n");
+	/* Call device specific setup */
+	vha_dev_setup(vha);
+	/////////////////////////////////////////////////////
+#endif
+
+	vha_dev_ready(vha);
+
+	vha->state = VHA_STATE_ON;
+	/* Remember the time hw is powered on */
+	GETNSTIMEOFDAY(&vha->stats.hw_start);
+	return ret;
+}
+
+/* stop the device */
+int vha_dev_stop(struct vha_dev *vha, bool reset)
+{
+	int ret = 0;
+
+	if (vha->state == VHA_STATE_OFF)
+		return 0;  /* not an error */
+
+	/* Cancel APM request if we are about to power off the core */
+	cancel_delayed_work(&vha->apm_dworks[0].dwork);
+
+	dev_dbg(vha->dev, "%s\n", __func__);
+	/* Disable events at first */
+	vha_dev_disable_events(vha);
+
+	vha->is_ready = false;
+/* Assuming OS0 is the privileged one */
+#if _OSID_ == 0 /* For HW_AX2 */
+	/////////////// POWER_OFF //////////////////////////
+	img_pdump_printf("-- POWER_OFF_BEGIN\n");
+	/* Reset core in case of error or pending inference */
+	if (reset) {
+		/* ensure that clocks are set to AUTO before reset */
+		vha_dev_enable_clocks(vha);
+		ret = vha_dev_reset(vha);
+	}
+	if (ret)
+		dev_warn(vha->dev,
+			"%s: Problem with resetting device!\n",
+			__func__);
+
+	/* Disable device clocks */
+	ret = vha_dev_disable_clocks(vha);
+	if (ret)
+		dev_warn(vha->dev,
+					"%s: Problem with disabling clocks!\n",
+					__func__);
+
+	img_pdump_printf("-- POWER_OFF_END\n");
+	/////////////////////////////////////////////////////
+	if (reset) {
+		pm_runtime_mark_last_busy(vha->dev);
+		pm_runtime_put_sync_autosuspend(vha->dev);
+	} else {
+		pm_runtime_put_sync(vha->dev);
+	}
+#endif
+
+	vha->state = VHA_STATE_OFF;
+	/* Update the up time of the core */
+	if (!vha->do_calibration) {
+		uint64_t tmp = 0;
+		struct TIMESPEC now;
+		GETNSTIMEOFDAY(&now);
+		if (get_timespan_us(&vha->stats.hw_start, &now, &tmp)) {
+			do_div(tmp, 1000UL);
+			vha->stats.uptime_ms += tmp;
+			if (vha->stats.uptime_ms)
+				vha_update_utilization(vha);
+			else
+				dev_dbg(vha->dev,
+					"%s Too short execution time to calculate utilization!\n",
+					__func__);
+		} else
+			WARN_ON(1);
+	}
+
+	vha->active_mmu_ctx = VHA_INVALID_ID;
+
+	spin_lock_irq(&vha->irq_lock);
+	vha->irq_status = 0;
+	vha->irq_count = 0;
+	vha->stream_count = 0;
+	spin_unlock_irq(&vha->irq_lock);
+
+	return ret;
+}
+
+void vha_update_utilization(struct vha_dev *vha)
+{
+	uint64_t tmp;
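+	/* cnn_total_proc_us is in us and uptime_ms in ms, so the quotient
+	 * expresses utilization in per-mille, i.e. us of processing per ms
+	 * of uptime (assumption based on the units). */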
+	tmp = vha->stats.cnn_total_proc_us;
+	do_div(tmp, vha->stats.uptime_ms);
+	vha->stats.cnn_utilization = tmp;
+}
+
+#ifdef VHA_EVENT_INJECT
+/*
+ * Inject EVENT_STATUS bits, requested by respective debugfs nodes, to
+ * the status register.
+ */
+static inline void __inject_event_regs(struct vha_dev* vha, uint64_t* event_status)
+{
+	if(!__EVENT_INJECT())
+		return;
+
+	if (*event_status & (1 << VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_COMPLETE_SHIFT))
+		*event_status |= vha->injection.vha_cr_event;
+}
+#endif
+
+/* Top half */
+irqreturn_t vha_handle_irq(struct device *dev)
+{
+	struct vha_dev *vha = vha_dev_get_drvdata(dev);
+	int ret = IRQ_HANDLED;
+	uint64_t event_status;
+
+	if (!vha)
+		return IRQ_NONE;
+
+	event_status = IOREAD64(vha->reg_base, VHA_CR_OS(VHA_EVENT_STATUS));
+	event_status &= IOREAD64(vha->reg_base, VHA_CR_OS(VHA_EVENT_ENABLE));
+	/* On fpga platforms it is possible to get
+	 * a spurious interrupt when the hw has died.
+	 * Do not proceed; just throw a warning */
+	if (event_status == VHA_DEAD_HW || event_status == ~0) {
+		WARN_ONCE(1, "Hardware is dead!");
+		return IRQ_NONE;
+	}
+
+#ifdef VHA_EVENT_INJECT
+	__inject_event_regs(vha, &event_status);
+#endif
+
+#ifdef VHA_SCF
+	if (vha->hw_props.supported.parity &&
+			!vha->parity_disable) {
+		bool par_bit = img_mem_calc_parity(event_status &
+				~VHA_CR_BITMASK(VHA_EVENT_STATUS_TYPE, PARITY));
+		if (par_bit !=
+				VHA_CR_GETBITS(VHA_EVENT_STATUS_TYPE, PARITY,
+						event_status)) {
+			dev_err(dev, "Event status register parity error!\n");
+			/* Use the real event to indicate the error */
+			event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN);
+		}
+		/* Clear the PARITY bit - it's not a valid event */
+		VHA_CR_CLEARBITS(event_status, VHA_EVENT_STATUS_TYPE, PARITY);
+	}
+#endif
+
+	if (event_status & VHA_EVNTS_DEFAULT) {
+		uint64_t cnn_status;
+		uint8_t count;
+
+		/* clear the interrupt:
+		 * best not to write pdump in interrupts */
+		IOWRITE64(vha->reg_base, VHA_CR_OS(VHA_EVENT_CLEAR),
+				event_status & VHA_EVNTS_DEFAULT);
+
+		/* Read the CNN status; a single IRQ may be raised for multiple kicks */
+		cnn_status = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_STATUS));
+
+#ifdef VHA_SCF
+		if (vha->hw_props.supported.parity &&
+				!vha->parity_disable) {
+			bool par_bit = img_mem_calc_parity(cnn_status &
+					~VHA_CR_BITMASK_OS(CNN_STATUS, PARITY));
+			if (par_bit != VHA_CR_GETBITS_OS(CNN_STATUS, PARITY, cnn_status)) {
+				dev_err(dev, "CNN status register parity error!\n");
+				/* Use the real event to indicate the error */
+				event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN);
+			}
+		}
+#endif
+		if (vha->is_ready) {
+			/* Post check for AXI bus errors */
+			uint64_t ace_status = IOREAD64(vha->reg_base, VHA_CR_ACE_STATUS);
+			if (ace_status) {
+				dev_err(vha->dev, "AXI bus protocol error: %#llx\n",
+							ace_status);
+				/* Use AXI error event to indicate that */
+				event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_AXI_ERROR_EN);
+			}
+		}
+
+		/* Read the stream count as single IRQ may be raised for multiple kicks */
+		count = VHA_CR_GETBITS_OS(CNN_STATUS, STREAM_COUNT, cnn_status);
+
+		spin_lock(&vha->irq_lock);
+		/* store the status to be processed later */
+		if (vha->do_calibration ||
+				vha_is_busy(vha)) {
+			vha->irq_status |= event_status;
+
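+			/* STREAM_COUNT is a running hw count of completed streams;
+			 * the delta against the cached stream_count gives the number
+			 * of kicks this interrupt accounts for. */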
+			if (vha->low_latency == VHA_LL_SELF_KICK)
+				/* Two separate IRQs may be raised for multiple kicks */
+				vha->irq_count += count - vha->stream_count;
+			else
+				/* Only single IRQ may be raised otherwise ... */
+				vha->irq_count = count - vha->stream_count;
+
+			vha->stream_count = count;
+			/* Record hw processing end timestamps */
+			vha->stats.hw_proc_end_prev = vha->stats.hw_proc_end;
+			GETNSTIMEOFDAY(&vha->stats.hw_proc_end);
+		} else {
+			/* Command may have been aborted before this handler is executed */
+			vha->irq_status = 0;
+			vha->irq_count = 0;
+			vha->stream_count = 0;
+		}
+		spin_unlock(&vha->irq_lock);
+
+		ret = IRQ_WAKE_THREAD;
+	} else
+		return IRQ_NONE;
+
+	dev_dbg(dev, "IRQ 0x%08llx\n", event_status);
+
+	return ret;
+}
+
+static bool vha_rollback_cnn_cmds(struct vha_dev *vha)
+{
+	bool processing = false;
+	/* Commands that were not processed are still on the pending list
+	 * of each session, so just mark the hw pending lists as empty */
+	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
+		struct vha_cmd *pendcmd = vha->pendcmd[VHA_CNN_CMD].cmd;
+		pendcmd->in_hw = false;
+		pendcmd->queued = false;
+		pendcmd->rolled_back = true;
+		processing = true;
+		vha->stats.cnn_kicks_aborted += pendcmd->subseg_current;
+		vha->stats.cnn_kicks_completed -= pendcmd->subsegs_completed;
+		vha->pri_q_counters[pendcmd->user_cmd.priority] += pendcmd->subseg_current;
+		pendcmd->subseg_current = 0;
+		pendcmd->subsegs_completed = 0;
+		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
+	}
+	/* Low-latency mode: roll back the queued command too */
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd) {
+		struct vha_cmd *queuedcmd = vha->queuedcmd[VHA_CNN_CMD].cmd;
+		queuedcmd->in_hw = false;
+		queuedcmd->queued = false;
+		queuedcmd->rolled_back = true;
+		vha->stats.cnn_kicks_aborted += queuedcmd->subseg_current;
+		vha->stats.cnn_kicks_completed -= queuedcmd->subsegs_completed;
+		vha->pri_q_counters[queuedcmd->user_cmd.priority] += queuedcmd->subseg_current;
+		queuedcmd->subseg_current = 0;
+		queuedcmd->subsegs_completed = 0;
+		vha->queuedcmd[VHA_CNN_CMD].cmd = NULL;
+	}
+	dev_dbg(vha->dev, "%s: (%d)\n", __func__, processing);
+
+	return processing;
+}
+
+bool vha_rollback_cmds(struct vha_dev *vha)
+{
+	return vha_rollback_cnn_cmds(vha);
+}
+
+static bool vha_is_processing(struct vha_dev *vha)
+{
+	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
+}
+
+int vha_dev_suspend_work(struct vha_dev *vha)
+{
+	bool processing = false;
+	int ret;
+
+	/* Check if anything is being processed right now. */
+	processing = vha_is_processing(vha);
+	/* Forcing hardware disable. */
+	ret = vha_dev_stop(vha, processing);
+	/* Rollback commands after hw is stopped. */
+	vha_rollback_cmds(vha);
+
+	return ret;
+}
+
+/*
+ * handles the command already processed by the hw.
+ */
+static bool vha_handle_cmd(struct vha_dev *vha, int status)
+{
+	struct vha_cmd *cmd = NULL;
+
+	cmd = vha->pendcmd[VHA_CNN_CMD].cmd;
+	if (unlikely(!cmd)) {
+		dev_dbg(vha->dev, "No command. Probably it has been aborted\n");
+		return false;
+	}
+
+	{
+		uint64_t proc_time = 0;
+		struct TIMESPEC *from = &cmd->hw_proc_start;
+		struct TIMESPEC *to = &vha->stats.hw_proc_end;
+
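+		/* For back-to-back completions the hw effectively started this
+		 * command when the previous one finished, so measure from the
+		 * previous completion rather than from the kick timestamp. */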
+		if (TIMESPEC_COMPARE(&vha->stats.hw_proc_end_prev, &cmd->hw_proc_start) >= 0)
+			from = &vha->stats.hw_proc_end_prev;
+
+		if (get_timespan_us(from, to, &proc_time)) {
+			vha->stats.last_proc_us = proc_time;
+		} else {
+			vha->stats.last_proc_us = 0;
+		}
+		/* Update cnn stats */
+		vha_cnn_update_stats(vha);
+
+		/* Update cmd stats. */
+		cmd->proc_us += vha->stats.cnn_last_proc_us;
+		cmd->hw_cycles += vha->stats.cnn_last_cycles;
+	}
+
+	/* Mark this subsegment as completed. */
+	if (status == 0)
+		vha->pendcmd[VHA_CNN_CMD].cmd->subsegs_completed++;
+	/* If this isn't the last subsegment, just return to process the next one. */
+	if ((cmd->subseg_current < VHA_CMD_SUBSEG_NUM(cmd)) && (status == 0)) {
+		vha->pendcmd[VHA_CNN_CMD].cmd->in_hw = false;
+		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
+		return true;
+	}
+
+	vha_cnn_cmd_completed(cmd, status);
+
+	if (status) {
+		/* Rollback any queued command ... */
+		vha_rollback_cnn_cmds(vha);
+		/* Adjust for just rolled back pending cmd. */
+		vha->pri_q_counters[cmd->user_cmd.priority] -= VHA_CMD_SUBSEG_NUM(cmd);
+		/* Notify immediately current command */
+		vha_cmd_notify(cmd);
+
+		return false;
+	}
+
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd)
+		vha->pendcmd[VHA_CNN_CMD].cmd = vha->queuedcmd[VHA_CNN_CMD].cmd;
+	else
+		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
+
+	vha->queuedcmd[VHA_CNN_CMD].cmd = NULL;
+	dev_dbg(vha->dev,
+			"%s: %p -> new pending %p\n",
+			__func__, cmd, vha->pendcmd[VHA_CNN_CMD].cmd);
+
+	vha_cmd_notify(cmd);
+
+	return true;
+}
+
+static void vha_do_queued_cmd(struct vha_dev *vha)
+{
+	struct vha_cmd *cmd, *pend;
+
+	cmd = vha->queuedcmd[VHA_CNN_CMD].cmd;
+
+	dev_dbg(vha->dev,
+			"%s: queued %p pending %p\n",
+			__func__, cmd, vha->pendcmd[VHA_CNN_CMD].cmd);
+
+	if (!cmd ||
+			vha->low_latency == VHA_LL_DISABLED ||
+			vha->low_latency == VHA_LL_SELF_KICK ||
+			!cmd->queued) {
+		dev_dbg(vha->dev, "%s: skipping!\n", __func__);
+		return;
+	}
+
+	/* store actual pending command as it will be modified */
+	pend = vha->pendcmd[VHA_CNN_CMD].cmd;
+
+	/* at this point we should be able to process the cmd */
+	vha_do_cnn_cmd(cmd);
+
+	/* restore pending */
+	vha->pendcmd[VHA_CNN_CMD].cmd = pend;
+}
+
+static int vha_report_failure(struct vha_dev *vha, uint64_t status,
+		const struct vha_biterr bits[], int bits_size)
+{
+	int error = 0;
+	int i;
+	int cmdid = -1;
+	int sesid = -1;
+
+	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
+		cmdid = vha->pendcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id;
+		sesid = vha->pendcmd[VHA_CNN_CMD].cmd->session->id;
+	}
+
+	if (vha_observers.error)
+		vha_observers.error(vha->id, sesid, cmdid, status);
+
+	/* event status in human readable form */
+	for (i = 0; i < bits_size; i++) {
+		if (status & bits[i].b) {
+			dev_err(vha->dev,
+				" event status: %s\n",
+				bits[i].s);
+			/* convert from register bits into POSIX errno;
+			 * if multiple errors, an arbitrary errno is chosen */
+			error = bits[i].e;
+		}
+	}
+
+	return error;
+}
+
+/* If the vha event register reports CNN events, handle them */
+static int vha_handle_cnn_event(struct vha_dev *vha, uint64_t event_status)
+{
+	int err = 0;
+
+	if (vha_check_calibration(vha))
+		return 0;
+
+	if (event_status & VHA_CNN_ERR_EVNTS) {
+		static const struct vha_biterr err_bits[] = {
+			{-ETIMEDOUT, ERR_EVENT_DESC(CNN0_MEM_WDT)},
+#ifdef HW_AX2
+			{-ETIMEDOUT, ERR_EVENT_DESC(CNN0_WDT)},
+#endif
+			{-EIO,       ERR_EVENT_DESC(CNN0_ERROR)}
+		};
+
+		err = vha_report_failure(vha,
+				event_status, err_bits, ARRAY_SIZE(err_bits));
+
+		vha_cnn_dump_status(vha);
+	}
+
+	/* Poke the hw if a command was already
+	 * queued in the hw */
+	if (!err)
+		vha_do_queued_cmd(vha);
+	/* Handle actual command */
+	if (vha_handle_cmd(vha, err) == false)
+		err = -ENOENT;
+
+	return err;
+}
+
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+/* Simulating hw execution time by scheduling this delayed work. */
+void vha_dummy_worker(struct work_struct *work)
+{
+	struct vha_dev *vha = container_of(work, struct vha_dev, dummy_dwork.work);
+
+	mutex_lock(&vha->lock);
+
+	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
+		/* Record hw processing end timestamps */
+		vha->stats.hw_proc_end_prev = vha->stats.hw_proc_end;
+		GETNSTIMEOFDAY(&vha->stats.hw_proc_end);
+		/* Handle current pending command */
+		vha_handle_cnn_event(vha, VHA_CNN_CMPLT_EVNT);
+		vha->stats.cnn_kicks_completed++;
+		/* Schedule following commands */
+		vha_chk_cmd_queues(vha, true);
+	}
+
+	mutex_unlock(&vha->lock);
+}
+#endif
+
+/* Bottom half */
+irqreturn_t vha_handle_thread_irq(struct device *dev)
+{
+	struct vha_dev *vha = vha_dev_get_drvdata(dev);
+	irqreturn_t ret = IRQ_HANDLED;
+	uint64_t status;
+	uint8_t count, c = 0;
+	int err = 0;
+
+	if (!vha)
+		return IRQ_NONE;
+
+	mutex_lock(&vha->lock);
+
+#ifdef CONFIG_FAULT_INJECTION
+	if (!vha->irq_bh_pid)
+		vha->irq_bh_pid = task_pid_nr(current);
+
+	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
+		current->make_it_fail = true;
+	else
+		current->make_it_fail = false;
+#endif
+
+	spin_lock_irq(&vha->irq_lock);
+	status = vha->irq_status;
+	vha->irq_status = 0;
+	count = vha->irq_count;
+	vha->irq_count = 0;
+	if (!count) {
+		uint64_t proc_time = 0;
+
+		if (get_timespan_us(&vha->stats.hw_proc_start, &vha->stats.hw_proc_end,
+					&proc_time)) {
+			vha->stats.last_proc_us = proc_time;
+		} else {
+			vha->stats.last_proc_us = 0;
+		}
+	}
+	spin_unlock_irq(&vha->irq_lock);
+	/* Command may have been aborted before this handler is executed */
+	if (!status)
+		goto exit;
+
+	/* In self-kick mode two inferences may already have finished;
+	 * otherwise, only a single inference at a time */
+	if ((vha->low_latency == VHA_LL_SELF_KICK && count > 2) ||
+			(vha->low_latency != VHA_LL_SELF_KICK && count > 1))
+		WARN_ON(1);
+
+	dev_dbg(dev, "%s: status:%llx count:%d\n",
+			__func__, status, count);
+
+	do {
+		if (status & VHA_CORE_EVNTS) {
+			static const struct vha_biterr err_bits[] = {
+				{-EIO,       ERR_EVENT_DESC(AXI_ERROR)},
+				{-EFAULT,    ERR_EVENT_DESC(MMU_PAGE_FAULT)},
+#ifdef HW_AX3
+#ifdef VHA_SCF
+				{-EIO,       ERR_EVENT_DESC(MMU_PARITY_ERROR)},
+				{-EIO,       ERR_EVENT_DESC(PARITY_ERROR)},
+				{-EIO,       ERR_EVENT_DESC(LOCKSTEP_ERROR)},
+#endif
+				{-ETIMEDOUT, ERR_EVENT_DESC(HL_WDT)},
+				{-EIO,       ERR_EVENT_DESC(ERROR)}
+#endif
+			};
+
+#ifdef HW_AX3
+			if (status & VHA_EVENT_TYPE(HL_WDT)
+					&& vha->is_ready)
+				if (vha_check_calibration(vha))
+					break;
+
+			if ((status & VHA_CORE_EVNTS) ==
+					VHA_EVENT_TYPE(READY)
+					&& !vha->is_ready) {
+				vha->is_ready = true;
+				vha_dev_ready(vha);
+				if (vha->do_calibration) {
+					vha_cnn_start_calib(vha);
+					break;
+				} else
+					vha_chk_cmd_queues(vha, true);
+			}
+#endif
+
+			err = vha_report_failure(vha, status,
+					err_bits, ARRAY_SIZE(err_bits));
+			if (err) {
+				dev_err(vha->dev, "NNA hw failure: %llx\n", status);
+				dev_err(vha->dev, "   CLK_STATUS0:%llx ",
+					IOREAD64(vha->reg_base, VHA_CR_CLK_STATUS0));
+				dev_err(vha->dev, " VHA_EVENT_STATUS:%llx ", status);
+			}
+
+			if (status & VHA_EVENT_TYPE(MMU_PAGE_FAULT))
+				/* dump mmu status */
+				vha_mmu_status(vha);
+		}
+
+		/* If no core level error process cnn events */
+		if (!err && status & VHA_CNN_EVNTS)
+			err = vha_handle_cnn_event(vha, status);
+#ifdef HW_AX3
+		else if (status == VHA_EVENT_TYPE(ERROR)) {
+			/* Resubmit the command next time if no CNN error was
+			 * detected and only the ERROR bit is set;
+			 * that means another OS caused the error */
+			vha_rollback_cnn_cmds(vha);
+		}
+#endif
+		else if (err && vha->is_ready) { /* Core level error */
+			if (vha_handle_cmd(vha, err) == false)
+				err = -ENOENT;
+		}
+
+		c++;
+	} while (c < count && !err);
+
+	if (err) {
+		vha->stats.total_failures += count ? count : 1;
+		vha_dev_stop(vha, true);
+		/* Check queues ... */
+		vha_chk_cmd_queues(vha, true);
+	} else {
+		/* Run in BH context! */
+		vha_chk_cmd_queues(vha, false);
+	}
+	vha->stats.cnn_kicks_completed += count;
+
+exit:
+#ifdef CONFIG_FAULT_INJECTION
+	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
+		current->make_it_fail = false;
+#endif
+	mutex_unlock(&vha->lock);
+
+	return ret;
+}
+
+bool vha_rm_session_cmds(struct vha_session *session)
+{
+	struct vha_dev *vha = session->vha;
+	bool pend_removed = false;
+	bool queued_removed = false;
+	bool reschedule = false;
+	struct vha_cmd *cur_cmd, *tmp_cmd;
+	uint8_t pri;
+
+	/* Check if pend/queued commands will be removed. */
+	if (vha->pendcmd[VHA_CNN_CMD].cmd &&
+			vha->pendcmd[VHA_CNN_CMD].cmd->session == session) {
+		dev_warn(vha->dev,
+				"Removing a session while cnn cmd is still pending\n");
+		pend_removed = true;
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+		cancel_delayed_work(&vha->dummy_dwork);
+#endif
+	}
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
+			vha->queuedcmd[VHA_CNN_CMD].cmd->session == session) {
+		dev_warn(vha->dev,
+				"Removing a session while cnn cmd is still queued\n");
+		queued_removed = true;
+	}
+
+	/* Update session scheduling. */
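+	/* If only the pending command is being removed, rotate the
+	 * round-robin list so that the queued command's session is
+	 * scheduled first on the next round. */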
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
+			(pend_removed && !queued_removed)) {
+		uint8_t pri = vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.priority;
+		if (vha->queuedcmd[VHA_CNN_CMD].cmd->session !=
+					list_entry(&vha->sched_sessions[pri], struct vha_session,
+								sched_list[pri]))
+			while (list_first_entry(&vha->sched_sessions[pri], struct vha_session,
+						sched_list[pri]) != vha->queuedcmd[VHA_CNN_CMD].cmd->session)
+				list_rotate_left(&vha->sched_sessions[pri]);
+	}
+
+	/* Remove pend/queued commands if needed. */
+	if (pend_removed || queued_removed) {
+		vha_rollback_cnn_cmds(vha);
+		/* Need to reschedule too. */
+		reschedule = true;
+	}
+
+	/* Remove session related commands. */
+	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
+		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
+			/* rsp didn't make it to rsps list, free it now */
+			kfree(cur_cmd->rsp);
+
+			list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
+			vha->pri_q_counters[cur_cmd->user_cmd.priority] -=
+								(VHA_CMD_SUBSEG_NUM(cur_cmd) - cur_cmd->subseg_current);
+			if (vha_observers.canceled)
+				vha_observers.canceled(vha->id, session->id, cur_cmd->user_cmd.cmd_id,
+										cur_cmd->user_cmd.priority);
+			kfree(cur_cmd);
+		}
+	}
+
+	return reschedule;
+}
+
+bool vha_rm_session_cmds_masked(struct vha_session *session, uint32_t cmd_id,
+		uint32_t cmd_id_mask)
+{
+	struct vha_dev *vha = session->vha;
+	bool reschedule = false;
+	bool pend_removed = false;
+	uint32_t pend_aborted_kicks_adj_val = 0;
+	bool queued_removed = false;
+	uint32_t queued_aborted_kicks_adj_val = 0;
+
+	/* Check if pend/queued commands will be removed. */
+	if (vha->pendcmd[VHA_CNN_CMD].cmd &&
+			(vha->pendcmd[VHA_CNN_CMD].cmd->session == session) &&
+			(vha->pendcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id & cmd_id_mask)
+																	== cmd_id) {
+		pend_removed = true;
+		vha->stats.cnn_kicks_cancelled += vha->pendcmd[VHA_CNN_CMD].cmd->subseg_current;
+		pend_aborted_kicks_adj_val = vha->pendcmd[VHA_CNN_CMD].cmd->subseg_current;
+#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
+		cancel_delayed_work(&vha->dummy_dwork);
+#endif
+	}
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
+			(vha->queuedcmd[VHA_CNN_CMD].cmd->session == session) &&
+			(vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id & cmd_id_mask)
+																	== cmd_id) {
+		queued_removed = true;
+		vha->stats.cnn_kicks_cancelled += vha->queuedcmd[VHA_CNN_CMD].cmd->subseg_current;
+		queued_aborted_kicks_adj_val = vha->queuedcmd[VHA_CNN_CMD].cmd->subseg_current;
+	}
+
+	/* Update session scheduling. */
+	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
+			(pend_removed && !queued_removed)) {
+		uint8_t pri = vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.priority;
+		if (vha->queuedcmd[VHA_CNN_CMD].cmd->session !=
+					list_entry(&vha->sched_sessions[pri], struct vha_session,
+								sched_list[pri]))
+			while (list_first_entry(&vha->sched_sessions[pri], struct vha_session,
+						sched_list[pri]) != vha->queuedcmd[VHA_CNN_CMD].cmd->session)
+				list_rotate_left(&vha->sched_sessions[pri]);
+	}
+
+	/* Remove pend/queued commands if needed. */
+	if (pend_removed || queued_removed) {
+		vha_rollback_cnn_cmds(vha);
+		/* Correct aborted stats. */
+		if (queued_removed)
+			vha->stats.cnn_kicks_aborted -= queued_aborted_kicks_adj_val;
+		if (pend_removed)
+			vha->stats.cnn_kicks_aborted -= pend_aborted_kicks_adj_val;
+		reschedule = true;
+	}
+
+	return reschedule;
+}
+
+int vha_rm_cmds(struct vha_session *session, uint32_t cmd_id,
+		uint32_t cmd_id_mask, bool respond)
+{
+	struct vha_dev *vha = session->vha;
+	struct vha_cmd *cur_cmd, *tmp_cmd;
+	struct vha_rsp *cur_rsp, *tmp_rsp;
+	bool reschedule = false;
+	bool respond_aux = false;
+	int ret = 0;
+	uint8_t pri;
+
+	mutex_lock(&vha->lock);
+
+	/* Remove pend/queued session commands that match the cmd_id. */
+	reschedule = vha_rm_session_cmds_masked(session, cmd_id, cmd_id_mask);
+
+	/* Remove session related commands matching command id template. */
+	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
+		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
+			if ((cur_cmd->user_cmd.cmd_id & cmd_id_mask) == cmd_id) {
+
+#ifdef KERNEL_DMA_FENCE_SUPPORT
+				switch (cur_cmd->user_cmd.cmd_type)
+				{
+				case VHA_CMD_CNN_SUBMIT:
+				{
+					struct vha_user_cnn_submit_cmd *cnn_cmd =
+							(struct vha_user_cnn_submit_cmd *)&cur_cmd->user_cmd;
+					int j;
+					for (j = 0; j < (cnn_cmd->msg.num_bufs - 1); j++) {
+						struct vha_buffer *buf = vha_find_bufid(session, cnn_cmd->bufs[j]);
+						if (buf == NULL) {
+							dev_warn(vha->dev, "%s: could not find buf %x\n", __func__,
+											cnn_cmd->bufs[j]);
+						} else {
+							vha_rm_buf_fence(session, buf);
+						}
+					}
+					break;
+				}
+				default:
+					dev_warn(vha->dev, "%s: invalid cmd type %x\n", __func__,
+								cur_cmd->user_cmd.cmd_type);
+					break;
+				}
+#endif
+
+				/* rsp didn't make it to rsps list; free it now. */
+				kfree(cur_cmd->rsp);
+
+				list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
+				vha->pri_q_counters[cur_cmd->user_cmd.priority] -=
+								(VHA_CMD_SUBSEG_NUM(cur_cmd) - cur_cmd->subseg_current);
+				if (vha_observers.canceled)
+					vha_observers.canceled(vha->id, session->id, cur_cmd->user_cmd.cmd_id,
+											cur_cmd->user_cmd.priority);
+				kfree(cur_cmd);
+
+				/* There were commands matching command id template in the list,
+				 * so respond to wake user space. */
+				respond_aux = true;
+			}
+		}
+	}
+
+	/* Remove responses for session related commands
+	 * matching command id template. */
+	list_for_each_entry_safe(cur_rsp, tmp_rsp, &session->rsps, list) {
+		if ((cur_rsp->user_rsp.cmd_id & cmd_id_mask) == cmd_id) {
+			list_del(&cur_rsp->list);
+			kfree(cur_rsp);
+			respond_aux = true;
+		}
+	}
+
+	/* Reset hardware if required. */
+	if (reschedule)
+		ret = vha_dev_stop(vha, reschedule);
+
+	/* Generate "cancel" response if any commands matching command id template
+	 * were removed. */
+	if (respond_aux && respond) {
+		/* Calculate space for the response. */
+		size_t sz = sizeof(struct vha_rsp)
+			+ sizeof(struct vha_user_cnn_submit_rsp)
+			- sizeof(struct vha_user_rsp);
+		/* Allocate space for standard response. */
+		struct vha_rsp *rsp = kzalloc(sz, GFP_KERNEL);
+		if (rsp == NULL) {
+			dev_crit(session->vha->dev,
+					"Failed to allocate memory to notify cancel for cmds 0x%08x\n", cmd_id);
+			session->oom = true;
+		} else {
+			rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
+			rsp->user_rsp.cmd_id = cmd_id;
+			list_add_tail(&rsp->list, &session->rsps);
+		}
+		wake_up(&session->wq);
+	}
+
+	mutex_unlock(&vha->lock);
+
+	/* Just return in case of oom. */
+	if (session->oom)
+		return -ENOMEM;
+
+	/* Reschedule once all commands matching command id template are removed. */
+	if (reschedule)
+		vha_chk_cmd_queues(vha, true);
+
+	return ret;
+}
+
+bool vha_is_busy(struct vha_dev *vha)
+{
+#ifndef CONFIG_VHA_DUMMY
+	if (!vha->is_ready)
+		return true;
+#endif
+
+	if (vha->low_latency != VHA_LL_DISABLED) {
+		return vha->pendcmd[VHA_CNN_CMD].cmd != NULL ||
+				vha->queuedcmd[VHA_CNN_CMD].cmd != NULL;
+	}
+	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
+}
+
+/* returns true if the cmd queue is full */
+bool vha_is_queue_full(struct vha_dev *vha, struct vha_cmd *cmd)
+{
+	if (vha->low_latency != VHA_LL_DISABLED) {
+		if (vha->low_latency == VHA_LL_SELF_KICK
+#ifdef HW_AX3
+			/* if the current command we are trying to queue belongs to a different session than the pending one */
+			&& (vha->pendcmd[VHA_CNN_CMD].cmd != NULL && cmd != NULL &&
+					vha->pendcmd[VHA_CNN_CMD].cmd->session != cmd->session)
+			/* if the session of the command we are trying to queue shares the hw mmu ctx with the session of the pending cmd */
+			&& (cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id ==
+					vha->pendcmd[VHA_CNN_CMD].cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id)
+			/* sanity-check that the hw mmu ctx is really shared at this point */
+			&& (vha->mmu_ctxs[cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1)
+			) {
+#else
+			) {
+			dev_warn(vha->dev, "%s: LL=2 not supported!\n", __func__);
+#endif
+			/* skip low latency mode */
+			return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
+		}
+
+		return vha->pendcmd[VHA_CNN_CMD].cmd != NULL &&
+					vha->queuedcmd[VHA_CNN_CMD].cmd != NULL;
+	}
+	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
+}
+
+/* check all input buffers are filled and ready to go */
+bool vha_is_waiting_for_inputs(struct vha_session *session,
+	struct vha_cmd *cmd)
+{
+	if (!cmd->inbufs_ready) {
+		const struct vha_user_cnn_submit_cmd *user_cmd =
+			(struct vha_user_cnn_submit_cmd *)&cmd->user_cmd;
+		int i;
+
+		for (i = 0; i < cmd->user_cmd.num_inbufs - 1; i++) {
+			struct vha_buffer *buf = vha_find_bufid(session, user_cmd->bufs[i]);
+
+			if (buf && buf->status == VHA_BUF_UNFILLED) {
+				dev_dbg(session->vha->dev,
+					"%s: cmd %u waiting for input "
+					"buf %d to be ready\n",
+					__func__,
+					cmd->user_cmd.cmd_id,
+					buf->id);
+				return true;
+			}
+		}
+	}
+
+	cmd->inbufs_ready = true;
+	return false;
+}
+
+static bool vha_can_schedule(struct vha_dev *vha)
+{
+#ifndef CONFIG_VHA_DUMMY
+	if (!vha->is_ready)
+		return false;
+#endif
+
+	if (vha->low_latency != VHA_LL_DISABLED) {
+		return vha->pendcmd[VHA_CNN_CMD].cmd == NULL ||
+				vha->queuedcmd[VHA_CNN_CMD].cmd == NULL;
+	}
+	return vha->pendcmd[VHA_CNN_CMD].cmd == NULL;
+}
+
+static void vha_scheduler_set_starting_session(struct vha_dev *vha,
+	uint8_t priority, struct vha_session *session, bool set_next)
+{
+	/* Rotate scheduling list to the current session
+	 * to make it a starting point for the next scheduling round. */
+	if (session != list_entry(&vha->sched_sessions[priority],
+								struct vha_session, sched_list[priority]))
+		while (list_first_entry(&vha->sched_sessions[priority],
+								struct vha_session, sched_list[priority]) != session)
+			list_rotate_left(&vha->sched_sessions[priority]);
+	/* Set a starting point session for the next scheduling round
+	 * to next to the current one if requested. */
+	if (set_next)
+		list_rotate_left(&vha->sched_sessions[priority]);
+}
+
+static uint8_t vha_scheduler_get_priority(struct vha_dev *vha)
+{
+	uint8_t pri;
+
+	/* Find the highest priority that still has workloads to schedule. */
+	for (pri = VHA_MAX_PRIORITIES - 1; (int8_t)pri >= 0; pri--)
+		if (vha->pri_q_counters[pri] > 0)
+			return pri;
+
+	/* If there's no priority with WLs to schedule, return VHA_INVALID_PRI. */
+	return VHA_INVALID_PRI;
+}
+
+void vha_scheduler_loop(struct vha_dev *vha)
+{
+	struct vha_cmd *cmd, *tmp;
+	struct vha_session *session = NULL;
+	enum do_cmd_status cmd_status = CMD_OK;
+	bool scheduled = false;
+	uint8_t current_pri = VHA_DEFAULT_PRI;
+
+	if (vha_is_queue_full(vha, NULL)) {
+		/* Postpone worker task if command queue is full. */
+		dev_dbg(vha->dev, "%s Queue full. Postpone worker task!\n", __func__);
+		return;
+	}
+
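+	/* Scheduling policy: repeatedly pick the highest priority with
+	 * outstanding workloads, walk its sessions round-robin and kick
+	 * the first schedulable command; stop once nothing more can be
+	 * scheduled or the hw queue is full. */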
+	do {
+		scheduled = false;
+		current_pri = vha_scheduler_get_priority(vha);
+		if (current_pri == VHA_INVALID_PRI)
+			break;
+		list_for_each_entry(session, &vha->sched_sessions[current_pri], sched_list[current_pri]) {
+			list_for_each_entry_safe(cmd, tmp, &session->cmds[current_pri], list[current_pri]) {
+
+				/* For hw commands... */
+				if (CMD_EXEC_ON_HW(cmd)) {
+					if (!VHA_IS_DUMMY(vha)) {
+						/* Start device. */
+						if (vha_dev_start(vha))
+							return;
+					}
+				}
+
+				/* Skip this workload as it's already scheduled. */
+				if (cmd->queued || cmd->in_hw)
+					continue;
+
+				dev_dbg(vha->dev, "%s cur_prio=<%d>\n", __func__, current_pri);
+				/* Attempt to schedule command for execution. */
+				cmd_status = vha_do_cmd(cmd);
+
+				/* Update scheduling loop based on command scheduling status. */
+				if ((cmd_status == CMD_OK) || (cmd_status == CMD_HW_BUSY)) {
+					bool set_next = false;
+					if (cmd_status == CMD_OK) {
+						scheduled = true;
+						if (cmd->subseg_current == VHA_CMD_SUBSEG_NUM(cmd))
+							set_next = true;
+					}
+					vha_scheduler_set_starting_session(vha, current_pri, session, set_next);
+					goto exit_session_loop;
+				}
+			}
+		}
+exit_session_loop:;
+	/* Iterate until a workload was scheduled and no other can be scheduled. */
+	} while (vha_can_schedule(vha) && scheduled);
+
+	if (!VHA_IS_DUMMY(vha)) {
+		/* Schedule APM if needed */
+		if (!vha_is_busy(vha) &&
+				!vha->no_clock_disable) {
+			if (!vha->pm_delay) {
+				if (vha_dev_stop(vha, false)) {
+					dev_warn(vha->dev, "%s: Failed to soft stop device. trying with reset",
+						__func__);
+					if (vha_dev_stop(vha, true))
+						dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
+				}
+			} else {
+				vha->apm_dworks[0].delay_ms = vha->pm_delay;
+				vha_sched_apm(vha, &vha->apm_dworks[0]);
+			}
+		}
+	}
+}
+
+void vha_dev_apm_stop(struct vha_dev *vha, struct vha_apm_work *apm_work)
+{
+	if (!vha->do_calibration &&
+			(vha->pendcmd[VHA_CNN_CMD].cmd == NULL &&
+			vha->queuedcmd[VHA_CNN_CMD].cmd == NULL))
+		if (vha_dev_stop(vha, false)) {
+			dev_warn(vha->dev, "%s: Failed to soft stop device. trying with reset",
+				__func__);
+			if (vha_dev_stop(vha, true))
+				dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
+		}
+}
+
+int vha_dev_get_props(struct vha_dev *vha, uint32_t onchipmem_size)
+{
+	struct vha_hw_props *props = &vha->hw_props;
+	uint64_t ip_config;
+	uint32_t ocm_size_kb = 0;
+
+	memset(props, 0, sizeof(*props));
+
+#ifdef CONFIG_VHA_DUMMY
+	/* Note: dummy dev always reads zeroes from registers */
+	props->product_id  = 0x8070605040302010ULL;
+	props->core_id  = (long)HW_SERIES << (int)VHA_CR_CORE_ID_BRANCH_ID_SHIFT;
+	props->core_id += 0x010203040505ULL;   /* provide a dummy core id */
+	props->dummy_dev = true;
+	props->num_cnn_core_devs = 1;
+#else
+	props->product_id  = IOREAD64(vha->reg_base, VHA_CR_PRODUCT_ID);
+	props->core_id  = IOREAD64(vha->reg_base, VHA_CR_CORE_ID);
+#endif
+	props->skip_bvnc_check = false;
+	/*
+	 * MMU version 3 onwards operates on 40-bit physical and virtual addresses
+	 */
+	props->mmu_width = 40;
+
+	/* HW from 1.1 onwards */
+	ip_config = IOREAD64(vha->reg_base, VHA_CR_CORE_IP_CONFIG);
+#ifdef HW_AX3
+	props->mmu_ver = VHA_CR_GETBITS(CORE_IP_CONFIG, MMU_VERSION, ip_config);
+#endif
+	/* Mirage uses MMU version 3 hardware */
+	if (!props->mmu_ver)
+		props->mmu_ver = 3;
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, CNN_SUPPORTED, ip_config))
+		props->num_cnn_core_devs = 1;
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, RTM_SUPPORTED, ip_config))
+		props->supported.rtm = 1;
+#ifdef HW_AX3
+	if (VHA_CR_GETBITS(CORE_IP_CONFIG, PARITY_REGISTERS, ip_config))
+		props->supported.parity = 1;
+
+#if defined(CONFIG_VHA_DUMMY) && defined(VHA_SCF)
+	/* Force parity for pdump generation */
+	props->supported.parity = 1;
+#endif
+#endif
+
+	if ((props->num_cnn_core_devs == 0)
+		|| VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id) != HW_SERIES) {
+		dev_err(vha->dev, "%s: Wrong core configuration detected. "
+			"Expected BVNC %d.x.x.x, got %llu.x.x.x. "
+			"Maybe kernel module was built with wrong params.\n",
+			__func__, HW_SERIES,
+			VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id));
+		return -ENODEV;
+	}
+
+	props->soc_axi  = IOREAD64(vha->reg_base, VHA_CR_SOC_AXI);
+
+	dev_info(vha->dev, "%s: Product id: %#llx\n",
+			__func__, props->product_id);
+	dev_info(vha->dev, "%s: Core id: %#llx\n",
+			__func__, props->core_id);
+	dev_info(vha->dev, "%s: MMU version:%d (%dbit)\n",
+			__func__, props->mmu_ver, props->mmu_width);
+	dev_dbg(vha->dev, "%s: supported: %#x\n",
+			__func__, props->features);
+	dev_dbg(vha->dev, "%s: soc_axi: %#llx\n",
+			__func__, props->soc_axi);
+	{
+		uint64_t tmp = IOREAD64(vha->reg_base,
+				VHA_CR_CORE_IP_INTEGRATOR_ID);
+		dev_dbg(vha->dev, "%s: ip integrator id: %#llx\n",
+				__func__, tmp);
+		tmp = IOREAD64(vha->reg_base, VHA_CR_CORE_IP_CHANGELIST);
+		dev_dbg(vha->dev, "%s: ip change list: %llu\n", __func__, tmp);
+	}
+
+#if defined(CFG_SYS_VAGUS)
+	ocm_size_kb = IOREAD64(vha->reg_base, NN_SYS_CR(CORE_IP_CONFIG)) &
+				~NN_SYS_CR_CORE_IP_CONFIG_NN_SYS_OCM_RAM_SIZE_4KB_CLRMSK;
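+	/* The register field reports the OCM size in 4 kB units
+	 * (per the ..._OCM_RAM_SIZE_4KB field name). */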
+	ocm_size_kb *= 4;
+#endif
+
+	if (ocm_size_kb) {
+		vha->hw_props.locm_size_bytes = ocm_size_kb * 1024;
+		/* The user may want to limit OCM ... */
+		if (onchipmem_size) {
+			if (onchipmem_size < vha->hw_props.locm_size_bytes) {
+				dev_warn(vha->dev, "%s: Limiting onchip memory to %u bytes (available:%u)\n",
+						__func__, onchipmem_size, vha->hw_props.locm_size_bytes);
+				vha->hw_props.locm_size_bytes = onchipmem_size;
+			} else if (onchipmem_size > vha->hw_props.locm_size_bytes) {
+				dev_err(vha->dev, "%s: User defined onchip memory size exceeded (%u > %u)\n",
+						__func__, onchipmem_size, vha->hw_props.locm_size_bytes);
+			}
+		}
+	} else {
+		vha->hw_props.locm_size_bytes = onchipmem_size;
+	}
+
+	dev_info(vha->dev, "%s: Total onchip memory: %u [kB]\n",
+			__func__, vha->hw_props.locm_size_bytes / 1024);
+
+	dev_info(vha->dev, "%s: Devices: DUMMY:%u CNN:%u\n", __func__,
+			props->dummy_dev ? props->num_cnn_core_devs : 0,
+			props->dummy_dev ? 0 : props->num_cnn_core_devs);
+
+	return 0;
+}
+
+void vha_dev_ocm_configure(struct vha_dev *vha)
+{
+#if defined(CFG_SYS_VAGUS)
+	dev_dbg(vha->dev, "%s: OCM address range: %#lx - %#lx\n",
+			__func__, vha->ocm_paddr,
+			vha->ocm_paddr + vha->hw_props.locm_size_bytes - 1);
+	IOWRITE64(vha->reg_base, NN_SYS_CR(NOC_LOWER_ADDR1), vha->ocm_paddr);
+	IOWRITE64(vha->reg_base, NN_SYS_CR(NOC_UPPER_ADDR1),
+			vha->ocm_paddr + vha->hw_props.locm_size_bytes - 1);
+	img_pdump_printf("-- Setup NN_SYS OCM phys address range\n"
+		"WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:0x0\n"
+		"WRW64 :REG_NNSYS:%#x "_PMEM_":$0\n"
+		"WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:%#x\n"
+		"WRW64 :REG_NNSYS:%#x "_PMEM_":$0\n",
+		NN_SYS_CR_NOC_LOWER_ADDR1, vha->hw_props.locm_size_bytes-1,
+		NN_SYS_CR_NOC_UPPER_ADDR1);
+#endif
+}
+
+/* prepare CRC and DEBUG data buffers */
+void vha_dbg_prepare_hwbufs(struct vha_session *session, struct vha_cmd *cmd,
+		struct vha_crc_config_regs *regs)
+{
+	struct vha_dev *vha = session->vha;
+	(void)cmd;
+
+	if (session->cnn_dbg.cnn_crc_buf[0]) {
+		struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[0];
+		uint64_t val64;
+
+		/* enable CRC: address + mode */
+		val64 = VHA_CR_SETBITS_OS(CNN_CRC_CONTROL, CNN_CRC_ENABLE,
+				session->cnn_dbg.cnn_crc_mode);
+		img_pdump_printf("-- CRC_CONTROL=%u buf 'CRC' size=%zx\n",
+				session->cnn_dbg.cnn_crc_mode, buf->size);
+		IOWRITE_PDUMP_BUFADDR(session, buf, 0, VHA_CR_OS(CNN_CRC_ADDRESS));
+
+		IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_CRC_CONTROL));
+
+#ifdef HW_AX3
+		img_pdump_printf("-- CRC_MASK=%#x\n", session->cnn_dbg.cnn_crc_mask);
+		IOWRITE64_PDUMP(session->cnn_dbg.cnn_crc_mask, VHA_CR_OS(CNN_CRC_MASK_CTRL));
+#endif
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
+		struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[0];
+		uint64_t val64;
+
+		/* enable DEBUG: address, perf mode, band mode */
+		img_pdump_printf("-- DEBUG_CONTROL=%u,%u buf 'DBG' size=%zx\n",
+				GET_CNN_DBG_MODE(PERF, session), GET_CNN_DBG_MODE(BAND, session),
+				buf->size);
+		IOWRITE_PDUMP_BUFADDR(session, buf, 0,
+							VHA_CR_OS(CNN_DEBUG_ADDRESS));
+		val64 = VHA_CR_ALIGN_SETBITS_OS(CNN_DEBUG_SIZE,
+								CNN_DEBUG_SIZE,
+								buf->size);
+		IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_DEBUG_SIZE));
+
+		/* Set the CONTROL register only if requested */
+		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
+			val64 = VHA_CR_SETBITS_OS(CNN_DEBUG_CONTROL, CNN_PERF_ENABLE,
+										GET_CNN_DBG_MODE(PERF, session));
+			val64 |= VHA_CR_SETBITS_OS(CNN_DEBUG_CONTROL, CNN_BAND_ENABLE,
+										GET_CNN_DBG_MODE(BAND, session));
+			IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_DEBUG_CONTROL));
+		}
+	}
+}
+
+/* flush CRC and DEBUG data buffers */
+void vha_dbg_flush_hwbufs(struct vha_session *session, char checkpoint, uint8_t mask)
+{
+	struct vha_dev *vha = session->vha;
+	(void)mask;
+	if (session->cnn_dbg.cnn_dbg_flush != checkpoint)
+		return;
+
+	if (session->cnn_dbg.cnn_crc_buf[0]) {
+		struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[0];
+		/*
+		 * TOBEDONE: calculate CRC buffer size based
+		 * on num passes, num layers, etc
+		 */
+		img_pdump_printf("-- Save signatures\n");
+		img_pdump_printf("IF CHECK_CRCS\n");
+		img_pdump_printf("COM Checking CRCs ...\n");
+		vha_pdump_sab_buf(session, PDUMP_CRC,
+					buf, 0, buf->size);
+		img_pdump_printf("ELSE CHECK_CRCS\n");
+		img_pdump_printf("COM Not checking CRCs!\n");
+		img_pdump_printf("FI CHECK_CRCS\n");
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
+		struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[0];
+		/* read the size of the DEBUG buffer */
+		uint64_t size = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_DEBUG_STATUS));
+		/*
+		 * SAB the DBG buffer, even though "it is not deterministic"
+		 */
+		size = VHA_CR_GETBITS_OS(CNN_DEBUG_STATUS,
+							CNN_DEBUG_OFFSET,
+							size);
+		img_pdump_printf("-- Save DEBUG info\n");
+		vha_pdump_sab_buf(session, PDUMP_DBG, buf, 0, buf->size);
+	}
+}
+
+/* stop capturing CRC and DEBUG data */
+void vha_dbg_stop_hwbufs(struct vha_session *session, uint8_t mask)
+{
+	struct vha_dev *vha = session->vha;
+	(void)mask;
+
+	/* Flush hw debug buffers */
+	vha_dbg_flush_hwbufs(session, 0, 0);
+
+	if (session->cnn_dbg.cnn_crc_buf[0]) {
+		IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_CRC_CONTROL));
+	}
+	if (session->cnn_dbg.cnn_dbg_buf[0]) {
+		/* read the size of the DEBUG buffer */
+		uint64_t size = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_DEBUG_STATUS));
+
+		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
+			IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_DEBUG_CONTROL));
+			/* just give a hint in the pdump:
+			 * dummy device returns 0 */
+			img_pdump_printf(
+					"-- POL64 :REG:%#x 0 0 0 1 1 -- DEBUG_STATUS=%llx\n",
+					VHA_CR_OS(CNN_DEBUG_STATUS),
+					size);
+		}
+	}
+}
+
+uint64_t vha_dbg_rtm_read(struct vha_dev *vha, uint64_t addr)
+{
+	/* Turn on all clocks forcefully */
+	IOWRITE64(vha->reg_base, VHA_CR_SYS_CLK_CTRL0, VHA_SYS_CLOCKS_DEFAULT(ON));
+	IOWRITE64(vha->reg_base, VHA_CR_CLK_CTRL0, VHA_MAIN_CLOCKS_DEFAULT(ON));
+
+	/* Set up address of the signal */
+	IOWRITE64(vha->reg_base, VHA_CR_RTM_CTRL, addr | VHA_CR_RTM_CTRL_RTM_ENABLE_EN);
+
+	/* Wait for the RTM data to become ready;
+	 * N_OF_RTM_STAGES is not accessible by SW,
+	 * so wait 1 ms for now */
+	msleep(1);
+
+	/* Read the data */
+	return IOREAD64(vha->reg_base, VHA_CR_RTM_DATA);
+}
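+/* Usage sketch (illustrative, not from this file): a debugfs read handler
+ * could dump a single RTM signal like so, where rtm_addr is a hypothetical
+ * caller-chosen signal address:
+ *
+ *	uint64_t data = vha_dbg_rtm_read(vha, rtm_addr);
+ *	seq_printf(s, "RTM %#llx: %#llx\n", rtm_addr, data);
+ */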
+
+/* List of predefined registers to be shown in debugfs */
+const struct vha_reg vha_regs[] = {
+#define REG_DESC(reg) VHA_CR_##reg, VHA_CR_##reg##_MASKFULL
+#define REG_DESC_OS(reg) VHA_CR_OS(reg), VHA_CR_OS(reg##_MASKFULL)
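+	/* e.g. REG_DESC(CLK_CTRL0) expands to
+	 *   VHA_CR_CLK_CTRL0, VHA_CR_CLK_CTRL0_MASKFULL
+	 * and REG_DESC_OS(CNN_CONTROL) to the per-OS variants */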
+	{"main_clocks_control  ", REG_DESC(CLK_CTRL0)},
+	{"main_clocks_status   ", REG_DESC(CLK_STATUS0)},
+	{"sys_clocks_control   ", REG_DESC(SYS_CLK_CTRL0)},
+	{"sys_clocks_status    ", REG_DESC(SYS_CLK_STATUS0)},
+	{"product_id           ", REG_DESC(PRODUCT_ID)},
+	{"core_id              ", REG_DESC(CORE_ID)},
+	{"soc_axi              ", REG_DESC(SOC_AXI)},
+	{"integrator_id        ", REG_DESC(CORE_IP_INTEGRATOR_ID)},
+	{"ip_changelist        ", REG_DESC(CORE_IP_CHANGELIST)},
+	{"core_ip_config       ", REG_DESC(CORE_IP_CONFIG)},
+	{"reset                ", REG_DESC(RESET_CTRL)},
+	{"event_enable         ", REG_DESC_OS(VHA_EVENT_ENABLE)},
+	{"event_status         ", REG_DESC_OS(VHA_EVENT_STATUS)},
+	{"cnn_control          ", REG_DESC_OS(CNN_CONTROL)},
+	{"cnn_status           ", REG_DESC_OS(CNN_STATUS)},
+#ifdef HW_AX2
+	{"cnn_wdt_cmpmatch     ", REG_DESC(CNN_WDT_COMPAREMATCH)},
+	{"cnn_wdt_control      ", REG_DESC(CNN_WDT_CTRL)},
+	{"cnn_wdt_timer        ", REG_DESC(CNN_WDT_TIMER)},
+#endif
+	{"cnn_mem_wdt_cmpmatch ", REG_DESC(CNN_MEM_WDT_COMPAREMATCH)},
+	{"cnn_mem_wdt_control  ", REG_DESC(CNN_MEM_WDT_CTRL)},
+	{"cnn_mem_wdt_timer    ", REG_DESC(CNN_MEM_WDT_TIMER)},
+	{"mmu_control          ", REG_DESC_OS(MMU_CTRL)},
+	{"mmu_context          ", REG_DESC_OS(MMU_CBASE_MAPPING_CONTEXT)},
+	{"mmu_mapping          ", REG_DESC_OS(MMU_CBASE_MAPPING)},
+	{"mmu_status           ", REG_DESC(MMU_STATUS)},
+	{"mmu_fault_status1    ", REG_DESC_OS(MMU_FAULT_STATUS1)},
+	{"mmu_fault_status2    ", REG_DESC_OS(MMU_FAULT_STATUS2)},
+	{"slc_control          ", REG_DESC(SLC_CTRL)},
+#if 0
+	{"slc_bypass_control   ", REG_DESC(SLC_BYPASS_CTRL)},
+#endif
+	{"slc_status1          ", REG_DESC(SLC_STATUS1)},
+	{"slc_status2          ", REG_DESC(SLC_STATUS2)},
+	{"slc_status3          ", REG_DESC(SLC_STATUS3)},
+	{"slc_idle             ", REG_DESC(SLC_IDLE)},
+	{"bif_outstanding_read ", REG_DESC(BIF_OUTSTANDING_READ)},
+#undef REG_DESC
+#undef REG_DESC_OS
+	{NULL                   , 0},
+};
+

+ 190 - 0
driver/vha/single/vha_dev_ax2.c

@@ -0,0 +1,190 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+
+static long cnn_wdt_cycles = VHA_CORE_WDT_CYCLES;
+module_param(cnn_wdt_cycles, long, 0444);
+MODULE_PARM_DESC(cnn_wdt_cycles,
+		"CNN hw watchdog expiration cycles, -1=use estimated cycles, 0=disable watchdog, >0=predefined");
+static uint32_t cnn_wdt_cycles_margin = 40;
+module_param(cnn_wdt_cycles_margin, uint, 0444);
+MODULE_PARM_DESC(cnn_wdt_cycles_margin,
+		 "Additional margin, in percent, added to the estimated CNN hw watchdog cycles (default: 40%)");
+
+void vha_dev_mh_setup(struct vha_dev *vha, int ctx_id, struct vha_mh_config_regs *regs)
+{
+	uint64_t val64 = 0;
+	uint8_t burst = ilog2(VHA_CORE_MH_MAX_BURST_LENGTH/32);
+
+	WARN_ON(burst & ~VHA_CR_MH_CONTROL_MAX_BURST_LENGTH_MASK);
+	val64 |= VHA_CR_SETBITS(CNN_CMD_MH_CONTROL,
+			MAX_BURST_LENGTH, burst);
+	val64 |= VHA_CR_SETBITS(CNN_CMD_MH_CONTROL,
+			GPU_PIPE_COHERENT, VHA_CORE_MH_GPU_PIPE_COHERENT_TYPE);
+	val64 |= VHA_CR_SETBITS(CNN_CMD_MH_CONTROL,
+			SLC_CACHE_POLICY, VHA_CORE_MH_SLC_CACHE_POLICY_TYPE);
+	val64 |= VHA_CR_SETBITS(CNN_CMD_MH_CONTROL,
+			PERSISTENCE, VHA_CORE_MH_PERSISTENCE_PRIO);
+
+	img_pdump_printf("-- CNN mem hierarchy setup CTXT_PASID:%d\n", ctx_id);
+	val64 |= VHA_CR_SETBITS(CNN_CMD_MH_CONTROL,
+			CTXT_PASID, ctx_id);
+
+	/* Note: CMD reg has a different layout than IBUF, CBUF, ABUF, OUTPACK */
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_CMD_MH_CONTROL);
+
+	val64 = 0;
+	val64 |= VHA_CR_SETBITS(CNN_IBUF_MH_CONTROL,
+			MAX_BURST_LENGTH, burst);
+	val64 |= VHA_CR_SETBITS(CNN_IBUF_MH_CONTROL,
+			GPU_PIPE_COHERENT, VHA_CORE_MH_GPU_PIPE_COHERENT_TYPE);
+	val64 |= VHA_CR_SETBITS(CNN_IBUF_MH_CONTROL,
+			PERSISTENCE, VHA_CORE_MH_PERSISTENCE_PRIO);
+
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_IBUF_MH_CONTROL);
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_CBUF_MH_CONTROL);
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_ABUF_MH_CONTROL);
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_OUTPACK_MH_CONTROL);
+	IOWRITE64_PDUMP(val64, VHA_CR_CNN_ELEMENTOPS_MH_CONTROL);
+}
+
+void vha_dev_hwwdt_setup(struct vha_dev *vha, uint64_t cycles, uint64_t mode)
+{
+	if (!mode)
+		mode = VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS;
+
+	dev_dbg(vha->dev, "%s: cycles:%llx mode:%llx\n", __func__, cycles, mode);
+	/* Note: Do not pdump the main watchdog as it may trigger
+	 * during memory latency/stalling testing */
+	if (cycles) {
+		IOWRITE64(vha->reg_base, VHA_CR_CNN_WDT_COMPAREMATCH,
+			cycles & VHA_CR_CNN_WDT_COMPAREMATCH_MASKFULL);
+		IOWRITE64(vha->reg_base, VHA_CR_CNN_WDT_CTRL,
+			mode & VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_MASK);
+	} else {
+		IOWRITE64(vha->reg_base, VHA_CR_CNN_WDT_CTRL,
+			VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_NONE);
+	}
+	/* Clear timer value just for sanity */
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_WDT_TIMER, 0);
+	/* Note: We are not enabling MEM_WDT because it will not detect
+	 * issues due to the BIF/MMU or the customer's memory fabric.
+	 * We could in theory enable this in customer systems,
+	 * but there is always a risk of false negatives
+	 * if their memory latency went very high temporarily.
+	 * The HW team sets this watchdog externally */
+#if 0
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_COMPAREMATCH, 0xfffff);
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_CTRL,
+		VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS);
+	/* Clear timer value */
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_TIMER, 0);
+#endif
+}
+
+int vha_dev_hwwdt_calculate(struct vha_dev *vha, struct vha_cmd *cmd,
+		uint64_t *cycles, uint64_t *mode)
+{
+	const struct vha_user_cnn_submit_cmd *user_cmd =
+		(struct vha_user_cnn_submit_cmd *)&cmd->user_cmd;
+
+	if (!cycles || !mode)
+		return -EINVAL;
+
+	if (user_cmd && user_cmd->estimated_cycles && cnn_wdt_cycles == -1) {
+		/* allow 40%, by default, above the estimated cycle count.
+		 * Clamp at uint32_t maximum */
+		uint64_t wdt_cycles = user_cmd->estimated_cycles;
+		uint64_t margin = wdt_cycles * cnn_wdt_cycles_margin;
+
+		do_div(margin, 100UL);
+		dev_dbg(vha->dev,
+			"%s: estimated wdt cycles:%llx + margin:%llx\n",
+			__func__, wdt_cycles, margin);
+		wdt_cycles += margin;
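+		/* e.g. estimated_cycles = 1000000 with the default 40% margin:
+		 * margin = 400000, so wdt_cycles = 1400000 (illustrative numbers) */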
+		if (wdt_cycles > 0xffffffff)
+			wdt_cycles = 0xffffffff;
+		/* estimated cycle is per segment */
+		*cycles = wdt_cycles;
+		*mode = VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK;
+	} else {
+		/* default value is per pass.
+		 * If default is 0 cycles it disables the watchdog */
+		*cycles = cnn_wdt_cycles;
+		*mode = VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS;
+	}
+	vha->wdt_mode = *mode;
+
+	return 0;
+}
+
+int vha_dev_prepare(struct vha_dev *vha)
+{
+	/* Nothing to do */
+	return 0;
+}
+
+void vha_dev_setup(struct vha_dev *vha)
+{
+	vha->is_ready = true;
+}
+
+void vha_dev_wait(struct vha_dev *vha)
+{
+	/* Nothing to do */
+}
+
+uint32_t vha_dev_kick_prepare(struct vha_dev *vha,
+				 struct vha_cmd *cmd, int ctx_id)
+{
+	/* write to the START bit */
+	uint32_t val = (min(2048U, cmd->stream_size)/32-1)
+		<< VHA_CR_OS(CNN_CONTROL_CMD_SIZE_MIN1_SHIFT);
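+	/* e.g. stream_size = 1024 bytes -> CMD_SIZE field = 1024/32 - 1 = 31;
+	 * streams above 2048 bytes are clamped by the min() (illustrative) */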
+	val |= VHA_CR_OS(CNN_CONTROL_START_EN);
+
+	return val;
+}

+ 207 - 0
driver/vha/single/vha_dev_ax3.c

@@ -0,0 +1,207 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+
+#include "vha_common.h"
+#include "vha_plat.h"
+#include "vha_regs.h"
+
+static uint32_t os_priority = _OSID_;
+module_param(os_priority, uint, 0444);
+MODULE_PARM_DESC(os_priority, "Kick priority for this driver instance: <0,3>");
+
+static uint32_t prio_limits;
+module_param(prio_limits, uint, 0444);
+MODULE_PARM_DESC(prio_limits, "Priority limits. Valid for OS0 only. See TRM");
+
+static uint32_t hl_wdt_cycles = VHA_CORE_WDT_CYCLES;
+module_param(hl_wdt_cycles, uint, 0444);
+MODULE_PARM_DESC(hl_wdt_cycles, "High level watchdog cycles");
+
+static uint32_t hl_wdt_mode = 1;
+module_param(hl_wdt_mode, uint, 0444);
+MODULE_PARM_DESC(hl_wdt_mode, "High level watchdog mode: 1-pass; 2-layer group. See TRM");
+
+void vha_dev_mh_setup(struct vha_dev *vha, int ctx_id, struct vha_mh_config_regs *regs)
+{
+	uint64_t val64 = 0;
+
+	val64 |= VHA_CR_SETBITS_OS(CNN_PRELOAD_CONTROL, CBUF_N_REQS,
+				VHA_CR_CNN_PRELOAD_CTRL_N_64);
+	/* Setup preload for MMM */
+	val64 |= VHA_CR_SETBITS_OS(CNN_PRELOAD_CONTROL, MMM_RD_N_REQS, VHA_CR_CNN_PRELOAD_CTRL_N_256);
+	val64 |= VHA_CR_SETBITS_OS(CNN_PRELOAD_CONTROL, MMM_WR_N_REQS, VHA_CR_CNN_PRELOAD_CTRL_N_256);
+
+	IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_PRELOAD_CONTROL));
+}
+
+void vha_dev_hwwdt_setup(struct vha_dev *vha, uint64_t cycles, uint64_t mode)
+{
+	img_pdump_printf("-- Setup High level watchdog\n");
+	IOWRITE64_PDUMP((cycles & VHA_CR_CNN_HL_WDT_COMPAREMATCH_MASKFULL),
+			VHA_CR_CNN_HL_WDT_COMPAREMATCH);
+	IOWRITE64_PDUMP(hl_wdt_mode,
+			VHA_CR_CNN_HL_WDT_CTRL);
+	IOWRITE64_PDUMP(0, VHA_CR_CNN_HL_WDT_TIMER);
+
+	/* Setup memory watchdog */
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_COMPAREMATCH, VHA_CORE_MEM_WDT_CYCLES);
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_CTRL,
+			VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS);
+	IOWRITE64(vha->reg_base, VHA_CR_CNN_MEM_WDT_TIMER, 0);
+}
+
+int vha_dev_hwwdt_calculate(struct vha_dev *vha, struct vha_cmd *cmd,
+		uint64_t *cycles, uint64_t *mode)
+{
+	if (!cycles || !mode)
+		return -EINVAL;
+
+	return -EIO;
+}
+
+int vha_dev_prepare(struct vha_dev *vha)
+{
+	/* Enable core events */
+	img_pdump_printf("-- Enable CORE events\n");
+	IOWRITE64_PDUMP(VHA_CORE_EVNTS, VHA_CR_OS(VHA_EVENT_ENABLE));
+	img_pdump_printf("-- Clear CORE events\n");
+	IOWRITE64_PDUMP(VHA_CORE_EVNTS, VHA_CR_OS(VHA_EVENT_CLEAR));
+
+	return 0;
+}
+
+void vha_dev_setup(struct vha_dev *vha)
+{
+	uint64_t val64;
+
+	vha_dev_hwwdt_setup(vha, hl_wdt_cycles, 0);
+	if (prio_limits) {
+		img_pdump_printf("-- Set priority limits\n");
+		IOWRITE64_PDUMP(prio_limits, VHA_CR_CNN_CMD_PRIORITY_LIMITS);
+	}
+
+	img_pdump_printf("-- MMU set virtual address range0:%#llx-%#llx\n",
+			IMG_MEM_VA_HEAP1_BASE, IMG_MEM_VA_HEAP1_SIZE);
+	val64 = (uint64_t)vha->mmu_page_size <<
+			VHA_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT;
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_ONE,
+		BASE_ADDR, IMG_MEM_VA_HEAP1_BASE);
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_ONE,
+		END_ADDR, (IMG_MEM_VA_HEAP1_BASE + IMG_MEM_VA_HEAP1_SIZE));
+	IOWRITE64_PDUMP(val64, VHA_CR_MMU_PAGE_SIZE_RANGE_ONE);
+
+	img_pdump_printf("-- MMU set virtual address range1:%#llx-%#llx\n",
+			IMG_MEM_VA_HEAP2_BASE, IMG_MEM_VA_HEAP2_SIZE);
+	val64 = (uint64_t)vha->mmu_page_size <<
+			VHA_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT;
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_TWO,
+		BASE_ADDR, IMG_MEM_VA_HEAP2_BASE);
+	val64 |= VHA_CR_ALIGN_SETBITS(MMU_PAGE_SIZE_RANGE_TWO,
+		END_ADDR, (IMG_MEM_VA_HEAP2_BASE + IMG_MEM_VA_HEAP2_SIZE));
+	IOWRITE64_PDUMP(val64, VHA_CR_MMU_PAGE_SIZE_RANGE_TWO);
+}
+
+void vha_dev_wait(struct vha_dev *vha)
+{
+	uint32_t ready_val = VHA_CR_OS(VHA_EVENT_STATUS_VHA_READY_EN);
+	uint32_t ready_mask = 0xffffffff;
+	/* Ignore PARITY when waiting for status change */
+	uint32_t status_mask = VHA_CR_OS(VHA_EVENT_STATUS_PARITY_CLRMSK);
+
+#ifdef VHA_SCF
+	if (vha->hw_props.supported.parity &&
+			!vha->parity_disable) {
+		/* If READY bit is set then the parity bit must be set as well! */
+		ready_val |= VHA_CR_OS(VHA_EVENT_STATUS_PARITY_EN);
+	}
+#else
+	/* Ignore PARITY, so that non-SCF pdump may work with SC CSIM */
+	ready_mask &= VHA_CR_OS(VHA_EVENT_STATUS_PARITY_CLRMSK);
+#endif
+
+	/* Wait for the READY interrupt as well.
+	 * pdump POL for any status flag:
+	 * count=1000, delay=1000 cycles
+	 */
+	img_pdump_printf("-- Wait for any CORE status change\n"
+			"POL :REG:%#x 0 %#x 3 1000 1000\n",
+			 VHA_CR_OS(VHA_EVENT_STATUS), status_mask);
+
+	/* quick pdump POL for the status READY flag only:
+	 * count=1, delay=10cycles
+	 */
+	img_pdump_printf("-- Check for READY flag only\n"
+			"POL :REG:%#x %#x %#x 0 1 10\n",
+			 VHA_CR_OS(VHA_EVENT_STATUS),
+			 ready_val, ready_mask);
+	/* We do clear interrupts in the irq handler,
+	 * but this is not recorded into pdump because
+	 * of the irq context, so do it here */
+	img_pdump_printf("-- Clear CORE events\n"
+			"WRW64 :REG:%#x %#x\n",
+			 VHA_CR_OS(VHA_EVENT_CLEAR),
+			 VHA_CR_OS(VHA_EVENT_CLEAR_VHA_READY_EN) |
+			 VHA_CR_OS(VHA_EVENT_CLEAR_VHA_ERROR_EN) |
+			 VHA_CR_OS(VHA_EVENT_CLEAR_VHA_HL_WDT_EN));
+}
+
+uint32_t vha_dev_kick_prepare(struct vha_dev *vha,
+				 struct vha_cmd *cmd, int ctx_id)
+{
+	/* write to the START bit */
+	uint32_t val32 = (min(2048U, cmd->stream_size)/32-1)
+		<< VHA_CR_OS(CNN_CONTROL_CMD_SIZE_MIN1_SHIFT);
+	val32 |= VHA_CR_OS(CNN_CONTROL_START_EN);
+
+	/* This is odd: the hw uses two contexts; we program the base one,
+	 * but the other is always used in pair with it */
+	img_pdump_printf("-- CNN setup CTXT_PASID:%d PRIO:%d\n",
+			ctx_id, os_priority);
+	val32 |= VHA_CR_SETBITS_OS(CNN_CONTROL,
+			CTXT_PASID, ctx_id);
+	val32 |= VHA_CR_SETBITS_OS(CNN_CONTROL,
+			PRIORITY, os_priority);
+
+	return val32;
+}

+ 241 - 0
driver/vha/single/vha_mmu.c

@@ -0,0 +1,241 @@
+/*
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include <uapi/vha.h>
+#include "vha_common.h"
+#include "vha_plat.h"
+#include <vha_regs.h>
+
+static void mmu_flush(const struct device *dev,
+		struct vha_dev *vha, int ctx_id)
+{
+	uint64_t inval = VHA_CR_OS(MMU_CTRL_INVAL_PC_EN) |
+				VHA_CR_OS(MMU_CTRL_INVAL_PD_EN) |
+				VHA_CR_OS(MMU_CTRL_INVAL_PT_EN);
+
+	/* No need to handle mmu cache, when core is already offline */
+	if (vha->state == VHA_STATE_OFF)
+		return;
+
+#if defined(HW_AX3)
+	{
+		uint64_t pend = VHA_CR_OS(MMU_CTRL_INVAL_STATUS_PENDING_EN);
+#ifdef VHA_SCF
+		if (vha->hw_props.supported.parity &&
+				!vha->parity_disable) {
+			/* If pending bit is set then parity bit must be set as well ! */
+			pend |= VHA_CR_OS(MMU_CTRL_INVAL_STATUS_PARITY_EN);
+		}
+#endif
+		IOPOLL64_PDUMP(0, 20, 150, pend, VHA_CR_OS(MMU_CTRL_INVAL_STATUS));
+	}
+#endif
+
+	if (unlikely(ctx_id == VHA_INVALID_ID))
+		inval |= VHA_CR_OS(MMU_CTRL_INVAL_ALL_CONTEXTS_EN);
+	else {
+		inval |= ctx_id << VHA_CR_OS(MMU_CTRL_INVAL_CONTEXT_SHIFT);
+	}
+	dev_dbg(dev, "%s: ctx_id:%d (0x%llx)\n", __func__, ctx_id, inval);
+
+	img_pdump_printf("-- MMU invalidate TLB caches\n");
+	IOWRITE64_PDUMP(inval, VHA_CR_OS(MMU_CTRL_INVAL));
+}
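+/* e.g. mmu_flush(vha->dev, vha, VHA_INVALID_ID) invalidates the PC/PD/PT
+ * caches for all contexts, via the INVAL_ALL_CONTEXTS path above */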
+
+/* this function is called from img_mmu, to handle cache issues */
+int vha_mmu_callback(enum img_mmu_callback_type callback_type,
+			int buf_id, void *data)
+{
+	struct vha_session *session = data;
+	struct vha_dev *vha = session->vha;
+	int ctx_id;
+	int ret = 0;
+	if (!vha)
+		return 0;
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
+		mmu_flush(vha->dev, vha, session->mmu_ctxs[ctx_id].hw_id);
+#if defined(VHA_MMU_MIRRORED_CTX_SUPPORT) && defined(HW_AX3)
+	{
+		/* Need to flush the auxiliary hw context */
+		int hw_id = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id +
+				VHA_MMU_AUX_HW_CTX_SHIFT;
+		mmu_flush(vha->dev, vha, hw_id);
+	}
+#endif
+	return ret;
+}
+
+static void do_mmu_ctx_setup(struct vha_dev *vha,
+			uint8_t hw_id, int pc_bufid, uint32_t pc_baddr)
+{
+	img_pdump_printf("-- Setup MMU context:%d\n", hw_id);
+	IOWRITE64_PDUMP(hw_id, VHA_CR_OS(MMU_CBASE_MAPPING_CONTEXT));
+
+	if (!vha->mmu_base_pf_test) {
+		IOWRITE64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING), pc_baddr);
+
+		/* This is a physical address, so we need to use the MEM_OS0:BLOCK
+		 * tag when pdumping. */
+		img_pdump_printf("-- Setup MMU base address\n"
+				"WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:0 -- 'PC'\n"
+				"SHR "_PMEM_":$0 "_PMEM_":$0 %d\n"
+				"WRW64 :REG:%#x "_PMEM_":$0\n", pc_bufid,
+				IMG_MMU_PC_ADDR_SHIFT,
+				VHA_CR_OS(MMU_CBASE_MAPPING));
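+		/* i.e. the pdump records phys(PC buffer) >> IMG_MMU_PC_ADDR_SHIFT
+		 * being written to MMU_CBASE_MAPPING, mirroring the direct
+		 * register write above */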
+		dev_dbg(vha->dev, "%s: setting hardware ctx id:%u\n", __func__, hw_id);
+	} else
+		dev_info(vha->dev, "Bringup test: force MMU base page fault\n");
+}
+
+int vha_mmu_setup(struct vha_session *session)
+{
+	struct vha_dev *vha = session->vha;
+	int ctx_id;
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
+		dev_dbg(vha->dev,
+				"%s: mode:%d session ctxid:%x active ctxid:%x\n",
+				__func__, vha->mmu_mode,
+				session->mmu_ctxs[ctx_id].id,
+				vha->active_mmu_ctx);
+
+	if (vha->mmu_mode == VHA_MMU_DISABLED) {
+		img_pdump_printf("-- MMU bypass ON\n");
+		IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
+			VHA_CR_OS(MMU_CTRL));
+		return 0;
+	}
+
+	/* Using model context to track active context */
+	if (session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id == vha->active_mmu_ctx)
+		return 0;
+
+	img_pdump_printf("-- MMU_SETUP_BEGIN\n");
+	img_pdump_printf("-- MMU bypass OFF\n");
+	IOWRITE64_PDUMP(0, VHA_CR_OS(MMU_CTRL));
+
+	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
+		do_mmu_ctx_setup(vha, session->mmu_ctxs[ctx_id].hw_id,
+				session->mmu_ctxs[ctx_id].pc_bufid,
+				session->mmu_ctxs[ctx_id].pc_baddr);
+		/* If there are multiple sessions using the same mmu hardware context
+		 * we need to flush caches for the old context (the id is the same).
+		 * This happens when the number of processes is > VHA_MMU_MAX_HW_CTXS */
+		if (vha->mmu_ctxs[session->mmu_ctxs[ctx_id].hw_id] > 1)
+			mmu_flush(vha->dev, vha, session->mmu_ctxs[ctx_id].hw_id);
+	}
+#if defined(VHA_MMU_MIRRORED_CTX_SUPPORT) && defined(HW_AX3)
+	{
+		/* Need to program the auxiliary hw context to
+		 * point at the same page tables as the base context */
+		int hw_id = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id +
+				VHA_MMU_AUX_HW_CTX_SHIFT;
+		do_mmu_ctx_setup(vha, hw_id,
+				session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_bufid,
+				session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_baddr);
+		if (vha->mmu_ctxs[session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1)
+			mmu_flush(vha->dev, vha, hw_id);
+	}
+#endif
+
+	/* Using model context to track context change */
+	vha->active_mmu_ctx = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id;
+	dev_dbg(vha->dev, "%s: update ctx id active:%x pc:%#x\n",
+			__func__, vha->active_mmu_ctx,
+			session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_baddr <<
+			VHA_CR_OS(MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
+
+	img_pdump_printf("-- MMU_SETUP_END\n");
+
+	return 0;
+}
+
+void vha_mmu_status(struct vha_dev *vha)
+{
+	const char levels[][5] = {"PT", "PD", "PC", "BASE"};
+
+	uint64_t status1 = IOREAD64(vha->reg_base,
+		VHA_CR_OS(MMU_FAULT_STATUS1));
+	uint64_t status2 = IOREAD64(vha->reg_base,
+		VHA_CR_OS(MMU_FAULT_STATUS2));
+
+	uint64_t addr = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, ADDRESS, status1);
+	uint8_t level = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, LEVEL, status1);
+	uint8_t req_id = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, REQ_ID, status1);
+	uint8_t ctx = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, CONTEXT, status1);
+	uint8_t rnw = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, RNW, status1);
+	uint8_t type = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, TYPE, status1);
+	uint8_t fault = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, FAULT, status1);
+
+	uint8_t bif_id = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, BIF_ID, status2);
+	uint8_t tlb_entry = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, TLB_ENTRY, status2);
+	uint8_t slc_bank = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, BANK, status2);
+	uint64_t mapping = 0;
+
+	/* Select context and read current pc */
+	IOWRITE64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING_CONTEXT), ctx);
+	mapping = IOREAD64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING));
+
+	/* false alarm ? */
+	if (!fault)
+		return;
+
+	dev_dbg(vha->dev, "%s: MMU FAULT: s1:%llx s2:%llx\n",
+			__func__, status1, status2);
+
+	dev_warn(vha->dev, "%s: MMU fault while %s @ 0x%llx\n",
+			__func__, (rnw) ? "reading" : "writing", addr << 4);
+	dev_warn(vha->dev, "%s: level:%s Requestor:%x Context:%x Type:%s\n",
+			__func__, levels[level], req_id, ctx,
+			(type == 0) ? "VALID" :
+			(type == 2) ? "READ-ONLY" :
+			"UNKNOWN");
+	dev_warn(vha->dev, "%s: bif_id:%x tlb_entry:%x slc_bank:%x\n",
+			__func__, bif_id, tlb_entry, slc_bank);
+	dev_warn(vha->dev, "%s: current mapping@context%d:%#llx\n",
+			__func__, ctx,
+			mapping <<
+			VHA_CR_OS(MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
+}

+ 191 - 0
driver/vha/single/vha_regs.h

@@ -0,0 +1,191 @@
+/*!
+ *****************************************************************************
+ * Copyright (c) Imagination Technologies Ltd.
+ *
+ * The contents of this file are subject to the MIT license as set out below.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of the
+ * GNU General Public License Version 2 ("GPL") in which case the provisions of
+ * GPL are applicable instead of those above.
+ *
+ * If you wish to allow use of your version of this file only under the terms
+ * of GPL, and not to allow others to use your version of this file under the
+ * terms of the MIT license, indicate your decision by deleting the provisions
+ * above and replace them with the notice and other provisions required by GPL
+ * as set out in the file called "GPLHEADER" included in this distribution. If
+ * you do not delete the provisions above, a recipient may use your version of
+ * this file under the terms of either the MIT license or GPL.
+ *
+ * This License is also included in this distribution in the file called
+ * "MIT_COPYING".
+ *
+ *****************************************************************************/
+#include "../vha_io.h"
+
+#if defined(HW_AX2)
+#include <hwdefs/vha_cr_mirage.h>
+#elif defined(HW_AX3)
+#include <hwdefs/vha_cr_aura.h>
+#else
+#error "No HW layout defined"
+#endif
+
+#if defined(CFG_SYS_MAGNA)
+#include <hwdefs/magna_system.h>
+#elif defined(CFG_SYS_VAGUS)
+#include <hwdefs/vagus_system.h>
+#elif defined(CFG_SYS_AURA)
+#include <hwdefs/aura_system.h>
+#elif defined(CFG_SYS_MIRAGE)
+#include <hwdefs/mirage_system.h>
+#endif
+
+/* HW Series AURA or MIRAGE */
+#if defined(HW_AX2)
+#define HW_SERIES (23U)
+#elif defined(HW_AX3)
+#if defined(CONFIG_VHA_NEXEF)
+/* 3NX-F uses a different B (branch) value */
+#define HW_SERIES (32U)
+#else
+#define HW_SERIES (28U)
+#endif
+#else
+#error "No HW Series defined"
+#endif
+
+/* Events macros definition */
+#define VHA_EVENT_TYPE(name) \
+		VHA_CR_VHA_EVENT_TYPE_VHA_##name##_EN
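+/* e.g. VHA_EVENT_TYPE(READY) expands to VHA_CR_VHA_EVENT_TYPE_VHA_READY_EN */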
+
+#if defined(HW_AX2)
+#define VHA_CNN_ERR_EVNTS (VHA_EVENT_TYPE(CNN0_ERROR) |\
+			VHA_EVENT_TYPE(CNN0_MEM_WDT) |\
+			VHA_EVENT_TYPE(CNN0_WDT))
+
+#define VHA_CORE_EVNTS (VHA_EVENT_TYPE(MMU_PAGE_FAULT) |\
+			VHA_EVENT_TYPE(AXI_ERROR))
+#elif defined(HW_AX3)
+#define VHA_CNN_ERR_EVNTS (VHA_EVENT_TYPE(CNN0_ERROR) |\
+			VHA_EVENT_TYPE(CNN0_MEM_WDT))
+
+#ifdef VHA_SCF
+#define VHA_CORE_EVNTS ( \
+			VHA_EVENT_TYPE(MMU_PARITY_ERROR) |\
+			VHA_EVENT_TYPE(PARITY_ERROR) |\
+			VHA_EVENT_TYPE(LOCKSTEP_ERROR) |\
+			VHA_EVENT_TYPE(READY) |\
+			VHA_EVENT_TYPE(ERROR) |\
+			VHA_EVENT_TYPE(HL_WDT) |\
+			VHA_EVENT_TYPE(MMU_PAGE_FAULT) |\
+			VHA_EVENT_TYPE(AXI_ERROR))
+#else  /*!VHA_SCF */
+#define VHA_CORE_EVNTS ( \
+			VHA_EVENT_TYPE(READY) |\
+			VHA_EVENT_TYPE(ERROR) |\
+			VHA_EVENT_TYPE(HL_WDT) |\
+			VHA_EVENT_TYPE(MMU_PAGE_FAULT) |\
+			VHA_EVENT_TYPE(AXI_ERROR))
+#endif /* VHA_SCF */
+#endif  /* HW_AX3 */
+
+/* ignore bottom 4 bits of CONFIG_ID: they identify different build variants */
+#define VHA_CR_CORE_ID_BVNC_CLRMSK (0xfffffffffffffff0ULL)
+#define VHA_CNN_CMPLT_EVNT (VHA_EVENT_TYPE(CNN0_COMPLETE))
+#define VHA_CNN_EVNTS (VHA_CNN_ERR_EVNTS | VHA_CNN_CMPLT_EVNT)
+
+#define VHA_EVNTS_DEFAULT ( ( \
+		VHA_CNN_EVNTS | VHA_CORE_EVNTS \
+		) & VHA_CR_OS(VHA_EVENT_ENABLE_MASKFULL))
+
+#define VHA_SYS_CLOCK_MODE(name, mode) \
+		VHA_CR_SYS_CLK_CTRL0_##name##_##mode \
+
+#define VHA_SYS_CLOCKS_DEFAULT(mode) ( (\
+			VHA_SYS_CLOCK_MODE(SLC, mode) \
+			) & VHA_CR_SYS_CLK_CTRL0_MASKFULL)
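+/* Used e.g. to force all system clocks on, as in vha_dbg_rtm_read():
+ *   IOWRITE64(vha->reg_base, VHA_CR_SYS_CLK_CTRL0, VHA_SYS_CLOCKS_DEFAULT(ON)); */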
+
+/* Clocks macros definition */
+#define VHA_MAIN_CLOCK_MODE(name, mode) \
+		VHA_CR_CLK_CTRL0_##name##_##mode \
+
+#if defined(HW_AX2)
+#define VHA_MAIN_CLOCKS_DEFAULT(mode) ( (\
+			VHA_MAIN_CLOCK_MODE(CNN_EWO, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_PACK, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_OIN, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_POOL, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_SB, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_XBAR, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_NORM, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACT, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACCUM, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CNV, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CBUF, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_IBUF, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CMD, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN, mode) | \
+			VHA_MAIN_CLOCK_MODE(SLC, mode) | \
+			VHA_MAIN_CLOCK_MODE(BIF, mode) \
+			) & VHA_CR_CLK_CTRL0_MASKFULL)
+#elif defined(HW_AX3)
+#define VHA_MAIN_CLOCKS_DEFAULT(mode) ( (\
+			VHA_MAIN_CLOCK_MODE(CNN_CORE_XBAR, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_MMM, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_EWO, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_PACK, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_OIN, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_POOL, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_SB, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_NORM, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACT, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_ACCUM, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CNV, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CBUF, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_IBUF, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_CMD, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_TRS_A, mode) | \
+			VHA_MAIN_CLOCK_MODE(CNN_TRS_B, mode) | \
+			VHA_MAIN_CLOCK_MODE(SLC, mode) | \
+			VHA_MAIN_CLOCK_MODE(BIF, mode) \
+			) & VHA_CR_CLK_CTRL0_MASKFULL)
+#endif
+
+/* Reset macros definition */
+#define VHA_RESET_EN(name) \
+		VHA_CR_RESET_CTRL_VHA_##name##_EN
+
+#define VHA_RESET_DEFAULT ( ( \
+			VHA_RESET_EN(SYS_SOFT_RESET) | \
+			VHA_RESET_EN(AXI_SOFT_RESET) | \
+			VHA_RESET_EN(CNN0_SOFT_RESET) | \
+			VHA_RESET_EN(SLC_SOFT_RESET) | \
+			VHA_RESET_EN(BIF_SOFT_RESET) | \
+			VHA_RESET_EN(SOFT_RESET) \
+			) & VHA_CR_RESET_CTRL_MASKFULL)
+
+/* NN_SYS register macros */
+#define NN_SYS_CR_BASE \
+		(_REG_NNSYS_START)
+
+#define NN_SYS_CR(reg) \
+		(_REG_NNSYS_START + NN_SYS_CR_##reg)
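+/* e.g. NN_SYS_CR(CORE_IP_CONFIG) resolves to
+ * _REG_NNSYS_START + NN_SYS_CR_CORE_IP_CONFIG, as used when reading
+ * the OCM size on Vagus */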

Some files were not shown because too many files changed in this diff