Linux_SDK_V1.0.3

thead_admin, 1 year ago
commit 77ba9cc0a2
73 changed files with 37668 additions and 70 deletions
  1. Makefile (+148 -0)
  2. README.en.md (+0 -36)
  3. README.md (+9 -34)
  4. addons/run_face_detect.sh (+49 -0)
  5. addons/run_face_detect_physical.sh (+49 -0)
  6. addons/run_hhb_dw_2fd_src_test.sh (+50 -0)
  7. addons/run_hhb_dw_fd_src_test.sh (+46 -0)
  8. addons/run_hhb_dw_src_test.sh (+46 -0)
  9. addons/run_hhb_g2d_sink_test.sh (+49 -0)
  10. addons/run_hhb_npu_fd_sink_src_test.sh (+49 -0)
  11. addons/run_hhb_npu_fd_sink_test.sh (+50 -0)
  12. addons/run_hhb_npu_sink_src_test.sh (+49 -0)
  13. addons/run_hhb_npu_sink_test.sh (+50 -0)
  14. addons/run_hhb_npu_src_test.sh (+48 -0)
  15. addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_2fd_stirde.sh (+9 -0)
  16. addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_fd_stirde.sh (+8 -0)
  17. addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_physical_stride.sh (+8 -0)
  18. addons/run_scenario_plink_PseudoDW_NPU_face_detect_2fd_stirde.sh (+9 -0)
  19. addons/run_scenario_plink_PseudoDW_NPU_face_detect_fd_stirde.sh (+8 -0)
  20. addons/run_scenario_plink_PseudoDW_NPU_face_detect_physical_stride.sh (+8 -0)
  21. input/install/face_detect/data.0.bin (BIN)
  22. input/install/face_detect/fish_304_padded.bgr (BIN)
  23. input/install/face_detect/horse_304_padded.bgr (BIN)
  24. lib/install_nn2/include/csi_nn.h (+1075 -0)
  25. lib/install_nn2/include/csinn_data_structure.h (+1013 -0)
  26. lib/install_nn2/include/csinn_runtime.h (+87 -0)
  27. lib/install_nn2/include/shl_asp.h (+40 -0)
  28. lib/install_nn2/include/shl_c860.h (+30 -0)
  29. lib/install_nn2/include/shl_c906.h (+519 -0)
  30. lib/install_nn2/include/shl_c908.h (+348 -0)
  31. lib/install_nn2/include/shl_debug.h (+293 -0)
  32. lib/install_nn2/include/shl_e804.h (+82 -0)
  33. lib/install_nn2/include/shl_gref.h (+609 -0)
  34. lib/install_nn2/include/shl_i805.h (+144 -0)
  35. lib/install_nn2/include/shl_memory.h (+33 -0)
  36. lib/install_nn2/include/shl_node.h (+55 -0)
  37. lib/install_nn2/include/shl_ovx.h (+389 -0)
  38. lib/install_nn2/include/shl_pnna.h (+272 -0)
  39. lib/install_nn2/include/shl_ref.h (+1206 -0)
  40. lib/install_nn2/include/shl_ref_i805.h (+77 -0)
  41. lib/install_nn2/include/shl_thead_rvv.h (+732 -0)
  42. lib/install_nn2/include/shl_utils.h (+97 -0)
  43. lib/install_nn2/lib/libshl_pnna.so (BIN)
  44. lib/install_nn2/version (+1 -0)
  45. lib/plink/include/process_linker.h (+154 -0)
  46. lib/plink/include/process_linker_types.h (+164 -0)
  47. lib/plink/lib/libplink.so (BIN)
  48. lib/vmem/include/video_mem.h (+56 -0)
  49. lib/vmem/lib/libvmem.so (BIN)
  50. model/install/face_detect/shl.hhb.bm (BIN)
  51. model/install/face_detect/shl.hhb.fd.bm (BIN)
  52. test/.debug (+2046 -0)
  53. test/face_detect/004545.jpg (BIN)
  54. test/face_detect/Makefile (+84 -0)
  55. test/face_detect/detect.cpp (+247 -0)
  56. test/face_detect/detect.h (+49 -0)
  57. test/face_detect/dw_2fd_src_test.c (+506 -0)
  58. test/face_detect/dw_fd_src_test.c (+500 -0)
  59. test/face_detect/dw_src_test.c (+500 -0)
  60. test/face_detect/g2d_sink_test.c (+159 -0)
  61. test/face_detect/graph_info.bin (BIN)
  62. test/face_detect/io.c (+220 -0)
  63. test/face_detect/io.h (+47 -0)
  64. test/face_detect/main.c (+218 -0)
  65. test/face_detect/main_det.c (+271 -0)
  66. test/face_detect/model.c (+3803 -0)
  67. test/face_detect/model.params (BIN)
  68. test/face_detect/model_fd.c (+3803 -0)
  69. test/face_detect/npu_sink_src_test.c (+622 -0)
  70. test/face_detect/npu_sink_test.c (+550 -0)
  71. test/face_detect/output_120_out0_nchw_1_2_7668_1.h (+15338 -0)
  72. test/face_detect/process.c (+432 -0)
  73. test/face_detect/process.h (+55 -0)

+ 148 - 0
Makefile

@@ -0,0 +1,148 @@
+##
+ # Copyright (C) 2020 Alibaba Group Holding Limited
+##
+test = $(shell if [ -f "../.param" ]; then echo "exist"; else echo "noexist"; fi)
+ifeq ("$(test)", "exist")
+  include ../.param
+endif
+
+SDK_VER=v0.9
+
+CONFIG_DEBUG_MODE=1
+
+CONFIG_BUILD_LIB_EXTRA_PARAM:=""
+CONFIG_BUILD_TST_EXTRA_PARAM:=""
+
+ifeq ("$(BUILD_SYSTEM)","YOCTO_BUILD")
+  export PATH_TO_SYSROOT=${SYSROOT_DIR}
+  export TOOLSCHAIN_PATH=${TOOLCHAIN_DIR}
+  export TOOLCHAIN_HOST=${CROSS_COMPILE}
+else
+  export PATH_TO_SYSROOT=${BUILDROOT_DIR}/output/host/riscv64-buildroot-linux-gnu/sysroot
+  export TOOLSCHAIN_PATH=${BUILDROOT_DIR}/output/host
+  export TOOLCHAIN_HOST=${TOOLSCHAIN_PATH}/bin/riscv64-unknown-linux-gnu-
+endif
+export PATH_TO_BUILDROOT=$(BUILDROOT_DIR)
+
+# Board
+CONFIG_BOARD_LIGHT_C910_ARRAY:=light_fpga_fm_c910 light-fm-fpga light-fm light-%
+
+ifneq ($(filter $(CONFIG_BOARD_LIGHT_C910_ARRAY),$(BOARD_NAME)),)
+  TARGET_DEMO="plink_AI"
+else
+  $(error "Undefined target board:$(BOARD_NAME)")
+endif
+
+
+DIR_TARGET_LIB=lib
+DIR_TARGET_TEST=test
+DIR_TARGET_MODEL=model
+DIR_TARGET_INPUT=input
+
+BUILD_LOG_START="\033[47;30m>>> $(TARGET_DEMO) $@ begin\033[0m"
+BUILD_LOG_END  ="\033[47;30m<<< $(TARGET_DEMO) $@ end\033[0m"
+
+#
+# Do a parallel build with multiple jobs, based on the number of CPUs online
+# in this system: 'make -j8' on an 8-CPU system, etc.
+#
+# (To override it, run 'make JOBS=1' and similar.)
+#
+ifeq ($(JOBS),)
+  JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null)
+  ifeq ($(JOBS),)
+    JOBS := 1
+  endif
+endif
+
+all:    info lib test install_local_output install_rootfs install_tests
+.PHONY: info lib test install_local_output install_rootfs \
+        install_prepare install_addons install_tests install_lib clean_test clean_lib clean_output clean
+
+info:
+	@echo $(BUILD_LOG_START)
+	@echo "  ====== Build Info from repo project ======"
+	@echo "    BUILD_SYSTEM="$(BUILD_SYSTEM)
+	@echo "    BUILDROOT_DIR="$(BUILDROOT_DIR)
+	@echo "    SYSROOT_DIR="$(SYSROOT_DIR)
+	@echo "    CROSS_COMPILE="$(CROSS_COMPILE)
+	@echo "    LINUX_DIR="$(LINUX_DIR)
+	@echo "    ARCH="$(ARCH)
+	@echo "    KBUILD_CFLAGS="$(KBUILD_CFLAGS)
+	@echo "    KBUILD_AFLAGS="$(KBUILD_AFLAGS)
+	@echo "    KBUILD_LDFLAGS="$(KBUILD_LDFLAGS)
+	@echo "    BOARD_NAME="$(BOARD_NAME)
+	@echo "    KERNEL_ID="$(KERNELVERSION)
+	@echo "    KERNEL_DIR="$(LINUX_DIR)
+	@echo "    CC="$(CC)
+	@echo "    CXX="$(CXX)
+	@echo "    LD="$(LD)
+	@echo "    LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)
+	@echo "    rpath="$(rpath)
+	@echo "    rpath-link="$(rpath-link)
+	@echo "    INSTALL_DIR_ROOTFS="$(INSTALL_DIR_ROOTFS)
+	@echo "    INSTALL_DIR_SDK="$(INSTALL_DIR_SDK)
+	@echo "  ====== Build configuration by settings ======"
+	@echo "    TARGET_DEMO="$(TARGET_DEMO)
+	@echo "    CONFIG_DEBUG_MODE="$(CONFIG_DEBUG_MODE)
+	@echo "    CONFIG_OUT_ENV="$(CONFIG_OUT_ENV)
+	@echo "    JOBS="$(JOBS)
+	@echo "    SDK_VERSION="$(SDK_VER)
+	@echo $(BUILD_LOG_END)
+
+test:
+	@echo $(BUILD_LOG_START)
+	$(MAKE) -C test/face_detect ARCH=$(ARCH) CROSS=$(CROSS_COMPILE)
+	@echo $(BUILD_LOG_END)
+
+clean_test:
+	@echo $(BUILD_LOG_START)
+	$(MAKE) -C test/face_detect clean
+	@echo $(BUILD_LOG_END)
+
+install_prepare:
+	mkdir -p ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_TEST)
+	mkdir -p ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_INPUT)
+	mkdir -p ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_MODEL)
+
+install_addons: install_prepare
+	@if [ -d addons ]; then                                 \
+	    cp -rf addons/* ./output/rootfs/$(TARGET_DEMO);                    \
+	fi
+
+install_tests: install_addons test
+	@if [ -d test/output ]; then                                 \
+	    cp -rf test/output/* ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_TEST); \
+	fi
+
+install_input: install_prepare
+	@if [ -d input/install ]; then                                 \
+	    cp -rf input/install/* ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_INPUT); \
+	fi
+
+install_model: install_prepare
+	@if [ -d model/install ]; then                                 \
+	    cp -rf model/install/* ./output/rootfs/$(TARGET_DEMO)/$(DIR_TARGET_MODEL); \
+	fi
+
+# Empty stubs: 'all' depends on lib and 'clean' on clean_lib, but the
+# prebuilt libraries under lib/ need no build or clean step.
+lib: ;
+
+install_lib: ;
+
+clean_lib: ;
+
+install_local_output: test \
+					install_addons install_lib install_tests install_input install_model
+	@echo $(BUILD_LOG_START)
+	@if command -v tree >/dev/null 2>&1; then \
+	    tree ./output/rootfs;             \
+	fi
+	@echo $(BUILD_LOG_END)
+
+install_rootfs: install_local_output
+	@echo $(BUILD_LOG_START)
+	@echo $(BUILD_LOG_END)
+
+clean_output:
+	@echo $(BUILD_LOG_START)
+	rm -rf ./output
+	@echo $(BUILD_LOG_END)
+
+clean: clean_output clean_test clean_lib
+
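
The Makefile above resolves its toolchain from the surrounding build system: under Yocto it uses the exported SYSROOT_DIR/TOOLCHAIN_DIR/CROSS_COMPILE, otherwise it falls back to a Buildroot output tree. A minimal sketch of the two invocations (paths and board name are illustrative assumptions):

  # Yocto-driven build: the wrapper exports the toolchain variables
  $ BUILD_SYSTEM=YOCTO_BUILD BOARD_NAME=light-fm make

  # Standalone build against a Buildroot output tree
  $ export BUILDROOT_DIR=$HOME/buildroot
  $ BOARD_NAME=light-fm make JOBS=4

Either way, the staged result lands under ./output/rootfs/plink_AI/.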

+ 0 - 36
README.en.md

@@ -1,36 +0,0 @@
-# plink_AI
-
-#### Description
-Source directory for the AI pipeline demo
-
-#### Software Architecture
-Software architecture description
-
-#### Installation
-
-1.  xxxx
-2.  xxxx
-3.  xxxx
-
-#### Instructions
-
-1.  xxxx
-2.  xxxx
-3.  xxxx
-
-#### Contribution
-
-1.  Fork the repository
-2.  Create Feat_xxx branch
-3.  Commit your code
-4.  Create Pull Request
-
-
-#### Gitee Feature
-
-1.  You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
-2.  Gitee blog [blog.gitee.com](https://blog.gitee.com)
-3.  Explore open source project [https://gitee.com/explore](https://gitee.com/explore)
-4.  The most valuable open source project [GVP](https://gitee.com/gvp)
-5.  The manual of Gitee [https://gitee.com/help](https://gitee.com/help)
-6.  The most popular members  [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)

+ 9 - 34
README.md

@@ -1,37 +1,12 @@
-# plink_AI
+# Comments
+- plink-AI is a test demo used to implement a multimedia application with AI features.
 
-#### Introduction
-Source directory for the AI pipeline demo
+# How to get the code
+- git clone git@gitlab.alibaba-inc.com:thead-linux-private/plink-AI.git
 
-#### Software Architecture
-Software architecture description
+# Description of each directory
+- test/: Test cases and demo app.
 
-
-#### Installation
-
-1.  xxxx
-2.  xxxx
-3.  xxxx
-
-#### Instructions
-
-1.  xxxx
-2.  xxxx
-3.  xxxx
-
-#### Contribution
-
-1.  Fork the repository
-2.  Create a Feat_xxx branch
-3.  Commit your code
-4.  Create a Pull Request
-
-
-#### Gitee Features
-
-1.  Use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
-2.  The official Gitee blog [blog.gitee.com](https://blog.gitee.com)
-3.  Visit [https://gitee.com/explore](https://gitee.com/explore) to discover excellent open source projects on Gitee
-4.  [GVP](https://gitee.com/gvp), short for Gitee Most Valuable Project, is a curated list of outstanding open source projects
-5.  The official Gitee manual [https://gitee.com/help](https://gitee.com/help)
-6.  Gitee Cover People is a column showcasing Gitee members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
+# How to build
+  $ make clean
+  $ make
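
Note that a bare `make` aborts because the top-level Makefile errors out unless BOARD_NAME matches one of the light-* boards it accepts, so a working sequence looks like this sketch (the board name is an illustrative assumption):

  $ make clean
  $ BOARD_NAME=light-fm make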

+ 49 - 0
addons/run_face_detect.sh

@@ -0,0 +1,49 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/face_detect ${MODEL_PATH}/${MODEL}/shl.hhb.fd.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}
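
The script resolves test, model, and input paths relative to the current directory (BASE_PATH=.), so it must be launched from the root of the deployed plink_AI tree. A hypothetical invocation with explicit options (the option values are illustrative):

  $ cd plink_AI
  $ ./run_face_detect.sh -m face_detect -i data.0.bin -r 10

Only the model and input file are forwarded to the face_detect binary; the remaining options currently affect just the log line and the out/ directory layout.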

+ 49 - 0
addons/run_face_detect_physical.sh

@@ -0,0 +1,49 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/face_detect ${MODEL_PATH}/${MODEL}/shl.hhb.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 50 - 0
addons/run_hhb_dw_2fd_src_test.sh

@@ -0,0 +1,50 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+IN_FILE2=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-s in_file2] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:s:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    s)
+    IN_FILE2=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "dw_src_test " "MODEL="${MODEL}, "IN_FILE="${IN_FILE} "IN_FILE2="${IN_FILE2} "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+${TEST_PATH}/dw_2fd_src_test ${INPUT_PATH}/${MODEL}/${IN_FILE} ${INPUT_PATH}/${MODEL}/${IN_FILE2}
+

+ 46 - 0
addons/run_hhb_dw_fd_src_test.sh

@@ -0,0 +1,46 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "dw_src_test " "MODEL="${MODEL}, "IN_FILE="${IN_FILE} "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+${TEST_PATH}/dw_fd_src_test ${INPUT_PATH}/${MODEL}/${IN_FILE}
+

+ 46 - 0
addons/run_hhb_dw_src_test.sh

@@ -0,0 +1,46 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "dw_src_test " "MODEL="${MODEL}, "IN_FILE="${IN_FILE} "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+${TEST_PATH}/dw_src_test ${INPUT_PATH}/${MODEL}/${IN_FILE}
+

+ 49 - 0
addons/run_hhb_g2d_sink_test.sh

@@ -0,0 +1,49 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf /dev/shm/npu_g2d
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/g2d_sink_test ${MODEL_PATH}/${MODEL}/shl.hhb.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 49 - 0
addons/run_hhb_npu_fd_sink_src_test.sh

@@ -0,0 +1,49 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/npu_fd_sink_src_test ${MODEL_PATH}/${MODEL}/shl.hhb.fd.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 50 - 0
addons/run_hhb_npu_fd_sink_test.sh

@@ -0,0 +1,50 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "npu_sink_test " "MODEL="${MODEL}, "IN_FILE="${IN_FILE} "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+#rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/npu_fd_sink_test ${MODEL_PATH}/${MODEL}/shl.hhb.fd.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 49 - 0
addons/run_hhb_npu_sink_src_test.sh

@@ -0,0 +1,49 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/npu_sink_src_test ${MODEL_PATH}/${MODEL}/shl.hhb.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 50 - 0
addons/run_hhb_npu_sink_test.sh

@@ -0,0 +1,50 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "npu_sink_test " "MODEL="${MODEL}, "IN_FILE="${IN_FILE} "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+#rm -rf /dev/shm/ispnpu
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/npu_sink_test ${MODEL_PATH}/${MODEL}/shl.hhb.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 48 - 0
addons/run_hhb_npu_src_test.sh

@@ -0,0 +1,48 @@
+#!/bin/sh
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+OUT_PATH=${TEST_PATH}/out
+
+REPEAT_COUNT=1
+TIMEOUT=1000 # 1000 ms
+TASK_COUNT=1
+MODEL=face_detect
+OCM_SIZE=OCM_1M
+IN_FILE=data.0.bin
+TOP_NUM=0
+echo "Usage: $0 [-r repeat_cnt] [-t timeout_ms] [-m model_name] [-o ocm_size] [-i in_file] [-p top_num]"
+echo "For example: $0 -m face_detect"
+while getopts ":r:t:m:o:i:p:" opt
+do
+  case $opt in
+    r)
+    REPEAT_COUNT=$OPTARG
+    ;;
+    t)
+    TIMEOUT=$OPTARG
+    ;;
+    m)
+    MODEL=$OPTARG
+    ;;
+    o)
+    OCM_SIZE=$OPTARG
+    ;;
+    i)
+    IN_FILE=$OPTARG
+    ;;
+    p)
+    TOP_NUM=$OPTARG
+    ;;
+    ?)
+    echo "unsupport parameter!!!"
+    exit 1;;
+  esac
+done
+echo "MODEL="${MODEL}, "OCM="${OCM_SIZE}, "REPEAT_COUNT="${REPEAT_COUNT}
+
+rm -rf ${OUT_PATH}/${MODEL}/${OCM_SIZE}; mkdir -p ${OUT_PATH}/${MODEL}/${OCM_SIZE}
+
+${TEST_PATH}/npu_src_test ${MODEL_PATH}/${MODEL}/shl.hhb.bm \
+  ${INPUT_PATH}/${MODEL}/${IN_FILE}

+ 9 - 0
addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_2fd_stirde.sh

@@ -0,0 +1,9 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+IN_FILE2=fish_304_padded.bgr
+./run_hhb_dw_2fd_src_test.sh -m ${MODEL} -i ${IN_FILE} -s ${IN_FILE2} & sleep 5; ./run_hhb_npu_fd_sink_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_g2d_sink_test.sh -m ${MODEL} -i ${IN_FILE}
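
Each scenario script chains the plink stages with a 5-second stagger, presumably to give every process time to bring up its plink endpoint before the next one connects. Expanded into separate steps, the one-liner above is equivalent to this sketch:

  ./run_hhb_dw_2fd_src_test.sh -m face_detect -i horse_304_padded.bgr -s fish_304_padded.bgr &
  sleep 5
  ./run_hhb_npu_fd_sink_src_test.sh -m face_detect -i horse_304_padded.bgr &
  sleep 5
  ./run_hhb_g2d_sink_test.sh -m face_detect -i horse_304_padded.bgr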

+ 8 - 0
addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_fd_stirde.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+./run_hhb_dw_fd_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_npu_fd_sink_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_g2d_sink_test.sh -m ${MODEL} -i ${IN_FILE}

+ 8 - 0
addons/run_scenario_plink_PseudoDW_NPU_PseudoG2D_face_detect_physical_stride.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+./run_hhb_dw_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_npu_sink_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_g2d_sink_test.sh -m ${MODEL} -i ${IN_FILE}

+ 9 - 0
addons/run_scenario_plink_PseudoDW_NPU_face_detect_2fd_stirde.sh

@@ -0,0 +1,9 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+IN_FILE2=fish_304_padded.bgr
+./run_hhb_dw_2fd_src_test.sh -m ${MODEL} -i ${IN_FILE} -s ${IN_FILE2} & sleep 5; ./run_hhb_npu_fd_sink_test.sh -m ${MODEL} -i ${IN_FILE}

+ 8 - 0
addons/run_scenario_plink_PseudoDW_NPU_face_detect_fd_stirde.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+./run_hhb_dw_fd_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_npu_fd_sink_test.sh -m ${MODEL} -i ${IN_FILE}

+ 8 - 0
addons/run_scenario_plink_PseudoDW_NPU_face_detect_physical_stride.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+ps | grep '[h]hb' | awk '{print $2}' | xargs kill -9
+BASE_PATH=.
+TEST_PATH=${BASE_PATH}/test
+MODEL_PATH=${BASE_PATH}/model
+INPUT_PATH=${BASE_PATH}/input
+MODEL=face_detect
+IN_FILE=horse_304_padded.bgr
+./run_hhb_dw_src_test.sh -m ${MODEL} -i ${IN_FILE} & sleep 5; ./run_hhb_npu_sink_test.sh -m ${MODEL} -i ${IN_FILE}

BIN
input/install/face_detect/data.0.bin


BIN
input/install/face_detect/fish_304_padded.bgr


BIN
input/install/face_detect/horse_304_padded.bgr


+ 1075 - 0
lib/install_nn2/include/csi_nn.h

@@ -0,0 +1,1075 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_CSI_NN_H_
+#define INCLUDE_CSI_NN_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "csinn_data_structure.h"
+#include "csinn_runtime.h"
+#include "shl_debug.h"
+#include "shl_memory.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
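+/* Note: every operator below is declared as an <op>_init()/<op>() pair. The
+ * _init() variant takes the same arguments and, per the CSI-NN2 convention,
+ * is called once up front so the runtime can bind a backend implementation
+ * before the execution call runs. */
+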
+int csinn_conv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv2d_params *params);
+
+int csinn_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                 struct csinn_conv2d_params *params);
+
+int csinn_depthwise_conv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+int csinn_depthwise_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                           struct csinn_conv2d_params *params);
+
+int csinn_group_conv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+int csinn_group_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv2d_params *params);
+
+int csinn_conv2d_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                           struct csinn_conv2d_params *params);
+
+int csinn_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv2d_params *params);
+
+int csinn_depthwise_conv2d_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                     struct csinn_conv2d_params *params);
+
+int csinn_depthwise_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+int csinn_conv2d_relu6_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+int csinn_conv2d_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv2d_params *params);
+
+int csinn_deconv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                        struct csinn_conv2d_params *params);
+
+int csinn_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                   struct csinn_conv2d_params *params);
+
+int csinn_conv3d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv3d_params *params);
+
+int csinn_conv3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                 struct csinn_conv3d_params *params);
+
+int csinn_deconv3d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                        struct csinn_conv3d_params *params);
+
+int csinn_deconv3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                   struct csinn_conv3d_params *params);
+
+int csinn_fsmn_init(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+                    struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+                    struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+                    struct csinn_fsmn_params *params);
+
+int csinn_fsmn(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+               struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+               struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+               struct csinn_fsmn_params *params);
+
+int csinn_fullyconnected_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *weights, struct csinn_tensor *bias,
+                              struct csinn_fc_params *params);
+
+int csinn_fullyconnected(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *weights, struct csinn_tensor *bias,
+                         struct csinn_fc_params *params);
+
+int csinn_fullyconnected_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                   struct csinn_fc_params *params);
+
+int csinn_fullyconnected_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *weights, struct csinn_tensor *bias,
+                              struct csinn_fc_params *params);
+
+int csinn_maxpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_pool_params *params);
+
+int csinn_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pool_params *params);
+
+int csinn_maxpool3d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_pool_params *params);
+
+int csinn_maxpool3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pool_params *params);
+
+int csinn_global_maxpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+
+int csinn_global_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_pool_params *params);
+
+int csinn_avgpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_pool_params *params);
+
+int csinn_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pool_params *params);
+
+int csinn_avgpool3d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_pool_params *params);
+
+int csinn_avgpool3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pool_params *params);
+
+int csinn_global_avgpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+
+int csinn_global_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_pool_params *params);
+
+int csinn_l2pool_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pool_params *params);
+
+int csinn_l2pool(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_pool_params *params);
+
+int csinn_pool_with_argmax_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+
+int csinn_pool_with_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_pool_params *params);
+
+int csinn_maxpool2d_locat_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int csinn_maxpool2d_locat(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int csinn_unpooling_init(struct csinn_tensor *input, struct csinn_tensor *mask,
+                         struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int csinn_unpooling(struct csinn_tensor *input, struct csinn_tensor *mask,
+                    struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int csinn_roi_align_init(struct csinn_tensor *data, struct csinn_tensor *rois,
+                         struct csinn_tensor *output, struct csinn_roi_align_params *params);
+
+int csinn_roi_align(struct csinn_tensor *data, struct csinn_tensor *rois,
+                    struct csinn_tensor *output, struct csinn_roi_align_params *params);
+
+int csinn_negative_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int csinn_negative(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_floor_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_floor(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_ceil_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_ceil(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_sign_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_sign(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_trunc_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_trunc(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_round_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_round(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_abs_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_abs(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_isnan_bool_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_siso_params *params);
+
+int csinn_isnan_bool(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_exp_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_exp(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_expm1_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_expm1(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_sin_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_sin(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_cos_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_cos(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_tanh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_tanh(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_log_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_log(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_sqrt_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_sqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_rsqrt_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_rsqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_square_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int csinn_square(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int csinn_sigmoid_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_sigmoid_params *params);
+
+int csinn_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_sigmoid_params *params);
+
+int csinn_hard_sigmoid_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_sigmoid_params *params);
+
+int csinn_hard_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_sigmoid_params *params);
+
+int csinn_elu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int csinn_elu(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_relu_params *params);
+
+int csinn_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_relu_params *params);
+
+int csinn_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_relu_params *params);
+
+int csinn_relu1_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int csinn_relu1(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_relu_params *params);
+
+int csinn_relu6_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int csinn_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_relu_params *params);
+
+int csinn_relun_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int csinn_relun(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_relu_params *params);
+
+int csinn_leaky_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_relu_params *params);
+
+int csinn_leaky_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int csinn_softrelu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int csinn_softrelu(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int csinn_prelu_init(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                     struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int csinn_prelu(struct csinn_tensor *input, struct csinn_tensor *alpha, struct csinn_tensor *output,
+                struct csinn_prelu_params *params);
+
+int csinn_softplus_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int csinn_softplus(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_softmax_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_softmax_params *params);
+
+int csinn_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_softmax_params *params);
+
+int csinn_log_softmax_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_softmax_params *params);
+
+int csinn_log_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_softmax_params *params);
+
+int csinn_batch_normalization_init(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                   struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                   struct csinn_tensor *beta, struct csinn_tensor *output,
+                                   struct csinn_bn_params *params);
+
+int csinn_batch_normalization(struct csinn_tensor *input, struct csinn_tensor *mean,
+                              struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                              struct csinn_tensor *beta, struct csinn_tensor *output,
+                              struct csinn_bn_params *params);
+
+int csinn_l2_normalization_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_l2n_params *params);
+
+int csinn_l2_normalization(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_l2n_params *params);
+
+int csinn_lrn_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_lrn_params *params);
+
+int csinn_lrn(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_lrn_params *params);
+
+int csinn_matmul_init(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                      struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int csinn_matmul(struct csinn_tensor *mat0, struct csinn_tensor *mat1, struct csinn_tensor *output,
+                 struct csinn_matmul_params *params);
+
+int csinn_add_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_add(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_sub_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_sub(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_mul_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_mul(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_div_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_div(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_floor_divide_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_floor_divide(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_floor_mod_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_floor_mod(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_mod_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_mod(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_maximum_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_maximum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_minimum_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_minimum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_power_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_power(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_greater_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_greater(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_less_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_less(struct csinn_tensor *input0, struct csinn_tensor *input1,
+               struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_and_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_and(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_or_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_or(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_not_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int csinn_logical_not(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int csinn_logical_xor_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_logical_xor(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_equal_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_not_equal_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_not_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_greater_equal_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                             struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_greater_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_less_equal_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_less_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_select_init(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                      struct csinn_tensor *input1, struct csinn_tensor *output,
+                      struct csinn_select_params *params);
+
+int csinn_select(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                 struct csinn_tensor *input1, struct csinn_tensor *output,
+                 struct csinn_select_params *params);
+
+int csinn_and_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_and(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_or_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_or(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+             struct csinn_diso_params *params);
+
+int csinn_xor_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int csinn_xor(struct csinn_tensor *input0, struct csinn_tensor *input1, struct csinn_tensor *output,
+              struct csinn_diso_params *params);
+
+int csinn_not_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_not(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_pad_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_pad_params *params);
+
+int csinn_pad(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_pad_params *params);
+
+int csinn_resize_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_resize_params *params);
+
+int csinn_resize(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_resize_params *params);
+
+int csinn_concat_init(struct csinn_tensor **input, struct csinn_tensor *output,
+                      struct csinn_concat_params *params);
+
+int csinn_concat(struct csinn_tensor **input, struct csinn_tensor *output,
+                 struct csinn_concat_params *params);
+
+int csinn_proposal_init(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                        struct csinn_tensor *im_info, struct csinn_tensor *output,
+                        struct csinn_proposal_params *params);
+
+int csinn_proposal(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                   struct csinn_tensor *im_info, struct csinn_tensor *output,
+                   struct csinn_proposal_params *params);
+
+int csinn_psroipooling_init(struct csinn_tensor *data, struct csinn_tensor *rois,
+                            struct csinn_tensor *output, struct csinn_psroipooling_params *params);
+
+int csinn_psroipooling(struct csinn_tensor *data, struct csinn_tensor *rois,
+                       struct csinn_tensor *output, struct csinn_psroipooling_params *params);
+
+int csinn_transpose_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_transpose_params *params);
+
+int csinn_transpose(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_transpose_params *params);
+
+int csinn_reshape_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_reshape_params *params);
+
+int csinn_reshape(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reshape_params *params);
+
+int csinn_shape_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_shape_params *params);
+
+int csinn_shape(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_shape_params *params);
+
+int csinn_expand_dims_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_expand_dims_params *params);
+
+int csinn_expand_dims(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_expand_dims_params *params);
+
+int csinn_reverse_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_reverse_params *params);
+
+int csinn_reverse(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reverse_params *params);
+
+int csinn_flatten_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_flatten_params *params);
+
+int csinn_flatten(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_flatten_params *params);
+
+int csinn_crop_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_crop_params *params);
+
+int csinn_crop(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_crop_params *params);
+
+int csinn_slice_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_slice_params *params);
+
+int csinn_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_slice_params *params);
+
+int csinn_split_init(struct csinn_tensor *input, struct csinn_tensor **output,
+                     struct csinn_split_params *params);
+
+int csinn_split(struct csinn_tensor *input, struct csinn_tensor **output,
+                struct csinn_split_params *params);
+
+int csinn_stack_init(struct csinn_tensor **inputs, struct csinn_tensor *output,
+                     struct csinn_stack_params *params);
+
+int csinn_stack(struct csinn_tensor **inputs, struct csinn_tensor *output,
+                struct csinn_stack_params *params);
+
+int csinn_unstack_init(struct csinn_tensor *input, struct csinn_tensor **output,
+                       struct csinn_unstack_params *params);
+
+int csinn_unstack(struct csinn_tensor *input, struct csinn_tensor **output,
+                  struct csinn_unstack_params *params);
+
+int csinn_tile_init(struct csinn_tensor *inputs, struct csinn_tensor *output,
+                    struct csinn_tile_params *params);
+
+int csinn_tile(struct csinn_tensor *inputs, struct csinn_tensor *output,
+               struct csinn_tile_params *params);
+
+int csinn_arange_init(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int csinn_arange(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int csinn_where_init(struct csinn_tensor *condition, struct csinn_tensor *x, struct csinn_tensor *y,
+                     struct csinn_tensor *output, struct csinn_where_params *params);
+
+int csinn_where(struct csinn_tensor *condition, struct csinn_tensor *x, struct csinn_tensor *y,
+                struct csinn_tensor *output, struct csinn_where_params *params);
+
+int csinn_gather_init(struct csinn_tensor *input, struct csinn_tensor *indices,
+                      struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int csinn_gather(struct csinn_tensor *input, struct csinn_tensor *indices,
+                 struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int csinn_gather_nd_init(struct csinn_tensor *input, struct csinn_tensor *indices,
+                         struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int csinn_gather_nd(struct csinn_tensor *input, struct csinn_tensor *indices,
+                    struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int csinn_squeeze_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_squeeze_params *params);
+
+int csinn_squeeze(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_squeeze_params *params);
+
+int csinn_ndarray_size_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_ndarray_size_params *params);
+
+int csinn_ndarray_size(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_ndarray_size_params *params);
+
+int csinn_space_to_batch_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_space_to_batch_params *params);
+
+int csinn_space_to_batch(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_space_to_batch_params *params);
+
+int csinn_space_to_batch_nd_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_space_to_batch_nd_params *params);
+
+int csinn_space_to_batch_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_space_to_batch_nd_params *params);
+
+int csinn_batch_to_space_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_batch_to_space_params *params);
+
+int csinn_batch_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_batch_to_space_params *params);
+
+int csinn_batch_to_space_nd_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_batch_to_space_nd_params *params);
+
+int csinn_batch_to_space_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_batch_to_space_nd_params *params);
+
+int csinn_space_to_depth_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_space_to_depth_params *params);
+
+int csinn_space_to_depth(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_space_to_depth_params *params);
+
+int csinn_depth_to_space_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_depth_to_space_params *params);
+
+int csinn_depth_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_depth_to_space_params *params);
+
+int csinn_one_hot_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_one_hot_params *params);
+
+int csinn_one_hot(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_one_hot_params *params);
+
+int csinn_sequence_mask_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                             struct csinn_tensor *output,
+                             struct csinn_sequence_mask_params *params);
+
+int csinn_sequence_mask(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_sequence_mask_params *params);
+
+int csinn_im2col_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_im2col_params *params);
+
+int csinn_im2col(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_im2col_params *params);
+
+int csinn_col2im_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_col2im_params *params);
+
+int csinn_col2im(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_tensor *kernel, struct csinn_col2im_params *params);
+
+int csinn_sum_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int csinn_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_reduce_params *params);
+
+int csinn_mean_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int csinn_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_reduce_params *params);
+
+int csinn_max_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int csinn_max(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_reduce_params *params);
+
+int csinn_min_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int csinn_min(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_reduce_params *params);
+
+int csinn_prod_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int csinn_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_reduce_params *params);
+
+int csinn_argmin_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_reduce_params *params);
+
+int csinn_argmin(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int csinn_argmax_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_reduce_params *params);
+
+int csinn_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int csinn_all_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int csinn_all(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_reduce_params *params);
+
+int csinn_any_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int csinn_any(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_reduce_params *params);
+
+int csinn_reorg_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reorg_params *params);
+
+int csinn_reorg(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reorg_params *params);
+
+int csinn_yuv_rgb_scale_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_siso_params *params);
+
+int csinn_yuv_rgb_scale(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int csinn_segment_max_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_max(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_min_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_min(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_sum_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_sum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_mean_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_mean(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_prod_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_segment_prod(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int csinn_threshold_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_relu_params *params);
+
+int csinn_threshold_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_relu_params *params);
+
+int csinn_acos_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+int csinn_acos(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_acosh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_acosh(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_asin_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_asin(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_asinh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_asinh(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_atan_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_atan(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_atanh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_atanh(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_cosh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_cosh(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_sinh_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int csinn_sinh(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_siso_params *params);
+
+int csinn_tan_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_tan(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_log1p_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int csinn_log1p(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int csinn_softsign_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int csinn_softsign(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_erf_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int csinn_erf(struct csinn_tensor *input, struct csinn_tensor *output,
+              struct csinn_siso_params *params);
+
+int csinn_cumsum_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_cumsum_params *params);
+
+int csinn_cumsum(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_cumsum_params *params);
+
+int csinn_cumprod_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_cumprod_params *params);
+
+int csinn_cumprod(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_cumprod_params *params);
+
+int csinn_reduce_max_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reduce_params *params);
+
+int csinn_reduce_max(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reduce_params *params);
+
+int csinn_reduce_min_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reduce_params *params);
+
+int csinn_reduce_min(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reduce_params *params);
+
+int csinn_reduce_mean_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int csinn_reduce_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_reduce_params *params);
+
+int csinn_reduce_sum_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reduce_params *params);
+
+int csinn_reduce_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reduce_params *params);
+
+int csinn_reduce_prod_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int csinn_reduce_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_reduce_params *params);
+
+int csinn_reduce_logsumexp_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_reduce_params *params);
+
+int csinn_reduce_logsumexp(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int csinn_broadcast_to_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_broadcast_to_params *params);
+
+int csinn_broadcast_to(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_broadcast_to_params *params);
+
+int csinn_scatter_nd_init(struct csinn_tensor *input, struct csinn_tensor *indices,
+                          struct csinn_tensor *updates, struct csinn_tensor *output,
+                          struct csinn_scatter_nd_params *params);
+
+int csinn_scatter_nd(struct csinn_tensor *input, struct csinn_tensor *indices,
+                     struct csinn_tensor *updates, struct csinn_tensor *output,
+                     struct csinn_scatter_nd_params *params);
+
+int csinn_clip_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_clip_params *params);
+
+int csinn_clip(struct csinn_tensor *input, struct csinn_tensor *output,
+               struct csinn_clip_params *params);
+
+int csinn_strided_slice_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_strided_slice_params *params);
+
+int csinn_strided_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_strided_slice_params *params);
+
+int csinn_topk_init(struct csinn_tensor *input, struct csinn_tensor *output1,
+                    struct csinn_tensor *output2, struct csinn_topk_params *params);
+
+int csinn_topk(struct csinn_tensor *input, struct csinn_tensor *output1,
+               struct csinn_tensor *output2, struct csinn_topk_params *params);
+
+int csinn_non_max_suppression_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                   struct csinn_tensor *output,
+                                   struct csinn_non_max_suppression_params *params);
+
+int csinn_non_max_suppression(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                              struct csinn_tensor *output,
+                              struct csinn_non_max_suppression_params *params);
+
+int csinn_shuffle_channel_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_shuffle_channel_params *params);
+
+int csinn_shuffle_channel(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_shuffle_channel_params *params);
+
+int csinn_roipool_init(struct csinn_tensor *data, struct csinn_tensor *rois,
+                       struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int csinn_roipool(struct csinn_tensor *data, struct csinn_tensor *rois, struct csinn_tensor *output,
+                  struct csinn_roi_pool_params *params);
+
+int csinn_layer_norm_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                          struct csinn_layer_norm_params *params);
+
+int csinn_layer_norm(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                     struct csinn_layer_norm_params *params);
+
+int csinn_cache_matmul_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *weight, struct csinn_tensor *bias,
+                            struct csinn_cache_matmul_params *params);
+
+int csinn_cache_matmul(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *weight, struct csinn_tensor *bias,
+                       struct csinn_cache_matmul_params *params);
+
+int csinn_cache_conv1d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *weight, struct csinn_tensor *bias,
+                            struct csinn_cache_conv1d_params *params);
+
+int csinn_cache_conv1d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *weight, struct csinn_tensor *bias,
+                       struct csinn_cache_conv1d_params *params);
+
+int csinn_conv1d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv1d_params *params);
+
+int csinn_conv1d(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                 struct csinn_conv1d_params *params);
+
+int csinn_data_convert_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_siso_params *params);
+int csinn_data_convert(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INCLUDE_CSI_NN_H_
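Every operator in this header comes as an `_init`/execute pair: the `_init` entry binds a backend kernel through the callback mechanism, and the plain entry runs it. Below is a minimal sketch of that pairing using `csinn_clip` — illustrative only, not part of this commit. It assumes the reference backend, FP32 data, and the `CSINN_TRUE` success convention; the session/tensor/params helpers come from `csinn_runtime.h`, added later in this commit, and `clip_example` is a made-up name.

#include "csi_nn.h"
#include "csinn_runtime.h"

int clip_example(void) {
    struct csinn_session *sess = csinn_alloc_session();
    sess->base_api = CSINN_REF;
    sess->base_dtype = CSINN_DTYPE_FLOAT32;
    csinn_session_init(sess);

    /* describe two 1x8 FP32 tensors over caller-owned storage */
    float in_data[8] = {-2, -1, 0, 1, 2, 3, 7, 9};
    float out_data[8];
    struct csinn_tensor *in = csinn_alloc_tensor(sess);
    in->dtype = CSINN_DTYPE_FLOAT32;
    in->layout = CSINN_LAYOUT_NC;
    in->dim_count = 2;
    in->dim[0] = 1;
    in->dim[1] = 8;
    in->data = in_data;
    struct csinn_tensor *out = csinn_alloc_tensor(sess);
    out->dtype = CSINN_DTYPE_FLOAT32;
    out->layout = CSINN_LAYOUT_NC;
    out->dim_count = 2;
    out->dim[0] = 1;
    out->dim[1] = 8;
    out->data = out_data;

    struct csinn_clip_params *params =
        (struct csinn_clip_params *)csinn_alloc_params(sizeof(*params), sess);
    params->min_value = 0.0f;
    params->max_value = 6.0f;

    /* init selects a kernel for this backend; the exec entry runs it */
    int ok = csinn_clip_init(in, out, params) == CSINN_TRUE &&
             csinn_clip(in, out, params) == CSINN_TRUE;

    csinn_free_params(params);
    csinn_free_tensor(in);
    csinn_free_tensor(out);
    csinn_free_session(sess);
    return ok ? 0 : -1;
}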

+ 1013 - 0
lib/install_nn2/include/csinn_data_structure.h

@@ -0,0 +1,1013 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+#ifndef INCLUDE_CSI_INTERNAL_H_
+#define INCLUDE_CSI_INTERNAL_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* data type */
+enum csinn_dtype_enum {
+    CSINN_DTYPE_BOOL = 0,
+    CSINN_DTYPE_INT4,
+    CSINN_DTYPE_UINT8,
+    CSINN_DTYPE_INT8,
+    CSINN_DTYPE_UINT16,
+    CSINN_DTYPE_INT16,
+    CSINN_DTYPE_UINT32,
+    CSINN_DTYPE_INT32,
+    CSINN_DTYPE_FLOAT16,
+    CSINN_DTYPE_BFLOAT16,
+    CSINN_DTYPE_FLOAT32,
+    CSINN_DTYPE_FLOAT64,
+    CSINN_DTYPE_SIZE,
+};
+
+/* data memory type */
+enum csinn_mem_type_enum {
+    CSINN_MEM_TYPE_CPU_NOT_ALIGNED = 0,
+    CSINN_MEM_TYPE_CPU_ALIGNED,
+    CSINN_MEM_TYPE_DMABUF,
+    CSINN_MEM_TYPE_ASP42, /* structured sparsity 4:2 */
+    CSINN_MEM_TYPE_ASP41, /* structured sparsity 4:1 */
+};
+
+/* quant type */
+enum csinn_quant_enum {
+    CSINN_QUANT_UNSET = 0,
+    CSINN_QUANT_INT4_SYM,
+    CSINN_QUANT_UINT8_ASYM,
+    CSINN_QUANT_UINT8_SYM,
+    CSINN_QUANT_INT8_ASYM,
+    CSINN_QUANT_INT8_SYM,
+    CSINN_QUANT_INT16_SYM,
+    CSINN_QUANT_FLOAT16,
+    CSINN_QUANT_BFLOAT16,
+    CSINN_QUANT_FLOAT32,
+    CSINN_QUANT_SIZE,
+};
+
+/* API type */
+enum csinn_api_enum {
+    CSINN_REF = 0,
+    CSINN_GREF,
+    CSINN_C860,
+    CSINN_C906,
+    CSINN_C910,
+    CSINN_ANOLE,
+    CSINN_CH8601,
+    CSINN_LIGHT,
+    CSINN_DP1K,
+    CSINN_I805,
+    CSINN_E804,
+    CSINN_REF_I805,
+    CSINN_C908,
+    CSINN_TVMGEN,
+    CSINN_ASP,
+    CSINN_RVV,
+    CSINN_API_SIZE,
+};
+
+/* run mode */
+enum csinn_rmode_enum {
+    CSINN_RM_LAYER = 0,
+    CSINN_RM_CPU_GRAPH,
+    CSINN_RM_NPU_GRAPH,
+    CSINN_RUN_MODE_SIZE,
+};
+
+/* model save */
+enum csinn_mode_save_enum {
+    CSINN_SAVE_AND_RUN = 0,
+    CSINN_SAVE_ONLY,
+    CSINN_RUN_ONLY,
+};
+
+/* op and utils */
+enum csinn_op_enum {
+    CSINN_OP_ABS = 0,
+    CSINN_OP_ACOS,
+    CSINN_OP_ACOSH,
+    CSINN_OP_ADD,
+    CSINN_OP_ALL,
+    CSINN_OP_AND,
+    CSINN_OP_ANY,
+    CSINN_OP_ARANGE,
+    CSINN_OP_ARGMAX,
+    CSINN_OP_ARGMIN,
+    CSINN_OP_ASIN,
+    CSINN_OP_ASINH,
+    CSINN_OP_ATAN,
+    CSINN_OP_ATANH,
+    CSINN_OP_AVGPOOL2D,
+    CSINN_OP_AVGPOOL3D,
+    CSINN_OP_BN,
+    CSINN_OP_BATCH_TO_SPACE,
+    CSINN_OP_BATCH_TO_SPACE_ND,
+    CSINN_OP_BROADCOST,
+    CSINN_OP_CACHE_MATMUL,
+    CSINN_OP_CACHE_CONV1D,
+    CSINN_OP_CEIL,
+    CSINN_OP_CLIP,
+    CSINN_OP_COL2IM,
+    CSINN_OP_CONCAT,
+    CSINN_OP_CONV1D,
+    CSINN_OP_CONV2D,
+    CSINN_OP_CONV2D_RELU,
+    CSINN_OP_CONV2D_RELU6,
+    CSINN_OP_CONV2D_CHANNEL,
+    CSINN_OP_CONV2D_CHANNEL_RELU,
+    CSINN_OP_CONV2D_CHANNEL_RELU6,
+    CSINN_OP_DEPTHWISE_CONV2D,
+    CSINN_OP_DEPTHWISE_CONV2D_RELU,
+    CSINN_OP_DEPTHWISE_CONV2D_RELU6,
+    CSINN_OP_DEPTHWISE_CONV2D_CHANNEL,
+    CSINN_OP_DEPTHWISE_CONV2D_CHANNEL_RELU,
+    CSINN_OP_DEPTHWISE_CONV2D_CHANNEL_RELU6,
+    CSINN_OP_GROUP_CONV2D,
+    CSINN_OP_GROUP_CONV2D_RELU,
+    CSINN_OP_GROUP_CONV2D_RELU6,
+    CSINN_OP_GROUP_CONV2D_CHANNEL,
+    CSINN_OP_GROUP_CONV2D_CHANNEL_RELU,
+    CSINN_OP_CONV3D,
+    CSINN_OP_DATA_CONVERT,
+    CSINN_OP_COS,
+    CSINN_OP_COSH,
+    CSINN_OP_CROP,
+    CSINN_OP_CUMPROD,
+    CSINN_OP_CUMSUM,
+    CSINN_OP_DECONV2D,
+    CSINN_OP_DEPTHWISE_DECONV2D,
+    CSINN_OP_DECONV3D,
+    CSINN_OP_DEPTH_TO_SPACE,
+    CSINN_OP_DIV,
+    CSINN_OP_ELU,
+    CSINN_OP_EQUANL,
+    CSINN_OP_ERF,
+    CSINN_OP_EXP,
+    CSINN_OP_EXPAND_DIMS,
+    CSINN_OP_EXPM1,
+    CSINN_OP_FLATTEN,
+    CSINN_OP_FLOOR_DIVIDE,
+    CSINN_OP_FLOOR_MOD,
+    CSINN_OP_FLOOR,
+    CSINN_OP_FSMN,
+    CSINN_OP_FULLYCONNECTED,
+    CSINN_OP_GATHER_ND,
+    CSINN_OP_GATHER,
+    CSINN_OP_GLOBAL_AVGPOOL2D,
+    CSINN_OP_GLOBAL_MAXPOOL2D,
+    CSINN_OP_GREATHER_EQUAL,
+    CSINN_OP_GREATHER,
+    CSINN_OP_HARD_SIGMOID,
+    CSINN_OP_IM2COL,
+    CSINN_OP_ISNAN,
+    CSINN_OP_L2N,
+    CSINN_OP_L2POOL2D,
+    CSINN_OP_LAYER_NORM,
+    CSINN_OP_LEAKY_RELU,
+    CSINN_OP_LESS_EQUAL,
+    CSINN_OP_LESS,
+    CSINN_OP_LOG_SOFTMAX,
+    CSINN_OP_LOG,
+    CSINN_OP_LOG1P,
+    CSINN_OP_LOGICAL_AND,
+    CSINN_OP_LOGICAL_NOT,
+    CSINN_OP_LOGICAL_OR,
+    CSINN_OP_LOGICAL_XOR,
+    CSINN_OP_LRN,
+    CSINN_OP_MATMUL,
+    CSINN_OP_MAX,
+    CSINN_OP_MAXIMUM,
+    CSINN_OP_MAXPOOL2D,
+    CSINN_OP_MAXPOOL2D_LOCAT,
+    CSINN_OP_MAXPOOL3D,
+    CSINN_OP_MEAN,
+    CSINN_OP_MEAN_STRIDE,
+    CSINN_OP_MIN,
+    CSINN_OP_MIN_STRIDE,
+    CSINN_OP_MINIMUM,
+    CSINN_OP_MOD,
+    CSINN_OP_MUL,
+    CSINN_OP_NDARRAY_SIZE,
+    CSINN_OP_NEGATIIVE,
+    CSINN_OP_NON_MAX_SUPPRESSION,
+    CSINN_OP_NOT_EQUAL,
+    CSINN_OP_NOT,
+    CSINN_OP_ONE_HOT,
+    CSINN_OP_OR,
+    CSINN_OP_PAD,
+    CSINN_OP_POWER,
+    CSINN_OP_PRELU,
+    CSINN_OP_PROD,
+    CSINN_OP_PROPOSAL,
+    CSINN_OP_PSROIPOOLING,
+    CSINN_OP_REDUCE_LOGSUMEXP,
+    CSINN_OP_REDUCE_MAX,
+    CSINN_OP_REDUCE_MEAN,
+    CSINN_OP_REDUCE_MIN,
+    CSINN_OP_REDUCE_PROD,
+    CSINN_OP_REDUCE_SUM,
+    CSINN_OP_RELU,
+    CSINN_OP_RELU1,
+    CSINN_OP_RELU6,
+    CSINN_OP_RELUN,
+    CSINN_OP_REORG,
+    CSINN_OP_RESHAPE,
+    CSINN_OP_RESIZE,
+    CSINN_OP_REVERSE,
+    CSINN_OP_ROIALIGN,
+    CSINN_OP_ROIPOOL,
+    CSINN_OP_ROUND,
+    CSINN_OP_RSQRT,
+    CSINN_OP_SCATTER_ND,
+    CSINN_OP_SEGMENT_MAX,
+    CSINN_OP_UNSORTED_SEGMENT_MAX,
+    CSINN_OP_SEGMENT_MEAN,
+    CSINN_OP_UNSORTED_SEGMENT_MEAN,
+    CSINN_OP_SEGMENT_MIN,
+    CSINN_OP_UNSORTED_SEGMENT_MIN,
+    CSINN_OP_SEGMENT_PROD,
+    CSINN_OP_UNSORTED_SEGMENT_PROD,
+    CSINN_OP_SEGMENT_SUM,
+    CSINN_OP_UNSORTED_SEGMENT_SUM,
+    CSINN_OP_SELECT,
+    CSINN_OP_SEQUENCE_MASK,
+    CSINN_OP_SHAPE,
+    CSINN_OP_SHUFFLE_CHANNEL,
+    CSINN_OP_SIGMOID,
+    CSINN_OP_SIGN,
+    CSINN_OP_SIN,
+    CSINN_OP_SINH,
+    CSINN_OP_SLICE,
+    CSINN_OP_SOFTMAX,
+    CSINN_OP_SOFTPLUS,
+    CSINN_OP_SOFTRELU,
+    CSINN_OP_SOFTSIGN,
+    CSINN_OP_SPACE_TO_BATCH,
+    CSINN_OP_SPACE_TO_BATCH_ND,
+    CSINN_OP_SPACE_TO_DEPTH,
+    CSINN_OP_SPLIT,
+    CSINN_OP_SQRT,
+    CSINN_OP_SQUARE,
+    CSINN_OP_SQUEEZE,
+    CSINN_OP_STACK,
+    CSINN_OP_STRIDED_SLICE,
+    CSINN_OP_SUB,
+    CSINN_OP_SUM,
+    CSINN_OP_TAN,
+    CSINN_OP_TANH,
+    CSINN_OP_THRESHOLD_RELU,
+    CSINN_OP_TILE,
+    CSINN_OP_TOPK,
+    CSINN_OP_TRANSPOSE,
+    CSINN_OP_TRUNC,
+    CSINN_OP_UNPOOLING,
+    CSINN_OP_UNSTACK,
+    CSINN_OP_WHERE,
+    CSINN_OP_XOR,
+    CSINN_OP_YUV_RGB_SCALE,
+
+    CSINN_OP_SIZE,
+
+    /* graph */
+    CSINN_TENSOR,
+    CSINN_SUBGRAPH,
+    CSINN_SUBGRAPH_RETURN,
+    CSINN_OP_AND_UTILS_SIZE,
+};
+
+enum csinn_runtime_enum {
+    CSINN_SESSION_INIT,
+    CSINN_SESSION_DEINIT,
+    CSINN_SESSION_SETUP,
+    CSINN_SESSION_RUN,
+    CSINN_UPDATE_INPUT,
+    CSINN_UPDATE_OUTPUT,
+    CSINN_SET_INPUT_NUMBER,
+    CSINN_SET_OUTPUT_NUMBER,
+    CSINN_GET_INPUT_NUMBER,
+    CSINN_GET_OUTPUT_NUMBER,
+    CSINN_SET_INPUT,
+    CSINN_SET_OUTPUT,
+    CSINN_GET_INPUT,
+    CSINN_GET_OUTPUT,
+    CSINN_TENSOR_ENTRY,
+    CSINN_LOAD_BG,
+    CSINN_RUNTIME_OP_SIZE,
+};
+
+/* convolution mode */
+enum csinn_conv_mode_enum {
+    CSINN_DIRECT = 0x0,   /* using directly optimized convolution */
+    CSINN_WINOGRAD = 0x1, /* using winograd fast convolution */
+    CSINN_GEMM = 0x2,     /* using im2col + gemm convolution, im2col is optional */
+};
+
+/* pad mode */
+enum csinn_pad_enum {
+    CSINN_PAD_CONSTANT = 0x0, /* pads with the constant pad_value */
+    CSINN_PAD_EDGE = 0x1,     /* pads using the edge values of the input array */
+    CSINN_PAD_REFLECT = 0x2,  /* pads by reflecting values with respect to the edge */
+};
+
+/* resize mode */
+enum csinn_resize_enum {
+    CSINN_RESIZE_BILINEAR = 0x0,
+    CSINN_RESIZE_NEAREST_NEIGHBOR = 0x1,
+    CSINN_RESIZE_NEAREST_BICUBIC = 0x2,
+};
+
+/* depth2space mode */
+enum csinn_depth2space_enum {
+    CSINN_DEPTHTOSPACE_DCR = 0x0,
+    CSINN_DEPTHTOSPACE_CRD = 0x1,
+};
+
+/* local_response_normalization(lrn) mode */
+enum csinn_lrn_enum {
+    CSINN_LRN_ACROSS_CHANNELS = 0x0,
+    CSINN_LRN_WITHIN_CHANNEL,
+};
+
+enum csinn_layout_enum {
+    CSINN_LAYOUT_NULL = 0x0,
+    // NCHW
+    // ACTIVATION
+    CSINN_LAYOUT_N,
+    CSINN_LAYOUT_NC,
+    CSINN_LAYOUT_NCW,
+    CSINN_LAYOUT_NCHW,
+    CSINN_LAYOUT_NCDHW,
+    // WEIGHT
+    CSINN_LAYOUT_O,
+    CSINN_LAYOUT_OI,
+    CSINN_LAYOUT_O16I16,
+    CSINN_LAYOUT_O32I32,
+    CSINN_LAYOUT_OIW,
+    CSINN_LAYOUT_OIHW,
+    CSINN_LAYOUT_OIDHW,
+    CSINN_LAYOUT_O1HW,  // depthwise kernel
+
+    // NHWC
+    // ACTIVATION
+    CSINN_LAYOUT_NWC,
+    CSINN_LAYOUT_NHWC,
+    CSINN_LAYOUT_NDHWC,
+    // WEIGHT
+    CSINN_LAYOUT_OWI,
+    CSINN_LAYOUT_OHWI,
+    CSINN_LAYOUT_O16HWI16,
+    CSINN_LAYOUT_O32HWI32,
+    CSINN_LAYOUT_ODHWI,
+    CSINN_LAYOUT_1HWO,  // depthwise kernel
+    CSINN_LAYOUT_1HW16O16,
+    CSINN_LAYOUT_1HW32O32,
+
+    // NCXHWX
+    // ACTIVATION
+    CSINN_LAYOUT_NC1HWC0,  // rvv: c0=4/8/8 for fp32/fp16/int8 when vlen=128
+};
+
+enum csinn_status_enum {
+    CSINN_UNSUPPORT_LAYOUT = -3,
+    CSINN_UNSUPPORT_DTYPE = -2,
+    CSINN_CALLBACK_UNSET = -1,
+    CSINN_FALSE = 0,
+    CSINN_TRUE = 1,
+};
+
+enum csinn_profiler_enum {
+    CSI_PROFILER_LEVEL_UNSET = 0,
+    CSI_PROFILER_LEVEL_TIMER,  // print time
+};
+
+enum csinn_debug_enum {
+    CSINN_DEBUG_LEVEL_DEBUG = -2,
+    CSINN_DEBUG_LEVEL_INFO,
+    CSINN_DEBUG_LEVEL_WARNING,
+    CSINN_DEBUG_LEVEL_ERROR,
+    CSINN_DEBUG_LEVEL_FATAL,
+};
+
+struct csinn_quant_info {
+    int32_t zero_point;
+    float scale;
+    int32_t multiplier;
+    int32_t shift;
+    float min;
+    float max;
+};
+
+#define MAX_DIM 8
+struct csinn_tensor {
+    void *data;
+    enum csinn_dtype_enum dtype;
+    enum csinn_mem_type_enum mtype;
+    int32_t dim[MAX_DIM];
+    int32_t dim_count;
+    uint32_t is_const;
+    char *name;
+    int32_t layout;
+    int32_t quant_channel;
+    struct csinn_quant_info *qinfo;
+    struct csinn_session *sess;
+};
+
+struct csinn_model {
+    char *bm_path;
+    void *bm_addr;
+    size_t bm_size;
+    int32_t save_mode;
+    int32_t priority;
+};
+
+struct csinn_session {
+    int32_t base_dtype;
+    int32_t base_layout;
+    int32_t base_api;
+    int32_t base_run_mode;
+    enum csinn_quant_enum base_quant_type;
+    struct csinn_model model;
+    int32_t debug_level;
+    int32_t profiler_level;
+    int32_t input_num;
+    int32_t output_num;
+    struct csinn_tensor **input;
+    struct csinn_tensor **output;
+    void *td;
+};
+
+struct csinn_callback {
+    int (*init)();  // initialization
+    int (*est)();   // establish graph
+    int (*exec)();  // execute real compute
+    int (*caps)();  // capabilities
+    int (*perf)();  // profiling
+};
+
+struct csinn_params_base {
+    struct csinn_callback *cb;
+    char *name;
+    int32_t layout;
+    int32_t api;
+    enum csinn_quant_enum quant_type;
+    struct csinn_session *sess;
+};
+
+struct csinn_fsmn_params {
+    struct csinn_params_base base;
+    int32_t l_order;
+    int32_t r_order;
+    int32_t l_stride;
+    int32_t r_stride;
+    int32_t unavailable_frames;
+};
+
+struct csinn_conv2d_params {
+    struct csinn_params_base base;
+    int32_t group;
+    int32_t stride_height;
+    int32_t stride_width;
+    int32_t pad_top;
+    int32_t pad_left;
+    int32_t pad_down;
+    int32_t pad_right;
+    int32_t dilation_height;
+    int32_t dilation_width;
+    int32_t out_pad_height;
+    int32_t out_pad_width;
+    struct {
+        struct csinn_tensor *kernel_tm;
+        enum csinn_conv_mode_enum conv_mode;
+        int32_t fuse_zp2bias;
+    } conv_extra;
+};
+
+struct csinn_conv3d_params {
+    struct csinn_params_base base;
+    int32_t group;
+    int32_t stride_depth;
+    int32_t stride_height;
+    int32_t stride_width;
+    int32_t pad_top;
+    int32_t pad_left;
+    int32_t pad_down;
+    int32_t pad_right;
+    int32_t pad_front;
+    int32_t pad_back;
+    int32_t dilation_depth;
+    int32_t dilation_height;
+    int32_t dilation_width;
+    int32_t out_pad_depth;
+    int32_t out_pad_height;
+    int32_t out_pad_width;
+};
+
+struct csinn_fc_params {
+    struct csinn_params_base base;
+    int32_t units;
+    struct {
+        int32_t fuse_zp2bias;
+    } fc_extra;
+};
+
+struct csinn_pool_params {
+    struct csinn_params_base base;
+    int32_t pool_type;
+    int32_t filter_height;
+    int32_t filter_width;
+    int32_t filter_depth;
+    int32_t stride_height;
+    int32_t stride_width;
+    int32_t stride_depth;
+    int32_t pad_top;
+    int32_t pad_left;
+    int32_t pad_down;
+    int32_t pad_right;
+    int32_t pad_front;
+    int32_t pad_back;
+    int32_t ceil_mode;
+    bool count_include_pad;
+};
+
+struct csinn_unpooling_params {
+    struct csinn_params_base base;
+    int32_t scale_height;
+    int32_t scale_width;
+    int32_t pad_out_height;
+    int32_t pad_out_width;
+};
+
+struct csinn_roi_align_params {
+    struct csinn_params_base base;
+    int32_t pooled_size_h;
+    int32_t pooled_size_w;
+    float spatial_scale;
+    int32_t spatial_scale_multiplier;
+    int32_t spatial_scale_shift;
+    int32_t sample_ratio;
+};
+
+struct csinn_roi_pool_params {
+    struct csinn_params_base base;
+    int32_t pooled_size_h;
+    int32_t pooled_size_w;
+    float spatial_scale;
+    int32_t spatial_scale_multiplier;
+    int32_t spatial_scale_shift;
+};
+
+struct csinn_siso_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_scatter_nd_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_sigmoid_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_relu_params {
+    struct csinn_params_base base;
+
+    /* n / alpha / threshold */
+    float n;
+    int32_t n_multiplier;
+    int32_t n_shift;
+};
+
+struct csinn_prelu_params {
+    struct csinn_params_base base;
+    int32_t axis;
+};
+
+struct csinn_softmax_params {
+    struct csinn_params_base base;
+    int32_t axis;
+};
+
+struct csinn_bn_params {
+    struct csinn_params_base base;
+    float epsilon;
+    int32_t epsilon_multiplier;
+    int32_t epsilon_shift;
+};
+
+struct csinn_l2n_params {
+    struct csinn_params_base base;
+    float epsilon;
+    int32_t epsilon_multiplier;
+    int32_t epsilon_shift;
+    int32_t *axis;
+    int32_t n;
+};
+
+struct csinn_lrn_params {
+    struct csinn_params_base base;
+    int32_t range;
+    double bias;
+    int32_t bias_multiplier;
+    int32_t bias_shift;
+    double alpha;
+    int32_t alpha_multiplier;
+    int32_t alpha_shift;
+    double beta;
+    int32_t beta_multiplier;
+    int32_t beta_shift;
+    enum csinn_lrn_enum norm_region;
+};
+
+struct csinn_matmul_params {
+    struct csinn_params_base base;
+    bool trans_a;
+    bool trans_b;
+};
+
+struct csinn_diso_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_select_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_pad_params {
+    struct csinn_params_base base;
+    int32_t *pad_before;
+    int32_t *pad_after;
+    int32_t pad_num;
+    float pad_value;
+    enum csinn_pad_enum pad_mode;
+};
+
+struct csinn_resize_params {
+    struct csinn_params_base base;
+    enum csinn_resize_enum resize_mode;
+    bool align_corners;
+};
+
+struct csinn_concat_params {
+    struct csinn_params_base base;
+    int32_t inputs_count;
+    int32_t axis;
+};
+
+struct csinn_proposal_params {
+    struct csinn_params_base base;
+    float *scales;
+    int32_t *scale_multipliers;
+    int32_t *scale_shifts;
+    int32_t scales_num;
+    float *ratios;
+    int32_t *ratio_multipliers;
+    int32_t *ratio_shifts;
+    int32_t ratios_num;
+    int32_t feature_stride;
+    float threshold;
+    int32_t threshold_multiplier;
+    int32_t threshold_shift;
+    int rpn_pre_nms_top_n;
+    int rpn_post_nms_top_n;
+    int rpn_min_size;
+    bool iou_loss;
+};
+
+struct csinn_psroipooling_params {
+    struct csinn_params_base base;
+    int32_t output_dim;
+    int32_t group_size;
+    float spatial_scale;
+    int32_t spatial_scale_multiplier;
+    int32_t spatial_scale_shift;
+};
+
+struct csinn_transpose_params {
+    struct csinn_params_base base;
+    int32_t *permute;
+    int32_t permute_num;
+};
+
+struct csinn_reshape_params {
+    struct csinn_params_base base;
+    int32_t *shape;
+    int32_t shape_num;
+};
+
+struct csinn_shape_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_expand_dims_params {
+    struct csinn_params_base base;
+    int32_t axis;
+};
+
+struct csinn_reverse_params {
+    struct csinn_params_base base;
+    int32_t axis;
+};
+
+struct csinn_flatten_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_crop_params {
+    struct csinn_params_base base;
+    int32_t axis;
+    int32_t *offset;
+    int32_t offset_num;
+};
+
+struct csinn_slice_params {
+    struct csinn_params_base base;
+    int32_t *begin;
+    int32_t *end;
+    int32_t *strides;
+    int32_t slice_num;
+};
+
+struct csinn_split_params {
+    struct csinn_params_base base;
+    int32_t *split_index;
+    int32_t output_num;
+    int32_t axis;
+};
+
+struct csinn_stack_params {
+    struct csinn_params_base base;
+    int32_t inputs_count;
+    int32_t axis;
+};
+
+struct csinn_tile_params {
+    struct csinn_params_base base;
+    int32_t *reps;
+    int32_t reps_num;
+};
+
+struct csinn_arange_params {
+    struct csinn_params_base base;
+    float start;
+    int32_t start_multiplier;
+    int32_t start_shift;
+    float stop;
+    int32_t stop_multiplier;
+    int32_t stop_shift;
+    float step;
+    int32_t step_multiplier;
+    int32_t step_shift;
+};
+
+struct csinn_where_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_unstack_params {
+    struct csinn_params_base base;
+    int32_t outputs_count;
+    int32_t axis;
+};
+
+struct csinn_gather_params {
+    struct csinn_params_base base;
+    int32_t axis;
+};
+struct csinn_gather_nd_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_squeeze_params {
+    struct csinn_params_base base;
+    int32_t *axis;
+    int32_t axis_num;
+};
+
+struct csinn_ndarray_size_params {
+    struct csinn_params_base base;
+};
+
+struct csinn_space_to_batch_params {
+    struct csinn_params_base base;
+    int32_t pad_top;
+    int32_t pad_bottom;
+    int32_t pad_left;
+    int32_t pad_right;
+    int32_t block_size;
+};
+
+struct csinn_space_to_batch_nd_params {
+    struct csinn_params_base base;
+    int32_t *paddings;
+    int32_t *block_shape;
+    int32_t spatial_dim_cnt;
+};
+
+struct csinn_batch_to_space_params {
+    struct csinn_params_base base;
+    int32_t crop_top;
+    int32_t crop_bottom;
+    int32_t crop_left;
+    int32_t crop_right;
+    int32_t block_size;
+};
+
+struct csinn_batch_to_space_nd_params {
+    struct csinn_params_base base;
+    int32_t *crops;
+    int32_t *block_shape;
+    int32_t spatial_dim_cnt;
+};
+
+struct csinn_space_to_depth_params {
+    struct csinn_params_base base;
+    int32_t block_size;
+};
+
+struct csinn_depth_to_space_params {
+    struct csinn_params_base base;
+    enum csinn_depth2space_enum mode;
+    int32_t block_size;
+};
+
+struct csinn_one_hot_params {
+    struct csinn_params_base base;
+    float f_on_value;
+    float f_off_value;
+    int32_t on_value;
+    int32_t off_value;
+    int32_t depth;
+    int32_t axis;
+};
+
+struct csinn_sequence_mask_params {
+    struct csinn_params_base base;
+    float mask_value;
+    int32_t mask_value_multiplier;
+    int32_t mask_value_shift;
+    int32_t axis;
+};
+
+struct csinn_im2col_params {
+    struct csinn_params_base base;
+    int32_t pad_top;
+    int32_t pad_down;
+    int32_t pad_left;
+    int32_t pad_right;
+    int32_t stride_h;
+    int32_t stride_w;
+    int32_t kernel_h;
+    int32_t kernel_w;
+};
+
+struct csinn_col2im_params {
+    struct csinn_params_base base;
+    int32_t pad_h;
+    int32_t pad_w;
+    int32_t stride_h;
+    int32_t stride_w;
+};
+
+struct csinn_reduce_params {
+    struct csinn_params_base base;
+    int32_t *out_strides;
+    int32_t *out_extents;
+    int32_t n;
+    int32_t *inner_strides;
+    int32_t *inner_extents;
+    int32_t m;
+
+    int32_t *axis;
+    int32_t axis_count;
+    bool keepdims;
+};
+
+struct csinn_reorg_params {
+    struct csinn_params_base base;
+    int32_t stride;
+};
+
+struct csinn_segment_params {
+    struct csinn_params_base base;
+    int32_t num_segments;
+    bool unsorted;
+};
+
+struct csinn_cumsum_params {
+    struct csinn_params_base base;
+    int32_t axis;
+    bool exclusive;
+};
+
+struct csinn_cumprod_params {
+    struct csinn_params_base base;
+    int32_t axis;
+    bool exclusive;
+};
+
+struct csinn_broadcast_to_params {
+    struct csinn_params_base base;
+    int32_t *shape;
+    int32_t shape_count;
+};
+
+struct csinn_clip_params {
+    struct csinn_params_base base;
+    float min_value;
+    float max_value;
+};
+
+struct csinn_strided_slice_params {
+    struct csinn_params_base base;
+    int32_t *begin;
+    int32_t *end;
+    int32_t *stride;
+    int32_t slice_count;
+};
+
+struct csinn_shuffle_channel_params {
+    struct csinn_params_base base;
+    int32_t group;
+};
+
+struct csinn_topk_params {
+    struct csinn_params_base base;
+    int32_t k;
+};
+
+struct csinn_non_max_suppression_params {
+    struct csinn_params_base base;
+    int32_t max_output_size;
+    float iou_threshold;
+    // float score_threshold;
+};
+
+// modified to support the ASR model
+struct csinn_layer_norm_params {
+    struct csinn_params_base base;
+    float epsilon;
+    bool center;
+    bool scale;
+    int32_t axis;
+};
+
+struct csinn_asr_buffer_t {
+    size_t writer_index;
+    size_t buffer_lenth;  // length of buffer
+    size_t data_lenth;    // length of data
+    uint8_t *buffer;
+    uint8_t flag;
+};
+
+struct csinn_cache_matmul_params {
+    struct csinn_params_base base;
+    struct csinn_asr_buffer_t asr_buffer;
+    int32_t *cache_shape;
+    int32_t *shape;
+    int32_t *axes;
+    void *data;
+};
+
+struct csinn_cache_conv1d_params {
+    struct csinn_params_base base;
+    struct csinn_asr_buffer_t asr_buffer;
+    int32_t *cache_shape;
+    int32_t *in_shape;
+    int32_t group;
+    int32_t stride_width;
+    int32_t dilation_width;
+    int32_t pad_left;
+    int32_t pad_right;
+    void *data;
+};
+
+struct csinn_conv1d_params {
+    struct csinn_params_base base;
+    int32_t group;
+    int32_t stride_width;
+    int32_t dilation_width;
+    int32_t pad_left;
+    int32_t pad_right;
+};
+
+#endif  // INCLUDE_CSI_INTERNAL_H_
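The `csinn_quant_info` block above carries both a float `scale`/`zero_point` pair and a fixed-point `multiplier`/`shift` pair, but the header does not spell out the mapping between them and real values. The sketch below states the standard asymmetric-quantization convention as an explicit assumption — it is not this library's documented behavior, and the helper names are made up:

#include <stdint.h>
#include "csinn_data_structure.h"

/* assumed convention: real = scale * (quantized - zero_point) */
static float dequant_u8(uint8_t q, const struct csinn_quant_info *qi) {
    return qi->scale * (float)((int32_t)q - qi->zero_point);
}

static uint8_t quant_u8(float real, const struct csinn_quant_info *qi) {
    float q = real / qi->scale + (float)qi->zero_point;
    q = q < 0.0f ? 0.0f : (q > 255.0f ? 255.0f : q); /* clamp to UINT8 range */
    return (uint8_t)(q + 0.5f);                      /* round to nearest */
}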

+ 87 - 0
lib/install_nn2/include/csinn_runtime.h

@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_CSINN_RUNTIME_H_
+#define INCLUDE_CSINN_RUNTIME_H_
+
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#if (!defined SHL_BUILD_RTOS)
+#include <omp.h>
+#endif
+#include "csinn_data_structure.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define VERSION_MAJOR 2
+#define VERSION_MINOR 0
+#define VERSION_PATCH 20
+#define VERSION_SHIFT 8
+int csinn_version(char *vstr);
+
+/* tensor */
+int csinn_tensor_size(struct csinn_tensor *tensor);
+int csinn_tensor_byte_size(struct csinn_tensor *tensor);
+struct csinn_tensor *csinn_alloc_tensor(struct csinn_session *session);
+void csinn_free_tensor(struct csinn_tensor *tensor);
+void csinn_realloc_quant_info(struct csinn_tensor *tensor, int quant_info_num);
+void csinn_tensor_copy(struct csinn_tensor *dest, struct csinn_tensor *src);
+int csinn_tensor_data_convert(struct csinn_tensor *dest, struct csinn_tensor *src);
+int csinn_tensor_layout_convert(struct csinn_tensor *dest, struct csinn_tensor *src);
+
+/* op parameters */
+void *csinn_alloc_params(int params_size, struct csinn_session *session);
+void csinn_free_params(void *params);
+
+/* session */
+struct csinn_session *csinn_alloc_session();
+void csinn_free_session(struct csinn_session *session);
+void csinn_session_init(struct csinn_session *session);
+void csinn_session_deinit(struct csinn_session *session);
+int csinn_session_setup(struct csinn_session *session);
+int csinn_session_run(struct csinn_session *session);
+int csinn_load_binary_model(struct csinn_session *session);
+struct csinn_session *__attribute__((weak)) csinn_import_binary_model(char *bm_addr);
+
+/* input/output */
+void csinn_set_input_number(int number, struct csinn_session *sess);
+void csinn_set_output_number(int number, struct csinn_session *sess);
+int csinn_get_input_number(struct csinn_session *sess);
+int csinn_get_output_number(struct csinn_session *sess);
+int csinn_set_input(int index, struct csinn_tensor *input, struct csinn_session *sess);
+int csinn_set_output(int index, struct csinn_tensor *output, struct csinn_session *sess);
+int csinn_get_input(int index, struct csinn_tensor *input, struct csinn_session *sess);
+int csinn_get_output(int index, struct csinn_tensor *output, struct csinn_session *sess);
+int csinn_update_input(int index, struct csinn_tensor *input, struct csinn_session *sess);
+int csinn_update_output(int index, struct csinn_tensor *output, struct csinn_session *sess);
+int csinn_set_tensor_entry(struct csinn_tensor *tensor, struct csinn_session *sess);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INCLUDE_CSINN_RUNTIME_H_
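A sketch of the session life cycle these declarations imply — illustrative only. Graph construction (the operator `_init` calls and `csinn_set_tensor_entry` registrations that would sit between `csinn_session_init` and `csinn_session_setup`) is elided, the `CSINN_TRUE` return convention is assumed, and `run_graph` is a made-up name:

#include "csinn_runtime.h"

int run_graph(struct csinn_tensor *in, struct csinn_tensor *out) {
    struct csinn_session *sess = csinn_alloc_session();
    sess->base_run_mode = CSINN_RM_CPU_GRAPH;
    csinn_session_init(sess);

    csinn_set_input_number(1, sess);
    csinn_set_output_number(1, sess);
    /* ... register entry tensors and build operators here ... */
    csinn_set_input(0, in, sess);
    csinn_set_output(0, out, sess);

    int ret = CSINN_FALSE;
    if (csinn_session_setup(sess) == CSINN_TRUE) {
        csinn_update_input(0, in, sess); /* refresh input data per inference */
        ret = csinn_session_run(sess);
        csinn_get_output(0, out, sess);
    }

    csinn_session_deinit(sess);
    csinn_free_session(sess);
    return ret;
}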

+ 40 - 0
lib/install_nn2/include/shl_asp.h

@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_ASP_H_
+#define INCLUDE_SHL_ASP_H_
+
+#include "csi_nn.h"
+#include "shl_ref.h"
+
+int shl_asp_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pool_params *params);
+int shl_asp_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                   struct csinn_conv2d_params *params);
+int shl_asp_depthwise_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_asp_fullyconnected(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                           struct csinn_fc_params *params);
+int shl_asp_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pool_params *params);
+#endif  // INCLUDE_SHL_ASP_H_

+ 30 - 0
lib/install_nn2/include/shl_c860.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_CSI_C860_H_
+#define INCLUDE_CSI_C860_H_
+
+#include "csi_nn.h"
+#include "shl_ref.h"
+
+void shl_c860_dequantize_f32(uint8_t *input, float *output, int32_t offset, int32_t multiplier,
+                             int32_t shift, int32_t length);
+
+#endif  // INCLUDE_CSI_C860_H_
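Only the prototype is given here, so the following plain-C reference is an assumption-laden sketch of what a routine with this signature conventionally computes: `multiplier` read as a Q31 fixed-point fraction rescaled by 2^shift (the same multiplier/shift pattern `csinn_quant_info` uses), and `offset` treated as a signed zero-point correction added to the raw value. The optimized C860 build would be vectorized but, under those assumptions, numerically equivalent; `dequantize_f32_ref` is a made-up name.

#include <math.h>
#include <stdint.h>

static void dequantize_f32_ref(const uint8_t *input, float *output, int32_t offset,
                               int32_t multiplier, int32_t shift, int32_t length) {
    /* assumed scale: (multiplier / 2^31) * 2^shift */
    const float scale = ldexpf((float)multiplier / 2147483648.0f, shift);
    for (int32_t i = 0; i < length; i++) {
        output[i] = scale * (float)((int32_t)input[i] + offset);
    }
}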

+ 519 - 0
lib/install_nn2/include/shl_c906.h

@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_C906_H_
+#define INCLUDE_SHL_C906_H_
+
+#include "csi_nn.h"
+#include "shl_gref.h"
+#include "shl_ref.h"
+#include "shl_thead_rvv.h"
+
+/************************** f32 func declaration ***************************/
+int shl_c906_abs_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_c906_add_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_sub_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_mul_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_minimum_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_broadcast_to_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_broadcast_to_params *params);
+
+int shl_c906_clip_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_clip_params *params);
+
+int shl_c906_concat_f32(struct csinn_tensor **input, struct csinn_tensor *output,
+                        struct csinn_concat_params *params);
+
+int shl_c906_split_f32(struct csinn_tensor *input, struct csinn_tensor **output,
+                       struct csinn_split_params *params);
+
+int shl_c906_fullyconnected_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                 struct csinn_fc_params *params);
+
+int shl_c906_fullyconnected_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                struct csinn_fc_params *params);
+
+int shl_c906_pad_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_pad_params *params);
+
+int shl_c906_prelu_f32(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                       struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_c906_relu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_c906_relu1_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_c906_relu6_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_c906_leaky_relu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_relu_params *params);
+
+int shl_c906_conv1d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv1d_params *params);
+
+int shl_c906_conv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_c906_conv2d_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_c906_depthwise_conv2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c906_depthwise_conv2d_relu_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+
+int shl_c906_maxpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_c906_global_maxpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_c906_avgpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_c906_global_avgpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_c906_div_init(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+/* pack */
+void shl_c906_reorder_kernel(float *a, float *sa, int m, int k, int ldx);
+
+void shl_c906_reorder_input(float *b, float *sb, int k, int n, int ldx);
+
+void shl_c906_reorder_input_1(float *b, float *sb, int k, int n, int ldx);
+
+/* gemm */
+void shl_c906_sgemm_kernel_f32(float *dst, const float *sa, const float *sb, int m, int k, int n,
+                               int ldc, float *bias, bool fuse_relu);
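+
+/* Editor's sketch (illustrative, compiled out): how the pack + GEMM pair
+ * above is assumed to chain for a row-major m x k by k x n fp32 matmul.
+ * The packed-buffer sizes and the reading of ldx as the leading dimension
+ * of the source are the editor's assumptions, not SDK documentation. */
+#if 0
+#include <stdbool.h>
+#include <stdlib.h>
+
+static void sgemm_f32_example(float *A, float *B, float *C, float *bias,
+                              int m, int k, int n)
+{
+    float *sa = malloc(sizeof(float) * m * k);  /* packed copy of A (weights) */
+    float *sb = malloc(sizeof(float) * k * n);  /* packed copy of B (input)   */
+    shl_c906_reorder_kernel(A, sa, m, k, k);    /* pack weights once */
+    shl_c906_reorder_input(B, sb, k, n, n);     /* pack input per call */
+    shl_c906_sgemm_kernel_f32(C, sa, sb, m, k, n, n, bias, false);
+    free(sa);
+    free(sb);
+}
+#endif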
+
+/* kernel transform */
+void shl_c906_conv1x1s1_sgemm_transform_kernel(struct csinn_tensor *kernel,
+                                               struct csinn_conv2d_params *params);
+
+void shl_c906_conv_im2col_sgemm_transform_kernel(struct csinn_tensor *kernel,
+                                                 struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s1_winograd23_transform_kernel(struct csinn_tensor *o_kernel,
+                                                    struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd43_transform_kernel(struct csinn_tensor *o_kernel,
+                                                    struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd64_transform_kernel(struct csinn_tensor *o_kernel,
+                                                    struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd64_transform_kernel_1(struct csinn_tensor *o_kernel,
+                                                      struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd64_transform_kernel_pack4(struct csinn_tensor *o_kernel,
+                                                          struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd43_transform_kernel_pack4(struct csinn_tensor *o_kernel,
+                                                          struct csinn_tensor *t_kernel);
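+
+/* Editor's sketch (illustrative, compiled out): the transform/exec pairing
+ * assumed for the Winograd kernels above -- transform the 3x3 stride-1
+ * weights once at load time, then run the convolution that matches the same
+ * tile size (F(6x6,3x3) here) on every inference. Tensor setup is elided. */
+#if 0
+static void winograd_example(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *t_kernel,
+                             struct csinn_tensor *bias, struct csinn_conv2d_params *params)
+{
+    shl_c906_conv3x3s1_winograd64_transform_kernel(kernel, t_kernel);      /* once */
+    shl_c906_conv3x3s1_winograd64(input, output, t_kernel, bias, params);  /* per frame */
+}
+#endif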
+
+/* convolution optimization */
+int shl_c906_conv1x1s1_sgemm(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_c906_conv1x1s1_sgemm_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+int shl_c906_conv_im2col_sgemm(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_c906_conv_im2col_sgemm_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd23(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd43(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd64(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd64_1(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd64_pack4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd43_pack4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s1(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                        struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s2(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                        struct csinn_conv2d_params *params);
+
+/* depthwise convolution optimization */
+int shl_c906_dwconv3x3s1(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv5x5s1(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv5x5s2(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s1_pack4(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2_pack4(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+/* depthwise convolution fuse relu */
+int shl_c906_dwconv3x3s1_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv5x5s1_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv5x5s2_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s1_pack4_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2_pack4_fuse_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv2d_s1_pad0_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+/************************** fp16 function declarations ***************************/
+int shl_c906_add_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_sub_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_mul_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_minimum_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_c906_global_avgpool2d_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_c906_global_maxpool2d_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_c906_pad_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pad_params *params);
+
+int shl_c906_relu_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_c906_relu1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_c906_relu6_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_c906_prelu_fp16(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                        struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_c906_leaky_relu_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_relu_params *params);
+
+int shl_c906_abs_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_c906_clip_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_clip_params *params);
+
+int shl_c906_concat_fp16(struct csinn_tensor **input, struct csinn_tensor *output,
+                         struct csinn_concat_params *params);
+
+int shl_c906_split_fp16(struct csinn_tensor *input, struct csinn_tensor **output,
+                        struct csinn_split_params *params);
+
+int shl_c906_fullyconnected_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                 struct csinn_fc_params *params);
+
+int shl_c906_fullyconnected_pack8_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                       struct csinn_fc_params *params);
+
+int shl_c906_fullyconnected_pack8_fp16_1(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                         struct csinn_fc_params *params);
+
+int shl_c906_fullyconnected_pack16_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                        struct csinn_fc_params *params);
+
+int shl_c906_fullyconnected_pack16_output16_fp16(struct csinn_tensor *input,
+                                                 struct csinn_tensor *output,
+                                                 struct csinn_tensor *weights,
+                                                 struct csinn_tensor *bias,
+                                                 struct csinn_fc_params *params);
+
+void shl_c906_reorder_weight_n8_fp16(__fp16 *src, __fp16 *dst, int m, int k, int ldx);
+
+void shl_c906_reorder_weight_n16_fp16(__fp16 *src, __fp16 *dst, int m, int k, int ldx);
+
+/* pack fp16 */
+void shl_c906_reorder_kernel_fp16(__fp16 *a, __fp16 *sa, int m, int k, int ldx);
+void shl_c906_reorder_input_fp16(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+
+void shl_c906_reorder_input_fp16_1(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+
+void shl_c906_reorder_matrix_z8_fp16(__fp16 *src, __fp16 *dst, int k, int n, int ldx);
+void shl_c906_reorder_matrix_z16_fp16(__fp16 *src, __fp16 *dst, int k, int n, int ldx);
+
+/* gemm fp16 */
+void shl_c906_sgemm_kernel_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int m, int k,
+                                int n, int ldc, __fp16 *bias);
+void shl_c906_sgemm_kernel_fp16_1(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int m, int k,
+                                  int n, int ldc, __fp16 *bias);
+
+/* gemv fp16 */
+void shl_c906_gemv_pack8_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int k, int n,
+                              int ldc, __fp16 *bias);
+void shl_c906_gemv_pack16_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int k, int n,
+                               int ldc, __fp16 *bias);
+
+void shl_c906_gemv_trans_pack8_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int k, int n,
+                                    int ldc, __fp16 *bias);
+void shl_c906_gemv_trans_pack16_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, int k, int n,
+                                     int ldc, __fp16 *bias);
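+
+/* Editor's sketch (illustrative, compiled out): the pairing inferred from
+ * the names -- the n8 weight reorder feeding the pack8 GEMV for an n x k
+ * fp16 weight matrix times a length-k vector. The argument roles (packed
+ * weights in sa, vector in sb) mirror the sgemm convention above and are
+ * an assumption. */
+#if 0
+#include <stdlib.h>
+
+static void gemv_fp16_example(__fp16 *W, __fp16 *x, __fp16 *y, __fp16 *bias,
+                              int k, int n)
+{
+    __fp16 *packed = malloc(sizeof(__fp16) * k * n);
+    shl_c906_reorder_weight_n8_fp16(W, packed, n, k, k);  /* m = n output rows */
+    shl_c906_gemv_pack8_fp16(y, packed, x, k, n, n, bias);
+    free(packed);
+}
+#endif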
+
+/* kernel transform fp16 */
+void shl_c906_conv1x1s1_sgemm_transform_kernel_fp16(struct csinn_tensor *kernel,
+                                                    struct csinn_conv2d_params *params);
+void shl_c906_conv_im2col_sgemm_transform_kernel_fp16(struct csinn_tensor *kernel,
+                                                      struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s1_winograd43_transform_kernel_pack8_fp16(struct csinn_tensor *o_kernel,
+                                                               struct csinn_tensor *t_kernel);
+
+void shl_c906_conv3x3s1_winograd64_transform_kernel_pack8_fp16(struct csinn_tensor *o_kernel,
+                                                               struct csinn_tensor *t_kernel);
+
+/* convolution optimization fp16 */
+int shl_c906_conv1x1s1_sgemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+int shl_c906_conv1x1s1_batch_gemv_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+int shl_c906_conv_im2col_sgemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd43_pack8_fp16(struct csinn_tensor *input,
+                                             struct csinn_tensor *output,
+                                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                             struct csinn_conv2d_params *params);
+
+int shl_c906_conv3x3s1_winograd64_pack8_fp16(struct csinn_tensor *input,
+                                             struct csinn_tensor *output,
+                                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                             struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+void shl_c906_conv3x3s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+/* depthwise convolution optimization for fp16 */
+int shl_c906_dwconv3x3s1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s1_pack8_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+int shl_c906_dwconv3x3s2_pack8_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+/* utils */
+void shl_c906_memcpy(void *dst, const void *src, size_t n);
+
+void shl_c906_pad_input(const float *input, float *input_padded, int inc, int inh, int inw,
+                        int padded_h, int padded_w, int pad_top, int pad_left);
+
+void shl_c906_crop_output(float *output_trans, float *output, int out_c, int out_h, int out_w,
+                          int wino_h, int wino_w);
+
+void shl_c906_pad_input_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh, int inw,
+                             int padded_h, int padded_w, int pad_top, int pad_left);
+
+void shl_c906_crop_output_fp16(__fp16 *output_trans, __fp16 *output, int out_c, int out_h,
+                               int out_w, int wino_h, int wino_w);
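+
+/* Editor's sketch (illustrative, compiled out): zero-padding an NCHW fp32
+ * feature map with the helper above before a 3x3 stride-1 convolution.
+ * padded_h/padded_w are assumed to be inh/inw plus the top+bottom and
+ * left+right padding. */
+#if 0
+#include <stdlib.h>
+
+static void pad_example(const float *in, int c, int h, int w)
+{
+    int pad = 1;                       /* 1-pixel halo for a 3x3 kernel */
+    int ph = h + 2 * pad, pw = w + 2 * pad;
+    float *padded = malloc(sizeof(float) * c * ph * pw);
+    shl_c906_pad_input(in, padded, c, h, w, ph, pw, pad, pad);
+    /* ... run the convolution on `padded` ... */
+    free(padded);
+}
+#endif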
+
+/* ASR-related functions */
+int shl_c906_cache_matmul_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_matmul_params *params);
+
+int shl_c906_cache_matmul_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_matmul_params *params);
+
+int shl_c906_matmul_fp16(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                         struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_c906_layer_norm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                             struct csinn_layer_norm_params *params);
+
+int shl_c906_reshape_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reshape_params *params);
+
+int shl_c906_transpose_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_transpose_params *params);
+
+int shl_c906_gather_fp16(struct csinn_tensor *input, struct csinn_tensor *indices,
+                         struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int shl_c906_cache_conv1d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_conv1d_params *params);
+
+int shl_c906_cache_conv1d_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_conv1d_params *params);
+
+int shl_c906_lrn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_lrn_params *params);
+
+void asr_buffer_init_c906(struct csinn_asr_buffer_t *buffer, size_t buffer_size, size_t data_length);
+
+void *asr_buffer_insert_c906_front(struct csinn_asr_buffer_t *buffer, void *input, size_t len);
+
+void *asr_buffer_insert_c906_back(struct csinn_asr_buffer_t *buffer, void *input, size_t len);
+
+void *asr_buffer_get_buffer_c906(struct csinn_asr_buffer_t *buffer);
+
+void asr_buffer_reset_c906(struct csinn_asr_buffer_t *buffer);
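+
+/* Editor's sketch (illustrative, compiled out): the assumed life cycle of
+ * the ASR sliding buffer -- init once, append frames at the back, read the
+ * whole window, reset between utterances. Sizes are placeholders. */
+#if 0
+static void asr_buffer_example(__fp16 *frame, size_t frame_bytes)
+{
+    struct csinn_asr_buffer_t buf;
+    asr_buffer_init_c906(&buf, 16 * frame_bytes, frame_bytes);
+    asr_buffer_insert_c906_back(&buf, frame, frame_bytes);
+    void *window = asr_buffer_get_buffer_c906(&buf);
+    /* ... feed `window` to the acoustic model ... */
+    (void)window;
+    asr_buffer_reset_c906(&buf);
+}
+#endif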
+
+void shl_c906_reset_fcsr(void);
+int shl_c906_get_fcsr(void);
+
+/* hardware performance */
+struct shl_c906_hpm {
+    size_t inst;
+    size_t cycle;
+    size_t l1_icache_access;
+    size_t l1_icache_miss;
+    size_t store_inst;
+    size_t l1_dcache_raccess;
+    size_t l1_dcache_rmiss;
+    size_t l1_dcache_waccess;
+    size_t l1_dcache_wmiss;
+};
+
+uint64_t shl_c906_get_inst(void);
+uint64_t shl_c906_get_cycle(void);
+uint64_t shl_c906_get_l1_icache_access(void);
+uint64_t shl_c906_get_l1_icache_miss(void);
+uint64_t shl_c906_get_cb_miss(void);
+uint64_t shl_c906_get_cb_inst(void);
+uint64_t shl_c906_get_store_inst(void);
+uint64_t shl_c906_get_l1_dcache_raccess(void);
+uint64_t shl_c906_get_l1_dcache_rmiss(void);
+uint64_t shl_c906_get_l1_dcache_waccess(void);
+uint64_t shl_c906_get_l1_dcache_wmiss(void);
+
+struct shl_c906_hpm shl_c906_get_hw_perf(void);
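+
+/* Editor's sketch (illustrative, compiled out): bracketing a kernel with
+ * the snapshot above to derive an L1 D-cache read miss rate. Assumes the
+ * counters are free-running, so only deltas are meaningful. */
+#if 0
+#include <stdio.h>
+
+static void hpm_example(void)
+{
+    struct shl_c906_hpm t0 = shl_c906_get_hw_perf();
+    /* ... run the kernel under measurement ... */
+    struct shl_c906_hpm t1 = shl_c906_get_hw_perf();
+    size_t access = t1.l1_dcache_raccess - t0.l1_dcache_raccess;
+    size_t miss = t1.l1_dcache_rmiss - t0.l1_dcache_rmiss;
+    printf("L1D read miss rate: %.2f%%\n",
+           access ? 100.0 * miss / access : 0.0);
+}
+#endif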
+
+int shl_c906_sum_stride_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+void shl_c906_u8_to_f32(const uint8_t *input, float *output, int32_t offset, float *scale,
+                        uint32_t length);
+
+struct csinn_callback *shl_cb_map_c906(int op, int dtype);
+int shl_c906_reg_op(enum csinn_dtype_enum dtype, enum csinn_op_enum op_name, void *init,
+                    void *exec);
+int shl_c906_reg_op_est(enum csinn_dtype_enum dtype, enum csinn_op_enum op_name, void *est);
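+
+/* Editor's sketch (illustrative, compiled out): registering a C906 kernel
+ * in the op/dtype callback map and reading it back. CSINN_OP_CONV2D and
+ * CSINN_DTYPE_FLOAT16 are standard CSI-NN2 enum names; conv2d_fp16_exec is
+ * a hypothetical exec function shown only for the call shape. */
+#if 0
+static int conv2d_fp16_exec(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+static void reg_op_example(void)
+{
+    shl_c906_reg_op(CSINN_DTYPE_FLOAT16, CSINN_OP_CONV2D,
+                    (void *)shl_c906_conv2d_init, (void *)conv2d_fp16_exec);
+    struct csinn_callback *cb = shl_cb_map_c906(CSINN_OP_CONV2D, CSINN_DTYPE_FLOAT16);
+    (void)cb;
+}
+#endif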
+#endif  // INCLUDE_SHL_C906_H_

+ 348 - 0
lib/install_nn2/include/shl_c908.h

@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_C908_H_
+#define INCLUDE_SHL_C908_H_
+
+#include "csi_nn.h"
+#include "shl_gref.h"
+#include "shl_ref.h"
+#include "shl_thead_rvv.h"
+
+/*********************************** initialization ***********************************/
+int shl_c908_conv2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+int shl_c908_conv2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+int shl_c908_conv2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_c908_depthwise_conv2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+int shl_c908_depthwise_conv2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+int shl_c908_depthwise_conv2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+
+int shl_c908_avgpool2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_c908_avgpool2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_c908_avgpool2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_c908_maxpool2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_c908_maxpool2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_c908_maxpool2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_c908_fullyconnected_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                 struct csinn_fc_params *params);
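+
+/* Editor's note (the editor's reading, not SDK text): the init entry points
+ * above are assumed to be called once per layer so the backend can pick a
+ * kernel (direct GEMM, packn, Winograd, ...) for the given shapes; the
+ * chosen exec lands in the layer's callback, reached below through the
+ * standard CSI-NN2 params->base.cb field. Compiled-out sketch: */
+#if 0
+static int conv2d_run_example(struct csinn_tensor *in, struct csinn_tensor *out,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params)
+{
+    shl_c908_conv2d_init_fp16(in, out, kernel, bias, params);     /* once */
+    return params->base.cb->exec(in, out, kernel, bias, params);  /* per frame */
+}
+#endif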
+
+/************************************ convolution *********************************/
+/*********************************** im2col + gemm ********************************/
+void shl_c908_conv_im2col_gemm_reorder_kernel_fp32(struct csinn_tensor *kernel,
+                                                   struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_fp16(struct csinn_tensor *kernel,
+                                                   struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_int8(struct csinn_tensor *kernel,
+                                                   struct csinn_conv2d_params *params);
+
+int shl_c908_conv_im2col_gemm_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+void shl_c908_conv_im2col_gemm_reorder_kernel_packn_fp32(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_packn_fp16(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_packn_int8(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+
+int shl_c908_conv_im2col_gemm_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+void shl_c908_conv_im2col_gemm_reorder_kernel_pack1ton_fp32(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_pack1ton_fp16(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_pack1ton_int8(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+
+int shl_c908_conv_im2col_gemm_pack1ton_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_pack1ton_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_pack1ton_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+
+void shl_c908_conv_im2col_gemm_reorder_kernel_packnto1_fp32(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_packnto1_fp16(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+void shl_c908_conv_im2col_gemm_reorder_kernel_packnto1_int8(struct csinn_tensor *kernel,
+                                                            struct csinn_conv2d_params *params);
+
+int shl_c908_conv_im2col_gemm_packnto1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_packnto1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+int shl_c908_conv_im2col_gemm_packnto1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+
+/******************************** conv2d1x1s1 + gemm ******************************/
+void shl_c908_conv1x1s1_gemm_reorder_kernel_fp32(struct csinn_tensor *kernel,
+                                                 struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_fp16(struct csinn_tensor *kernel,
+                                                 struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_int8(struct csinn_tensor *kernel,
+                                                 struct csinn_conv2d_params *params);
+
+int shl_c908_conv1x1s1_gemm_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packn_fp32(struct csinn_tensor *kernel,
+                                                       struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packn_fp16(struct csinn_tensor *kernel,
+                                                       struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packn_int8(struct csinn_tensor *kernel,
+                                                       struct csinn_conv2d_params *params);
+
+int shl_c908_conv1x1s1_gemm_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+void shl_c908_conv1x1s1_gemm_reorder_kernel_pack1ton_fp32(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_pack1ton_fp16(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_pack1ton_int8(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+
+int shl_c908_conv1x1s1_gemm_pack1ton_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_pack1ton_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_pack1ton_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packnto1_fp32(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packnto1_fp16(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+void shl_c908_conv1x1s1_gemm_reorder_kernel_packnto1_int8(struct csinn_tensor *kernel,
+                                                          struct csinn_conv2d_params *params);
+
+int shl_c908_conv1x1s1_gemm_packnto1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_packnto1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+int shl_c908_conv1x1s1_gemm_packnto1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+
+/*********************************** winograd ***********************************/
+void shl_c908_wg_b6f3s1_trans_kernel_pack8_fp32(struct csinn_tensor *src_kernel,
+                                                struct csinn_tensor *dst_kernel);
+void shl_c908_wg_b6f3s1_trans_kernel_pack8_fp16(struct csinn_tensor *src_kernel,
+                                                struct csinn_tensor *dst_kernel);
+void shl_c908_wg_b6f3s1_trans_kernel_pack16_fp16(struct csinn_tensor *src_kernel,
+                                                 struct csinn_tensor *dst_kernel);
+
+void shl_c908_wg_b4f3s1_trans_kernel_pack8_fp32(struct csinn_tensor *src_kernel,
+                                                struct csinn_tensor *dst_kernel);
+void shl_c908_wg_b4f3s1_trans_kernel_pack8_fp16(struct csinn_tensor *src_kernel,
+                                                struct csinn_tensor *dst_kernel);
+void shl_c908_wg_b4f3s1_trans_kernel_pack16_fp16(struct csinn_tensor *src_kernel,
+                                                 struct csinn_tensor *dst_kernel);
+void shl_c908_wg_b4f3s1_trans_kernel_pack8_int8(struct csinn_tensor *src_kernel,
+                                                struct csinn_tensor *dst_kernel);
+
+int shl_c908_wg_b6f3s1_pack8_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+int shl_c908_wg_b6f3s1_pack8_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+int shl_c908_wg_b6f3s1_pack16_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_c908_wg_b4f3s1_pack8_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+int shl_c908_wg_b4f3s1_pack8_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+int shl_c908_wg_b4f3s1_pack16_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_c908_wg_b4f3s1_pack8_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+void shl_c908_ncxhwx_wg_b6f3s1_trans_kernel_packn_fp32(struct csinn_tensor *src_kernel,
+                                                       struct csinn_tensor *dst_kernel);
+void shl_c908_ncxhwx_wg_b6f3s1_trans_kernel_packn_fp16(struct csinn_tensor *src_kernel,
+                                                       struct csinn_tensor *dst_kernel);
+
+int shl_c908_ncxhwx_wg_b6f3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_c908_ncxhwx_wg_b6f3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+void shl_c908_ncxhwx_wg_b4f3s1_trans_kernel_packn_fp32(struct csinn_tensor *src_kernel,
+                                                       struct csinn_tensor *dst_kernel);
+void shl_c908_ncxhwx_wg_b4f3s1_trans_kernel_packn_fp16(struct csinn_tensor *src_kernel,
+                                                       struct csinn_tensor *dst_kernel);
+void shl_c908_ncxhwx_wg_b4f3s1_trans_kernel_packn_int8(struct csinn_tensor *src_kernel,
+                                                       struct csinn_tensor *dst_kernel);
+
+int shl_c908_ncxhwx_wg_b4f3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_c908_ncxhwx_wg_b4f3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_c908_ncxhwx_wg_b4f3s1_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+/*********************************** gemm ncxhwx kernel ***********************************/
+void shl_c908_ncxhwx_gemm_12xpack2n_fp32(float *dst, const float *sa, const float *sb,
+                                         const float *bias, int m, int k, int n, bool fuse_relu);
+void shl_c908_ncxhwx_gemm_12xpack2n_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb,
+                                         const __fp16 *bias, int m, int k, int n, bool fuse_relu);
+
+void shl_c908_ncxhwx_gemm_12xpackn_int8(int8_t *dst, const int8_t *sa, const int8_t *sb,
+                                        const int32_t *bias, int m, int k, int n, int32_t out_zp,
+                                        int32_t *mult, int32_t *shift);
+
+void shl_c908_ncxhwx_gemm_12xpackn_int16(int32_t *dst, const int16_t *sa, const int16_t *sb, int m,
+                                         int k, int n);
+/*********************************** gemm kernel ***********************************/
+void shl_c908_reorder_kernel_n8_fp32(float *src, float *dst, int m, int k, int ldc);
+void shl_c908_reorder_input_z12_fp32(float *src, float *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x12_fp32(float *dst, const float *sa, const float *sb, float *bias, int m,
+                             int k, int n, int ldc);
+
+void shl_c908_reorder_kernel_n8_fp16(__fp16 *src, __fp16 *dst, int m, int k, int ldc);
+void shl_c908_reorder_input_z24_fp16(__fp16 *src, __fp16 *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x24_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias, int m,
+                             int k, int n, int ldc);
+
+void shl_c908_reorder_kernel_n8_int8(int8_t *src, int8_t *dst, int m, int k, int ldc);
+void shl_c908_reorder_input_z8_int8(int8_t *src, int8_t *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x8_int8(int8_t *dst, const int8_t *sa, const int8_t *sb, int32_t *bias, int m,
+                            int k, int n, int ldc, int32_t out_zp, int32_t *mult, int32_t *shift);
+void shl_c908_reorder_input_z12_int8(int8_t *src, int8_t *dst, int k, int n, int ldc);
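+
+/* Editor's sketch (illustrative, compiled out): chaining the n8/z12
+ * reorders with the 8x12 GEMM for fp32. The names suggest 8-row x 12-column
+ * register blocking, so the packed buffers are rounded up to those
+ * multiples here; the true packed sizes are internal to the library. */
+#if 0
+#include <stdlib.h>
+
+static void c908_gemm_example(float *A, float *B, float *C, float *bias,
+                              int m, int k, int n)
+{
+    int m8 = (m + 7) / 8 * 8, n12 = (n + 11) / 12 * 12;
+    float *sa = malloc(sizeof(float) * m8 * k);
+    float *sb = malloc(sizeof(float) * k * n12);
+    shl_c908_reorder_kernel_n8_fp32(A, sa, m, k, k);
+    shl_c908_reorder_input_z12_fp32(B, sb, k, n, n);
+    shl_c908_gemm_8x12_fp32(C, sa, sb, bias, m, k, n, n);
+    free(sa);
+    free(sb);
+}
+#endif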
+
+/*********************************** VLEN = 256 ***********************************/
+
+void shl_c908_reorder_input_z16_fp32_v256(float *src, float *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x16_fp32_v256(float *dst, const float *sa, const float *sb, float *bias, int m,
+                                  int k, int n, int ldc);
+
+void shl_c908_reorder_input_z32_fp16_v256(__fp16 *src, __fp16 *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x32_fp16_v256(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias,
+                                  int m, int k, int n, int ldc);
+
+void shl_c908_reorder_input_z16_int8_v256(int8_t *src, int8_t *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x16_int8_v256(int8_t *dst, const int8_t *sa, const int8_t *sb, int32_t *bias,
+                                  int m, int k, int n, int ldc, int32_t out_zp, int32_t *mult,
+                                  int32_t *shift);
+
+#ifdef SHL_UNUSED_REGISTER_BLK
+void shl_c908_reorder_input_z8_fp32(float *src, float *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x8_fp32(float *dst, const float *sa, const float *sb, float *bias, int m, int k,
+                            int n, int ldc);
+void shl_c908_reorder_input_z16_fp16(__fp16 *src, __fp16 *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x16_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias, int m,
+                             int k, int n, int ldc);
+
+void shl_c908_reorder_input_z24_fp32_v256(float *src, float *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x24_fp32_v256(float *dst, const float *sa, const float *sb, float *bias, int m,
+                                  int k, int n, int ldc);
+void shl_c908_reorder_input_z48_fp16_v256(__fp16 *src, __fp16 *dst, int k, int n, int ldc);
+void shl_c908_gemm_8x48_fp16_v256(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias,
+                                  int m, int k, int n, int ldc);
+#endif
+
+#ifdef SHL_USE_DOT_INT4
+int shl_c908_conv2d_init_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_c908_depthwise_conv2d_init_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+#endif
+
+#endif  // INCLUDE_SHL_C908_H_

+ 293 - 0
lib/install_nn2/include/shl_debug.h

@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+#ifndef INCLUDE_SHL_DEBUG_H_
+#define INCLUDE_SHL_DEBUG_H_
+#include "csi_nn.h"
+#include "shl_node.h"
+
+enum shl_debug_enum {
+    SHL_DEBUG_LEVEL_DEBUG = -2,
+    SHL_DEBUG_LEVEL_INFO,
+    SHL_DEBUG_LEVEL_WARNING,
+    SHL_DEBUG_LEVEL_ERROR,
+    SHL_DEBUG_LEVEL_FATAL,
+};
+
+#ifdef SHL_DEBUG
+#define SHL_DEBUG_CALL(func) func
+void shl_debug_debug(const char *format, ...);
+void shl_debug_info(const char *format, ...);
+void shl_debug_warning(const char *format, ...);
+void shl_debug_error(const char *format, ...);
+void shl_debug_fatal(const char *format, ...);
+int shl_debug_callback_unset(void);
+#else
+#define SHL_DEBUG_CALL(func)
+static inline void shl_debug_debug(const char *format, ...) {}
+static inline void shl_debug_info(const char *format, ...) {}
+static inline void shl_debug_warning(const char *format, ...) {}
+static inline void shl_debug_error(const char *format, ...) {}
+static inline void shl_debug_fatal(const char *format, ...) {}
+static inline int shl_debug_callback_unset(void) { return CSINN_CALLBACK_UNSET; }
+#endif
+
+int shl_debug_get_level();
+void shl_debug_set_level(int level);
+int shl_benchmark_layer(struct shl_node *node, uint64_t start_time, uint64_t end_time,
+                        int layer_idx);
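+
+/* Editor's sketch (illustrative, compiled out): typical debug plumbing --
+ * raise the level, then report per-layer timing through shl_benchmark_layer.
+ * The start/end times are placeholders; the SDK's timer is not part of this
+ * header. */
+#if 0
+static void debug_example(struct shl_node *node, uint64_t t0, uint64_t t1, int idx)
+{
+    shl_debug_set_level(SHL_DEBUG_LEVEL_INFO);
+    shl_debug_info("layer %d start\n", idx);
+    shl_benchmark_layer(node, t0, t1, idx);
+}
+#endif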
+
+int shl_conv2d_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv2d_params *params, const char *name);
+
+int shl_conv1d_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv1d_params *params, const char *name);
+
+int shl_conv3d_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv3d_params *params, const char *name);
+
+int shl_fsmn_debug_info(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+                        struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+                        struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+                        struct csinn_fsmn_params *params, const char *name);
+
+int shl_siso_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params, const char *name);
+
+int shl_diso_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params,
+                        const char *name);
+
+int shl_relu_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params, const char *name);
+
+int shl_arange_debug_info(struct csinn_tensor *output, struct csinn_arange_params *params,
+                          const char *name);
+
+int shl_pool_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_pool_params *params, const char *name);
+
+int shl_pad_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pad_params *params, const char *name);
+
+int shl_crop_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_crop_params *params, const char *name);
+
+int shl_roi_pool_debug_info(struct csinn_tensor *data, struct csinn_tensor *rois,
+                            struct csinn_tensor *output, struct csinn_roi_pool_params *params,
+                            const char *name);
+
+int shl_bn_debug_info(struct csinn_tensor *input, struct csinn_tensor *mean,
+                      struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                      struct csinn_tensor *beta, struct csinn_tensor *output,
+                      struct csinn_bn_params *params, const char *name);
+
+int shl_batch_to_space_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_batch_to_space_params *params, const char *name);
+
+int shl_batch_to_space_nd_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     struct csinn_batch_to_space_nd_params *params,
+                                     const char *name);
+
+int shl_cache_matmul_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weight, struct csinn_tensor *bias,
+                                struct csinn_cache_matmul_params *params, const char *name);
+
+int shl_cache_conv1d_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weight, struct csinn_tensor *bias,
+                                struct csinn_cache_conv1d_params *params, const char *name);
+
+int shl_space_to_depth_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_space_to_depth_params *params, const char *name);
+
+int shl_depth_to_space_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_depth_to_space_params *params, const char *name);
+
+int shl_space_to_batch_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_space_to_batch_params *params, const char *name);
+
+int shl_space_to_batch_nd_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     struct csinn_space_to_batch_nd_params *params,
+                                     const char *name);
+
+int shl_broadcast_to_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_broadcast_to_params *params, const char *name);
+
+int shl_reduce_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reduce_params *params, const char *name);
+
+int shl_clip_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_clip_params *params, const char *name);
+
+int shl_col2im_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_col2im_params *params, const char *name);
+
+int shl_concat_debug_info(struct csinn_tensor **input, struct csinn_tensor *output,
+                          struct csinn_concat_params *params, const char *name);
+
+int shl_cumprod_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_cumprod_params *params, const char *name);
+
+int shl_cumsum_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_cumsum_params *params, const char *name);
+
+int shl_expand_dims_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_expand_dims_params *params, const char *name);
+
+int shl_flatten_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_flatten_params *params, const char *name);
+
+int shl_fullyconnected_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                  struct csinn_fc_params *params, const char *name);
+
+int shl_gather_nd_debug_info(struct csinn_tensor *input, struct csinn_tensor *indices,
+                             struct csinn_tensor *output, struct csinn_gather_nd_params *params,
+                             const char *name);
+
+int shl_gather_debug_info(struct csinn_tensor *input, struct csinn_tensor *indices,
+                          struct csinn_tensor *output, struct csinn_gather_params *params,
+                          const char *name);
+
+int shl_hard_sigmoid_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_sigmoid_params *params, const char *name);
+
+int shl_im2col_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_im2col_params *params, const char *name);
+
+int shl_l2n_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_l2n_params *params, const char *name);
+
+int shl_layer_norm_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                              struct csinn_layer_norm_params *params, const char *name);
+
+int shl_softmax_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_softmax_params *params, const char *name);
+
+int shl_lrn_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_lrn_params *params, const char *name);
+
+int shl_matmul_debug_info(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                          struct csinn_tensor *output, struct csinn_matmul_params *params,
+                          const char *name);
+
+int shl_ndarray_size_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_ndarray_size_params *params, const char *name);
+
+int shl_nms_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_non_max_suppression_params *params,
+                       const char *name);
+
+int shl_one_hot_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_one_hot_params *params, const char *name);
+
+int shl_prelu_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_prelu_params *params,
+                         const char *name);
+
+int shl_proposal_debug_info(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                            struct csinn_tensor *im_info, struct csinn_tensor *output,
+                            struct csinn_proposal_params *params, const char *name);
+
+int shl_psroipooling_debug_info(struct csinn_tensor *data, struct csinn_tensor *rois,
+                                struct csinn_tensor *output,
+                                struct csinn_psroipooling_params *params, const char *name);
+
+int shl_reorg_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reorg_params *params, const char *name);
+
+int shl_reshape_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reshape_params *params, const char *name);
+
+int shl_resize_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_resize_params *params, const char *name);
+
+int shl_reverse_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reverse_params *params, const char *name);
+
+int shl_roi_align_debug_info(struct csinn_tensor *data, struct csinn_tensor *rois,
+                             struct csinn_tensor *output, struct csinn_roi_align_params *params,
+                             const char *name);
+
+int shl_scatter_nd_debug_info(struct csinn_tensor *input, struct csinn_tensor *indices,
+                              struct csinn_tensor *updates, struct csinn_tensor *output,
+                              struct csinn_scatter_nd_params *params, const char *name);
+
+int shl_segment_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_segment_params *params,
+                           const char *name);
+
+int shl_select_debug_info(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                          struct csinn_tensor *input1, struct csinn_tensor *output,
+                          struct csinn_select_params *params, const char *name);
+
+int shl_sequence_mask_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                 struct csinn_tensor *output,
+                                 struct csinn_sequence_mask_params *params, const char *name);
+
+int shl_shape_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_shape_params *params, const char *name);
+
+int shl_shuffle_channel_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_shuffle_channel_params *params, const char *name);
+
+int shl_sigmoid_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_sigmoid_params *params, const char *name);
+
+int shl_slice_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_slice_params *params, const char *name);
+
+int shl_split_debug_info(struct csinn_tensor *input, struct csinn_tensor **output,
+                         struct csinn_split_params *params, const char *name);
+
+int shl_squeeze_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_squeeze_params *params, const char *name);
+
+int shl_stack_debug_info(struct csinn_tensor **input, struct csinn_tensor *output,
+                         struct csinn_stack_params *params, const char *name);
+
+int shl_strided_slice_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_strided_slice_params *params, const char *name);
+
+int shl_tile_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tile_params *params, const char *name);
+
+int shl_topk_debug_info(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_topk_params *params,
+                        const char *name);
+
+int shl_transpose_debug_info(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_transpose_params *params, const char *name);
+
+int shl_unpooling_debug_info(struct csinn_tensor *input, struct csinn_tensor *mask,
+                             struct csinn_tensor *output, struct csinn_unpooling_params *params,
+                             const char *name);
+
+int shl_unstack_debug_info(struct csinn_tensor *input, struct csinn_tensor **output,
+                           struct csinn_unstack_params *params, const char *name);
+
+int shl_where_debug_info(struct csinn_tensor *condition, struct csinn_tensor *x,
+                         struct csinn_tensor *y, struct csinn_tensor *output,
+                         struct csinn_where_params *params, const char *name);
+
+#endif  // INCLUDE_SHL_DEBUG_H_
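The `*_debug_info` helpers above are logging hooks: each prints the shapes, dtypes, and op parameters of one layer under the `name` passed as the last argument. A minimal sketch of invoking one directly is below; `csinn_alloc_tensor()`, `csinn_alloc_params()`, and `csinn_free_tensor()` are assumed from `csi_nn.h` (added in this commit but not shown here), so treat those calls as assumptions rather than confirmed API.

    #include "csi_nn.h"
    #include "shl_debug.h"

    /* Sketch: log a softmax layer's tensor/param info under "softmax_0".
     * The allocator calls are assumed from csi_nn.h; only
     * shl_softmax_debug_info() itself is declared in this header. */
    static void dump_softmax_layer(struct csinn_session *sess)
    {
        struct csinn_tensor *in = csinn_alloc_tensor(sess);
        struct csinn_tensor *out = csinn_alloc_tensor(sess);
        struct csinn_softmax_params *params =
            csinn_alloc_params(sizeof(struct csinn_softmax_params), sess);

        shl_softmax_debug_info(in, out, params, "softmax_0");

        csinn_free_tensor(in);
        csinn_free_tensor(out);
    }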

+ 82 - 0
lib/install_nn2/include/shl_e804.h

@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_E804_H_
+#define INCLUDE_SHL_E804_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "csi_nn.h"
+#include "shl_ref.h"
+
+int shl_e804_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+int shl_e804_conv2d_init_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_e804_depthwise_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+
+int shl_e804_avgpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int shl_e804_maxpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int shl_e804_fullyconnected_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weights, struct csinn_tensor *bias,
+                               struct csinn_fc_params *params);
+
+int shl_e804_fullyconnected_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                struct csinn_fc_params *params);
+
+int shl_e804_softmax_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_softmax_params *params);
+
+int shl_e804_softmax_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_softmax_params *params);
+
+int shl_e804_relu_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int shl_e804_relu_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_e804_sigmoid_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_sigmoid_params *params);
+
+int shl_e804_sigmoid_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_sigmoid_params *params);
+
+int shl_e804_tanh_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_e804_tanh_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+#endif  // INCLUDE_SHL_E804_H_
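These q7/q15 entry points follow CSI-NN2's usual split: the `*_init_*` functions run once at graph-build time (typically validating shapes and selecting the optimized kernel), while the plain functions execute per inference. A hedged sketch of driving the q7 ReLU directly follows; the tensor field names (`dim`, `dim_count`, `dtype`, `data`) are assumed from `csinn_data_structure.h`.

    #include "csi_nn.h"
    #include "shl_e804.h"

    /* Sketch: apply the q7 (int8) ReLU kernel to a flat buffer. Reusing the
     * input tensor as output is a sketch-level simplification. */
    int relu_q7_buffer(int8_t *buf, int len)
    {
        struct csinn_tensor *t = csinn_alloc_tensor(NULL);
        struct csinn_relu_params *params =
            csinn_alloc_params(sizeof(struct csinn_relu_params), NULL);

        t->dim[0] = len;
        t->dim_count = 1;
        t->dtype = CSINN_DTYPE_INT8;  /* q7 storage type */
        t->data = buf;

        int ret = shl_e804_relu_q7(t, t, params);
        csinn_free_tensor(t);
        return ret;
    }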

+ 609 - 0
lib/install_nn2/include/shl_gref.h

@@ -0,0 +1,609 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_GREF_H_
+#define INCLUDE_SHL_GREF_H_
+#include "csi_nn.h"
+#include "shl_node.h"
+#include "shl_utils.h"
+
+int shl_gref_acos(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_acosh(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_cos(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_cosh(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_asin(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_asinh(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_tan(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_atan(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_atanh(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_threshold_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_relu_params *params);
+
+int shl_gref_trunc(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_topk(struct csinn_tensor *input, struct csinn_tensor *output1,
+                  struct csinn_tensor *output2, struct csinn_topk_params *params);
+
+int shl_gref_cumprod(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_cumprod_params *params);
+
+int shl_gref_cumsum(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_cumsum_params *params);
+
+int shl_gref_conv1d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                    struct csinn_conv1d_params *params);
+
+int shl_gref_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                    struct csinn_conv2d_params *params);
+
+int shl_gref_depthwise_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_gref_group_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv2d_params *params);
+
+int shl_gref_group_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_gref_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_gref_conv2d_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv2d_params *params);
+
+int shl_gref_conv3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                    struct csinn_conv3d_params *params);
+
+int shl_gref_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv2d_params *params);
+
+int shl_gref_deconv3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv3d_params *params);
+
+int shl_gref_depthwise_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+int shl_gref_depthwise_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_gref_depthwise_conv2d_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+int shl_gref_fsmn(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+                  struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+                  struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+                  struct csinn_fsmn_params *params);
+
+int shl_gref_fullyconnected(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *weights, struct csinn_tensor *bias,
+                            struct csinn_fc_params *params);
+
+int shl_gref_fullyconnected_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                 struct csinn_fc_params *params);
+
+int shl_gref_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_gref_maxpool3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_gref_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_gref_avgpool3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_gref_global_avgpool3d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_gref_global_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_gref_global_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_gref_l2pool(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pool_params *params);
+
+int shl_gref_pool_with_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_gref_maxpool2d_locat(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_pool_params *params);
+
+int shl_gref_mod(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_non_max_suppression(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                 struct csinn_tensor *output,
+                                 struct csinn_non_max_suppression_params *params);
+
+int shl_gref_unpooling(struct csinn_tensor *input, struct csinn_tensor *mask,
+                       struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int shl_gref_negative(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_gref_floor(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_ceil(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_clip(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_clip_params *params);
+
+int shl_gref_abs(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_exp(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_sin(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_sinh(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_tanh(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_sqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_rsqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_square(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_gref_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_sigmoid_params *params);
+
+int shl_gref_softsign(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_gref_space_to_batch_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_space_to_batch_nd_params *params);
+
+int shl_gref_elu(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_relu_params *params);
+
+int shl_gref_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_relu_params *params);
+
+int shl_gref_relu1(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int shl_gref_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int shl_gref_relun(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int shl_gref_roi_align(struct csinn_tensor *data, struct csinn_tensor *rois,
+                       struct csinn_tensor *output, struct csinn_roi_align_params *params);
+
+int shl_gref_roipool(struct csinn_tensor *data, struct csinn_tensor *rois,
+                     struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int shl_gref_round(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_leaky_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_gref_softrelu(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_gref_prelu(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                   struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_gref_softplus(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_gref_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_softmax_params *params);
+
+int shl_gref_batch_normalization(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                 struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                 struct csinn_tensor *beta, struct csinn_tensor *output,
+                                 struct csinn_bn_params *params);
+
+int shl_gref_l2_normalization(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_l2n_params *params);
+
+int shl_gref_lrn(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_lrn_params *params);
+
+int shl_gref_matmul(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                    struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_gref_add(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_sub(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_mul(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_div(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_floor_divide(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_floor_mod(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_maximum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_minimum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_power(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_greater(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_less(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_log_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_softmax_params *params);
+
+int shl_gref_log(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_log1p(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_not_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_not(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_reduce_logsumexp(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_reduce_params *params);
+
+int shl_gref_reduce_max(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_reduce_params *params);
+
+int shl_gref_reduce_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reduce_params *params);
+
+int shl_gref_reduce_min(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_reduce_params *params);
+
+int shl_gref_reduce_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reduce_params *params);
+
+int shl_gref_reduce_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_reduce_params *params);
+
+int shl_gref_greater_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_less_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_select(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                    struct csinn_tensor *input1, struct csinn_tensor *output,
+                    struct csinn_select_params *params);
+
+int shl_gref_and(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_or(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_pad(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_pad_params *params);
+
+int shl_gref_resize(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_resize_params *params);
+
+int shl_gref_concat(struct csinn_tensor **input, struct csinn_tensor *output,
+                    struct csinn_concat_params *params);
+
+int shl_gref_proposal(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                      struct csinn_tensor *im_info, struct csinn_tensor *output,
+                      struct csinn_proposal_params *params);
+
+int shl_gref_psroipooling(struct csinn_tensor *data, struct csinn_tensor *rois,
+                          struct csinn_tensor *output, struct csinn_psroipooling_params *params);
+
+int shl_gref_transpose(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_transpose_params *params);
+
+int shl_gref_reshape(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reshape_params *params);
+
+int shl_gref_shape(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_shape_params *params);
+
+int shl_gref_strided_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_strided_slice_params *params);
+
+int shl_gref_expand_dims(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_expand_dims_params *params);
+
+int shl_gref_expm1(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_gref_reverse(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reverse_params *params);
+
+int shl_gref_flatten(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_flatten_params *params);
+
+int shl_gref_crop(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_crop_params *params);
+
+int shl_gref_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_slice_params *params);
+
+int shl_gref_split(struct csinn_tensor *input, struct csinn_tensor **output,
+                   struct csinn_split_params *params);
+
+int shl_gref_stack(struct csinn_tensor **input, struct csinn_tensor *output,
+                   struct csinn_stack_params *params);
+
+int shl_gref_tile(struct csinn_tensor *inputs, struct csinn_tensor *output,
+                  struct csinn_tile_params *params);
+
+int shl_gref_arange(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int shl_gref_where(struct csinn_tensor *condition, struct csinn_tensor *x, struct csinn_tensor *y,
+                   struct csinn_tensor *output, struct csinn_where_params *params);
+
+int shl_gref_unstack(struct csinn_tensor *input, struct csinn_tensor **output,
+                     struct csinn_unstack_params *params);
+
+int shl_gref_gather(struct csinn_tensor *input, struct csinn_tensor *indices,
+                    struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int shl_gref_gather_nd(struct csinn_tensor *input, struct csinn_tensor *indices,
+                       struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int shl_gref_hard_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_sigmoid_params *params);
+
+int shl_gref_isnan_bool(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_gref_logical_and(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_logical_not(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_gref_logical_or(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_logical_xor(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_squeeze(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_squeeze_params *params);
+
+int shl_gref_segment_max(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_gref_segment_mean(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_gref_segment_min(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_gref_segment_prod(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_gref_segment_sum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_gref_scatter_nd(struct csinn_tensor *input, struct csinn_tensor *indices,
+                        struct csinn_tensor *updates, struct csinn_tensor *output,
+                        struct csinn_scatter_nd_params *params);
+
+int shl_gref_shuffle_channel(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_shuffle_channel_params *params);
+
+int shl_gref_sign(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_gref_ndarray_size(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_ndarray_size_params *params);
+
+int shl_gref_space_to_batch(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_space_to_batch_params *params);
+
+int shl_gref_batch_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_batch_to_space_params *params);
+
+int shl_gref_batch_to_space_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_batch_to_space_nd_params *params);
+
+int shl_gref_space_to_depth(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_space_to_depth_params *params);
+
+int shl_gref_depth_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_depth_to_space_params *params);
+
+int shl_gref_broadcast_to(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_broadcast_to_params *params);
+
+int shl_gref_one_hot(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_one_hot_params *params);
+
+int shl_gref_sequence_mask(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_sequence_mask_params *params);
+
+int shl_gref_im2col(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_im2col_params *params);
+
+int shl_gref_col2im(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_tensor *kernel, struct csinn_col2im_params *params);
+
+int shl_gref_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_gref_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reduce_params *params);
+
+int shl_gref_max(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_gref_min(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_gref_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reduce_params *params);
+
+int shl_gref_argmin(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int shl_gref_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int shl_gref_all(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_gref_any(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_gref_reorg(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reorg_params *params);
+
+int shl_gref_erf(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_gref_xor(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_gref_yuv_rgb_scale(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int shl_gref_layer_norm(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                        struct csinn_layer_norm_params *params);
+
+int shl_gref_cache_matmul(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *weight, struct csinn_tensor *bias,
+                          struct csinn_cache_matmul_params *params);
+
+int shl_gref_cache_conv1d(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *weight, struct csinn_tensor *bias,
+                          struct csinn_cache_conv1d_params *params);
+
+int shl_gref_data_convert(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_siso_params *params);
+
+struct shl_ref_graph {
+    struct shl_node **input;
+    struct shl_node **output;
+    int input_num;
+    int output_num;
+    struct shl_node **layer;
+    int layer_size;
+    int layer_index;
+};
+
+struct shl_gref_target_data {
+    struct shl_ref_graph *graph;
+    int is_hybrid_quantization_type;
+};
+
+struct shl_ref_graph *shl_gref_get_graph(struct csinn_session *sess);
+int shl_gref_graph_insert(struct shl_node *node, struct shl_ref_graph *graph);
+void shl_gref_post_dfs(struct shl_ref_graph *graph,
+                       void (*fvisit)(struct shl_ref_graph *, struct shl_node *));
+int shl_gref_is_root_node(struct shl_ref_graph *graph, struct shl_node *node);
+struct shl_node *shl_gref_get_input_subgraph(struct shl_ref_graph *graph, struct shl_node *node,
+                                             int index);
+void shl_gref_reset_graph_visit(struct shl_ref_graph *graph);
+void shl_gref_update_input_output(struct shl_ref_graph *graph, int index);
+int shl_gref_siso_op(struct csinn_tensor *input, struct csinn_tensor *output, int op, void *params);
+int shl_gref_diso_op(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, int op, void *params);
+int shl_gref_sidcso_op(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *const0, struct csinn_tensor *const1, int op,
+                       void *params);
+void shl_gref_set_tensor(struct csinn_tensor *tensor, struct csinn_session *sess);
+void shl_gref_set_const_tensor(struct csinn_tensor *tensor, struct csinn_session *sess);
+int shl_gref_get_tensor(int index, struct csinn_tensor *ret, struct csinn_session *sess);
+void shl_gref_nbg(struct csinn_tensor **input, struct csinn_tensor **output, uint32_t inputs_count,
+                  uint32_t outputs_count, const char *url);
+
+void shl_subgraph_alloc(struct shl_node *node, struct shl_ref_graph *ograph,
+                        struct shl_ref_graph *ggraph);
+int shl_subgraph_setup(struct shl_node *n);
+int shl_subgraph_deinit(struct shl_node *n);
+int shl_subgraph_run_init(struct shl_node *n);
+int shl_subgraph_run(struct shl_node *n);
+int shl_subgraph_run_deinit(struct shl_node *n, struct shl_ref_graph *graph);
+
+struct shl_ref_graph *shl_subgraph_generate(struct shl_ref_graph *ograph);
+struct shl_ref_graph *shl_subgraph_rebuild(struct shl_ref_graph *subgraph);
+struct shl_ref_graph *shl_subgraph_topology_sort(struct shl_ref_graph *graph);
+void shl_subgraph_fvisit_fuse(struct shl_ref_graph *graph, struct shl_node *node);
+void shl_subgraph_fvisit_print(struct shl_ref_graph *graph, struct shl_node *node);
+int shl_subgraph_get_device(struct shl_node *node);
+void *shl_gref_runtime_callback(int api);
+#endif  // INCLUDE_SHL_GREF_H_
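`shl_gref_post_dfs()` takes a visitor callback and walks the graph in post order, which is how passes such as `shl_subgraph_fvisit_fuse` and `shl_subgraph_fvisit_print` are driven. A small sketch of a custom visitor is below; it uses only declarations from this header plus the `name` field of `struct shl_node` (declared in `shl_node.h` later in this commit).

    #include <stdio.h>
    #include "shl_gref.h"

    /* Visitor: print each node's name as it is visited. */
    static void fvisit_print_name(struct shl_ref_graph *graph, struct shl_node *node)
    {
        (void)graph;
        printf("node: %s\n", node->name ? node->name : "(unnamed)");
    }

    /* Sketch: walk a session's graph in post order. Clearing the visited
     * flags first is what shl_gref_reset_graph_visit() appears to exist for. */
    void dump_graph_nodes(struct csinn_session *sess)
    {
        struct shl_ref_graph *graph = shl_gref_get_graph(sess);
        shl_gref_reset_graph_visit(graph);
        shl_gref_post_dfs(graph, fvisit_print_name);
    }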

+ 144 - 0
lib/install_nn2/include/shl_i805.h

@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_I805_H_
+#define INCLUDE_SHL_I805_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "csi_nn.h"
+#include "shl_ref.h"
+
+int shl_i805_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+int shl_i805_conv2d_init_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_i805_depthwise_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+
+int shl_i805_avgpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int shl_i805_maxpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int shl_i805_fullyconnected_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weights, struct csinn_tensor *bias,
+                               struct csinn_fc_params *params);
+
+int shl_i805_fullyconnected_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                struct csinn_fc_params *params);
+
+int shl_i805_softmax_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_softmax_params *params);
+
+int shl_i805_softmax_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_softmax_params *params);
+
+int shl_i805_relu_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int shl_i805_relu_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_i805_sigmoid_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_sigmoid_params *params);
+
+int shl_i805_sigmoid_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_sigmoid_params *params);
+
+int shl_i805_tanh_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_i805_tanh_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+/*********************** u8 asym quant opt func *********************************/
+
+int shl_i805_add_init_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_i805_add_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_i805_clip_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_clip_params *params);
+
+int shl_i805_clip_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_clip_params *params);
+
+int shl_i805_conv2d_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                            struct csinn_conv2d_params *params);
+
+int shl_i805_conv2d_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv2d_params *params);
+
+int shl_i805_depthwise_conv2d_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+
+int shl_i805_depthwise_conv2d_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+int shl_i805_fullyconnected_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                    struct csinn_fc_params *params);
+
+int shl_i805_fullyconnected_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weights, struct csinn_tensor *bias,
+                               struct csinn_fc_params *params);
+
+int shl_i805_maxpool2d_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int shl_i805_mul_init_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_i805_mul_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_i805_relu_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_relu_params *params);
+
+int shl_i805_relu_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int shl_i805_relu6_init_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_relu_params *params);
+
+int shl_i805_relu6_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_i805_reshape_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_reshape_params *params);
+
+#endif  // INCLUDE_SHL_I805_H_
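The u8 asymmetric-quantization block pairs each op with an `*_init_*` entry. The naming suggests init precomputes requantization constants from the tensors' quantization info once, and the bare kernel then runs per frame; that division of labor is inferred from the convention, not stated in the header. A sketch of the pairing, with `CSINN_TRUE` assumed as the success code from `csinn_data_structure.h`:

    #include "csi_nn.h"
    #include "shl_i805.h"

    /* Sketch: one-shot u8 ReLU using the init/run pair. */
    int relu_u8_once(struct csinn_tensor *in, struct csinn_tensor *out,
                     struct csinn_relu_params *params)
    {
        int ret = shl_i805_relu_init_u8(in, out, params);
        if (ret != CSINN_TRUE) {
            return ret;
        }
        return shl_i805_relu_u8(in, out, params);
    }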

+ 33 - 0
lib/install_nn2/include/shl_memory.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+#ifndef INCLUDE_SHL_MEMORY_H_
+#define INCLUDE_SHL_MEMORY_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+void shl_mem_print_map();
+void *shl_mem_alloc(int64_t size);
+void *shl_mem_alloc_aligned(int64_t size, int aligned_bytes);
+void *shl_mem_calloc(size_t nmemb, size_t size);
+void *shl_mem_realloc(void *ptr, size_t size);
+void shl_mem_free(void *ptr);
+
+#endif  // INCLUDE_SHL_MEMORY_H_
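These wrappers shadow the libc allocators so the library can track allocations and report them via `shl_mem_print_map()`. The sketch below relies only on the signatures above; whether `shl_mem_alloc()` zero-fills, and what exactly the map printout contains, is not specified by this header.

    #include "shl_memory.h"

    int main(void)
    {
        /* Note the 64-bit size argument, unlike plain malloc's size_t. */
        float *buf = shl_mem_alloc(1024 * sizeof(float));
        if (!buf) return 1;

        float *grown = shl_mem_realloc(buf, 4096 * sizeof(float));
        if (!grown) {
            shl_mem_free(buf);
            return 1;
        }

        shl_mem_print_map();  /* dump the tracked allocations, e.g. for leak hunting */
        shl_mem_free(grown);
        return 0;
    }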

+ 55 - 0
lib/install_nn2/include/shl_node.h

@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_NODE_H_
+#define INCLUDE_SHL_NODE_H_
+
+struct shl_node {
+    int type;
+    struct shl_node **in;
+    struct shl_node **out;
+    int subgraph_idx;
+    int in_num;
+    int out_num;
+    char *name;
+    void *data;
+    int ref_count;
+    int ref_count_init;
+    int visited;
+    int *restricted_map;
+    int restricted_map_num;
+};
+
+/* node */
+struct shl_node *shl_node_alloc(int node_type, char *name, int in_num, int out_num, void *data);
+struct shl_node *shl_node_var_alloc(char *name, void *data);
+struct shl_node *shl_node_const_var_alloc(char *name, void *data);
+int shl_node_free(struct shl_node *node);
+int shl_node_add_in(struct shl_node *node, struct shl_node *in, int index);
+int shl_node_add_out(struct shl_node *node, struct shl_node *out, int index);
+int shl_node_get_in_number(struct shl_node *node);
+int shl_node_get_out_number(struct shl_node *node);
+int shl_node_get_non_const_in_number(struct shl_node *node);
+struct shl_node *shl_node_get_in(struct shl_node *node, int index);
+struct shl_node *shl_node_get_out(struct shl_node *node, int index);
+int shl_node_restrict_map_insert(int value, struct shl_node *node);
+int shl_node_find(struct shl_node **list, int len, struct shl_node *node);
+
+#endif  // INCLUDE_SHL_NODE_H_
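A node is allocated with a fixed number of input/output slots and then wired explicitly with `shl_node_add_in()`/`shl_node_add_out()`. The sketch below builds a two-input op node; `CSINN_OP_ADD` is assumed from the op enum in `csinn_data_structure.h`, and `data` pointing at the op's params struct follows the pattern used by the graph reference runtime.

    #include "csi_nn.h"   /* for the CSINN_OP_* enum (assumed) */
    #include "shl_node.h"

    /* Sketch: allocate an add node with 2 input slots and 1 output slot,
     * then attach its operand nodes at indices 0 and 1. */
    struct shl_node *make_add_node(struct shl_node *lhs, struct shl_node *rhs,
                                   void *params)
    {
        struct shl_node *n = shl_node_alloc(CSINN_OP_ADD, "add_0", 2, 1, params);
        shl_node_add_in(n, lhs, 0);
        shl_node_add_in(n, rhs, 1);
        return n;
    }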

+ 389 - 0
lib/install_nn2/include/shl_ovx.h

@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_OVX_H_
+#define INCLUDE_SHL_OVX_H_
+#include "csi_nn.h"
+#include "shl_utils.h"
+
+int shl_ovx_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                   struct csinn_conv2d_params *params);
+
+int shl_ovx_depthwise_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_ovx_group_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_ovx_conv2d_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                        struct csinn_conv2d_params *params);
+
+int shl_ovx_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                     struct csinn_conv2d_params *params);
+
+int shl_ovx_depthwise_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_ovx_fullyconnected(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *weights, struct csinn_tensor *bias,
+                           struct csinn_fc_params *params);
+
+int shl_ovx_fullyconnected_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                struct csinn_fc_params *params);
+
+int shl_ovx_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pool_params *params);
+
+int shl_ovx_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pool_params *params);
+
+int shl_ovx_global_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_pool_params *params);
+
+int shl_ovx_global_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_pool_params *params);
+
+int shl_ovx_l2pool(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_pool_params *params);
+
+int shl_ovx_pool_with_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_pool_params *params);
+
+int shl_ovx_maxpool2d_locat(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_ovx_unpooling(struct csinn_tensor *input, struct csinn_tensor *mask,
+                      struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int shl_ovx_negative(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ovx_floor(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_ovx_ceil(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_ovx_abs(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int shl_ovx_exp(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int shl_ovx_log(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int shl_ovx_sin(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_siso_params *params);
+
+int shl_ovx_tanh(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_ovx_sqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_siso_params *params);
+
+int shl_ovx_rsqrt(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_ovx_square(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_ovx_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_sigmoid_params *params);
+
+int shl_ovx_elu(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_relu_params *params);
+
+int shl_ovx_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_relu_params *params);
+
+int shl_ovx_relu1(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_relu_params *params);
+
+int shl_ovx_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_relu_params *params);
+
+int shl_ovx_relun(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_relu_params *params);
+
+int shl_ovx_leaky_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_ovx_softrelu(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int shl_ovx_prelu(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                  struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_ovx_softplus(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ovx_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_softmax_params *params);
+
+int shl_ovx_log_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_softmax_params *params);
+
+int shl_ovx_batch_normalization(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                struct csinn_tensor *beta, struct csinn_tensor *output,
+                                struct csinn_bn_params *params);
+
+int shl_ovx_l2_normalization(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_l2n_params *params);
+
+int shl_ovx_lrn(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_lrn_params *params);
+
+int shl_ovx_matmul(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                   struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_ovx_add(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_sub(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_mul(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_div(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_floor_divide(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                         struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_maximum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_minimum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_power(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_greater(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_less(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_not_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_greater_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_less_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_select(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                   struct csinn_tensor *input1, struct csinn_tensor *output,
+                   struct csinn_diso_params *params);
+
+int shl_ovx_and(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_or(struct csinn_tensor *input0, struct csinn_tensor *input1,
+               struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ovx_pad(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_pad_params *params);
+
+int shl_ovx_resize(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_resize_params *params);
+
+int shl_ovx_concat(struct csinn_tensor **input, struct csinn_tensor *output,
+                   struct csinn_concat_params *params);
+
+int shl_ovx_proposal(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                     struct csinn_tensor *im_info, struct csinn_tensor *output,
+                     struct csinn_proposal_params *params);
+
+int shl_ovx_psroipooling(struct csinn_tensor *data, struct csinn_tensor *rois,
+                         struct csinn_tensor *output, struct csinn_psroipooling_params *params);
+
+int shl_ovx_roipool(struct csinn_tensor *data, struct csinn_tensor *rois,
+                    struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int shl_ovx_roi_align(struct csinn_tensor *input, struct csinn_tensor *rois,
+                      struct csinn_tensor *output, struct csinn_roi_align_params *params);
+
+int shl_ovx_transpose(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_transpose_params *params);
+
+int shl_ovx_reshape(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reshape_params *params);
+
+int shl_ovx_reshape_tail(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+
+int shl_ovx_shape(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_shape_params *params);
+
+int shl_ovx_expand_dims_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_expand_dims_params *params);
+
+int shl_ovx_expand_dims_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_expand_dims_params *params);
+
+int shl_ovx_reverse(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reverse_params *params);
+
+int shl_ovx_flatten(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_flatten_params *params);
+
+int shl_ovx_flatten_tail(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_flatten_params *params);
+
+int shl_ovx_crop(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_crop_params *params);
+
+int shl_ovx_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_slice_params *params);
+
+int shl_ovx_slice_tail(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_slice_params *params);
+
+int shl_ovx_strided_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_strided_slice_params *params);
+
+int shl_ovx_split(struct csinn_tensor *input, struct csinn_tensor **output,
+                  struct csinn_split_params *params);
+
+int shl_ovx_stack(struct csinn_tensor **inputs, struct csinn_tensor *output,
+                  struct csinn_stack_params *params);
+
+int shl_ovx_tile(struct csinn_tensor *inputs, struct csinn_tensor *output,
+                 struct csinn_tile_params *params);
+
+int shl_ovx_arange(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int shl_ovx_where(struct csinn_tensor *condition, struct csinn_tensor *x, struct csinn_tensor *y,
+                  struct csinn_tensor *output, struct csinn_where_params *params);
+
+int shl_ovx_unstack(struct csinn_tensor *input, struct csinn_tensor **outputs,
+                    struct csinn_unstack_params *params);
+
+int shl_ovx_gather(struct csinn_tensor *input, struct csinn_tensor *indices,
+                   struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int shl_ovx_gather_nd(struct csinn_tensor *input, struct csinn_tensor *indices,
+                      struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int shl_ovx_squeeze(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_squeeze_params *params);
+
+int shl_ovx_squeeze_tail(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_squeeze_params *params);
+
+int shl_ovx_ndarray_size(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_ndarray_size_params *params);
+
+int shl_ovx_space_to_batch(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_space_to_batch_params *params);
+
+int shl_ovx_batch_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_batch_to_space_params *params);
+
+int shl_ovx_space_to_depth(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_space_to_depth_params *params);
+
+int shl_ovx_depth_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_depth_to_space_params *params);
+
+int shl_ovx_one_hot(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_one_hot_params *params);
+
+int shl_ovx_sequence_mask(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_sequence_mask_params *params);
+
+int shl_ovx_im2col(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_im2col_params *params);
+
+int shl_ovx_col2im(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_tensor *kernel, struct csinn_col2im_params *params);
+
+int shl_ovx_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reduce_params *params);
+
+int shl_ovx_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_ovx_max(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reduce_params *params);
+
+int shl_ovx_min(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reduce_params *params);
+
+int shl_ovx_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_ovx_argmin(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int shl_ovx_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_reduce_params *params);
+
+int shl_ovx_all(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reduce_params *params);
+
+int shl_ovx_any(struct csinn_tensor *input, struct csinn_tensor *output,
+                struct csinn_reduce_params *params);
+
+int shl_ovx_reorg(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reorg_params *params);
+
+int shl_ovx_topk(struct csinn_tensor *input, struct csinn_tensor *output0,
+                 struct csinn_tensor *output1, struct csinn_topk_params *params);
+
+int shl_ovx_clip(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_clip_params *params);
+
+int shl_ovx_shuffle_channel(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_shuffle_channel_params *params);
+
+int32_t shl_get_ceil_mode_fix(int32_t input, int32_t kernel, int32_t stride, int32_t pad);
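shl_get_ceil_mode_fix is undocumented in this header; judging only from the parameter names, it returns a corrected extent along one spatial axis when a pooling window runs in ceil mode. For context, the usual pooling output-size arithmetic (a standard formula, not a documented contract of this function) is:

    /* floor mode: out = (in + 2*pad - kernel) / stride + 1
     * ceil  mode: out = (in + 2*pad - kernel + stride - 1) / stride + 1
     * (integer division; the ceil form adds stride-1 before dividing) */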
+
+struct shl_ovx_target_data {
+    void *graph;
+};
+
+void *shl_ovx_get_graph(struct csinn_session *sess);
+
+uint8_t *shl_ovx_input_f32_to_u8(uint32_t idx, float *data, struct csinn_session *sess);
+int shl_ovx_get_tensor(int index, struct csinn_tensor *ret, struct csinn_session *sess);
+void shl_ovx_save_output(int index, const char *filename, struct csinn_session *sess);
+void shl_ovx_show_top5(int index, struct csinn_session *sess);
+void shl_ovx_set_graph_attribute(struct csinn_session *sess, int device_index);
+int shl_ovx_get_device_number(void);
+int shl_ovx_set_tensor(struct csinn_tensor *tensor, struct csinn_session *sess);
+
+#endif  // INCLUDE_SHL_OVX_H_
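Every wrapper above shares one calling convention: operand tensors first, then an op-specific params struct, with a CSINN status code returned; in normal use they are reached through the graph API in csi_nn.h rather than called one by one. The session-level helpers at the end of the header can be used directly, though. A minimal debugging sketch, assuming sess has already been built and run through the usual csinn_* entry points (model loading elided; the output0.bin name is illustrative):

    #include <stdio.h>
    #include "csi_nn.h"
    #include "shl_ovx.h"

    /* Sketch: inspect an OVX session after inference has run. */
    static void dump_ovx_results(struct csinn_session *sess)
    {
        /* Number of OpenVX-capable devices the backend can see. */
        printf("ovx devices: %d\n", shl_ovx_get_device_number());

        /* Print the top-5 values of output tensor 0 ... */
        shl_ovx_show_top5(0, sess);

        /* ... and dump the raw output bytes for offline comparison. */
        shl_ovx_save_output(0, "output0.bin", sess);
    }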

+ 272 - 0
lib/install_nn2/include/shl_pnna.h

@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_PNNA_H_
+#define INCLUDE_SHL_PNNA_H_
+#include "csi_nn.h"
+#include "shl_utils.h"
+
+int shl_pnna_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                    struct csinn_conv2d_params *params);
+
+int shl_pnna_depthwise_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_pnna_group_conv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                          struct csinn_conv2d_params *params);
+
+int shl_pnna_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                      struct csinn_conv2d_params *params);
+
+int shl_pnna_depthwise_deconv2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+int shl_pnna_fullyconnected(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_tensor *weights, struct csinn_tensor *bias,
+                            struct csinn_fc_params *params);
+
+int shl_pnna_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_pnna_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_pnna_global_avgpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_pnna_global_maxpool2d(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+
+int shl_pnna_negative(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_pnna_tanh(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_siso_params *params);
+
+int shl_pnna_sigmoid(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_sigmoid_params *params);
+
+int shl_pnna_elu(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_relu_params *params);
+
+int shl_pnna_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_relu_params *params);
+
+int shl_pnna_relu1(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int shl_pnna_relu6(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_relu_params *params);
+
+int shl_pnna_leaky_relu(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_pnna_prelu(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                   struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_pnna_softmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_softmax_params *params);
+
+int shl_pnna_batch_normalization(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                 struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                 struct csinn_tensor *beta, struct csinn_tensor *output,
+                                 struct csinn_bn_params *params);
+
+int shl_pnna_l2_normalization(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_l2n_params *params);
+
+int shl_pnna_lrn(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_lrn_params *params);
+
+int shl_pnna_matmul(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                    struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_pnna_add(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_sub(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_mul(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_div(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_maximum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_minimum(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_power(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_greater(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_less(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_not_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_greater_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_less_equal(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_select(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                    struct csinn_tensor *input1, struct csinn_tensor *output,
+                    struct csinn_diso_params *params);
+
+int shl_pnna_and(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                 struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_or(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_pnna_pad(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_pad_params *params);
+
+int shl_pnna_resize(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_resize_params *params);
+
+int shl_pnna_concat(struct csinn_tensor **input, struct csinn_tensor *output,
+                    struct csinn_concat_params *params);
+
+int shl_pnna_transpose(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_transpose_params *params);
+
+int shl_pnna_reshape(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_reshape_params *params);
+
+int shl_pnna_shape(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_shape_params *params);
+
+int shl_pnna_flatten(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_flatten_params *params);
+
+int shl_pnna_crop(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_crop_params *params);
+
+int shl_pnna_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_slice_params *params);
+
+int shl_pnna_split(struct csinn_tensor *input, struct csinn_tensor **output,
+                   struct csinn_split_params *params);
+
+int shl_pnna_squeeze(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_squeeze_params *params);
+
+int shl_pnna_space_to_batch_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_space_to_batch_nd_params *params);
+
+int shl_pnna_batch_to_space_nd(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_batch_to_space_nd_params *params);
+
+int shl_pnna_space_to_depth(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_space_to_depth_params *params);
+
+int shl_pnna_depth_to_space(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_depth_to_space_params *params);
+
+int shl_pnna_sum(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_pnna_mean(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reduce_params *params);
+
+int shl_pnna_max(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_pnna_min(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_pnna_prod(struct csinn_tensor *input, struct csinn_tensor *output,
+                  struct csinn_reduce_params *params);
+
+int shl_pnna_argmin(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int shl_pnna_argmax(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reduce_params *params);
+
+int shl_pnna_all(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_pnna_any(struct csinn_tensor *input, struct csinn_tensor *output,
+                 struct csinn_reduce_params *params);
+
+int shl_pnna_strided_slice(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_strided_slice_params *params);
+
+int shl_pnna_roipool(struct csinn_tensor *data, struct csinn_tensor *rois,
+                     struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int shl_pnna_proposal(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                      struct csinn_tensor *im_info, struct csinn_tensor *output,
+                      struct csinn_proposal_params *params);
+
+int shl_pnna_unpooling(struct csinn_tensor *input, struct csinn_tensor *mask,
+                       struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int shl_pnna_maxpool2d_locat(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_pool_params *params);
+
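Note: the spelling in the next declaration, shl_pnna_data_covert rather than "convert", is kept as-is; renaming the prototype would break linking against the prebuilt libshl_pnna.so shipped under lib/install_nn2/lib/.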
+int shl_pnna_data_covert(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_pnna_set_input_strides(struct csinn_session *sess, int input_byte_size, int input_fix_h,
+                               int input_fix_w);
+
+struct shl_pnna_tensor_fix {
+    int height;
+    int width;
+};
+
+struct shl_pnna_target_data {
+    void *network;
+    void *net_obj;
+    void *context;
+    void *binding;
+    void *attrs;
+    void *graph;
+    void *nodes;
+    void *in_buffers;
+    void *out_buffers;
+    void *light_hwconfig;
+    void *light_mapconfig;
+    void *to_free;
+    int priority;
+    struct shl_pnna_tensor_fix **input_fix;
+    enum csinn_quant_enum quant_type;
+};
+
+#endif  // INCLUDE_SHL_PNNA_H_
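shl_pnna_set_input_strides is the only pre-run configuration hook declared in this header. A minimal sketch of wiring it up, assuming the session comes from the csi_nn.h API and reading the trailing arguments as element byte size plus a fixed input height and width (an interpretation of the parameter names; the 1/224/224 values below are purely illustrative):

    #include "csi_nn.h"
    #include "shl_pnna.h"

    /* Sketch: fix the input geometry the PNNA backend should assume.
     * Argument meanings are inferred from the parameter names. */
    static int setup_pnna_input(struct csinn_session *sess)
    {
        return shl_pnna_set_input_strides(sess, /*input_byte_size=*/1,
                                          /*input_fix_h=*/224, /*input_fix_w=*/224);
    }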

+ 1206 - 0
lib/install_nn2/include/shl_ref.h

@@ -0,0 +1,1206 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_REF_H_
+#define INCLUDE_SHL_REF_H_
+
+#include "csi_nn.h"
+#include "shl_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int shl_ref_abs_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_abs_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
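From here on, every operator comes in an _f32 flavour and a _quant flavour; the _quant entries follow the reference library's usual pattern of dequantizing each operand through its attached scale and zero point, running the float path, and requantizing the result. A sketch of attaching per-tensor quantization info, assuming the csinn_quant_info layout from csinn_data_structure.h (only scale and zero_point are filled in; the remaining fields stay zeroed):

    #include <string.h>
    #include "csi_nn.h"

    /* Sketch: mark a tensor as uint8 data with real = scale * (q - zero_point).
     * One static qinfo is enough for a demo; real code needs one per tensor. */
    static void set_u8_qinfo(struct csinn_tensor *t, float scale, int32_t zero_point)
    {
        static struct csinn_quant_info qinfo;
        memset(&qinfo, 0, sizeof(qinfo));
        qinfo.scale = scale;
        qinfo.zero_point = zero_point;
        t->dtype = CSINN_DTYPE_UINT8;
        t->quant_channel = 1;   /* per-tensor, not per-channel, quantization */
        t->qinfo = &qinfo;
    }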
+int shl_ref_acos_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_acos_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_acosh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_acosh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_add_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_add_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_add_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
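Unlike the ovx and pnna wrappers, the shl_ref_* kernels are plain CPU implementations and can be exercised directly. A minimal, self-contained sketch for shl_ref_add_f32, assuming csinn_tensor and csinn_diso_params may be zero-initialized on the stack (csinn_alloc_tensor from csi_nn.h would work equally well):

    #include <stdio.h>
    #include "csi_nn.h"
    #include "shl_ref.h"

    int main(void)
    {
        float a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, out[4] = {0};

        struct csinn_tensor in0 = {0}, in1 = {0}, res = {0};
        struct csinn_diso_params params = {0};

        /* Three 1-D float32 vectors of length 4. */
        in0.dim_count = in1.dim_count = res.dim_count = 1;
        in0.dim[0] = in1.dim[0] = res.dim[0] = 4;
        in0.dtype = in1.dtype = res.dtype = CSINN_DTYPE_FLOAT32;
        in0.data = a; in1.data = b; res.data = out;

        if (shl_ref_add_f32(&in0, &in1, &res, &params) == CSINN_TRUE)
            printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
        return 0;
    }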
+int shl_ref_and_u32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_and_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_and_i8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_arange_f32(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int shl_ref_arange_quant(struct csinn_tensor *output, struct csinn_arange_params *params);
+
+int shl_ref_argmax_stride_i32_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_reduce_params *params);
+
+int shl_ref_argmax_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_reduce_params *params);
+
+int shl_ref_argmin_stride_i32_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_reduce_params *params);
+
+int shl_ref_argmin_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_reduce_params *params);
+
+int shl_ref_asin_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_asin_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_asinh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_asinh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_atan_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_atan_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_atanh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_atanh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_avgpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int shl_ref_avgpool2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_ref_avgpool3d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int shl_ref_avgpool3d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_ref_batch_normalization_f32(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                    struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                    struct csinn_tensor *beta, struct csinn_tensor *output,
+                                    struct csinn_bn_params *params);
+
+int shl_ref_batch_normalization_quant(struct csinn_tensor *input, struct csinn_tensor *mean,
+                                      struct csinn_tensor *variance, struct csinn_tensor *gamma,
+                                      struct csinn_tensor *beta, struct csinn_tensor *output,
+                                      struct csinn_bn_params *params);
+
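For reference, both batch-normalization entry points compute the standard inference-time transform, with epsilon carried in csinn_bn_params (the formula is the textbook one, not quoted from this header):

    output = gamma * (input - mean) / sqrt(variance + epsilon) + beta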
+int shl_ref_batch_to_space_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_batch_to_space_params *params);
+
+int shl_ref_batch_to_space_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_batch_to_space_params *params);
+
+int shl_ref_broadcast_to_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_broadcast_to_params *params);
+
+int shl_ref_broadcast_to_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_broadcast_to_params *params);
+
+int shl_ref_ceil_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_ceil_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_clip_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_clip_params *params);
+
+int shl_ref_clip_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_clip_params *params);
+
+int shl_ref_col2im_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_col2im_params *params);
+
+int shl_ref_concat_f32(struct csinn_tensor **input, struct csinn_tensor *output,
+                       struct csinn_concat_params *params);
+
+int shl_ref_concat_quant(struct csinn_tensor **input, struct csinn_tensor *output,
+                         struct csinn_concat_params *params);
+
+int shl_ref_conv1d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv1d_params *params);
+
+int shl_ref_conv1d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv1d_params *params);
+
+int shl_ref_conv2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_channel_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_relu_f32(struct csinn_tensor *o_input, struct csinn_tensor *o_output,
+                            struct csinn_tensor *o_kernel, struct csinn_tensor *o_bias,
+                            struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_relu_quant(struct csinn_tensor *o_input, struct csinn_tensor *o_output,
+                              struct csinn_tensor *o_kernel, struct csinn_tensor *o_bias,
+                              struct csinn_conv2d_params *params);
+
+int shl_ref_cache_matmul_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *weight, struct csinn_tensor *bias,
+                              struct csinn_cache_matmul_params *params);
+
+int shl_ref_cache_matmul_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *weight, struct csinn_tensor *bias,
+                             struct csinn_cache_matmul_params *params);
+
+int shl_ref_cache_matmul_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_matmul_params *params);
+
+int shl_ref_cache_conv1d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *weight, struct csinn_tensor *bias,
+                              struct csinn_cache_conv1d_params *params);
+
+int shl_ref_cache_conv1d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *weight, struct csinn_tensor *bias,
+                             struct csinn_cache_conv1d_params *params);
+
+int shl_ref_cache_conv1d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weight, struct csinn_tensor *bias,
+                               struct csinn_cache_conv1d_params *params);
+
+int shl_ref_conv2d_channel_relu_quant(struct csinn_tensor *o_input, struct csinn_tensor *o_output,
+                                      struct csinn_tensor *o_kernel, struct csinn_tensor *o_bias,
+                                      struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_relu6_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_ref_conv2d_channel_relu6_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_channel_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_relu_f32(struct csinn_tensor *o_input, struct csinn_tensor *o_output,
+                                      struct csinn_tensor *o_kernel, struct csinn_tensor *o_bias,
+                                      struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_relu_quant(struct csinn_tensor *o_input, struct csinn_tensor *o_output,
+                                        struct csinn_tensor *o_kernel, struct csinn_tensor *o_bias,
+                                        struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_channel_relu_quant(struct csinn_tensor *o_input,
+                                                struct csinn_tensor *o_output,
+                                                struct csinn_tensor *o_kernel,
+                                                struct csinn_tensor *o_bias,
+                                                struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_relu6_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_conv2d_channel_relu6_quant(struct csinn_tensor *input,
+                                                 struct csinn_tensor *output,
+                                                 struct csinn_tensor *kernel,
+                                                 struct csinn_tensor *bias,
+                                                 struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                               struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_channel_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_relu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                    struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_relu6_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                     struct csinn_conv2d_params *params);
+
+int shl_ref_group_conv2d_channel_relu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                            struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                            struct csinn_conv2d_params *params);
+
+int shl_ref_conv3d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                       struct csinn_conv3d_params *params);
+
+int shl_ref_conv3d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv3d_params *params);
+
+int shl_ref_cos_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_cos_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_cosh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_cosh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_cumprod_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_cumprod_params *params);
+
+int shl_ref_cumprod_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_cumprod_params *params);
+
+int shl_ref_cumsum_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_cumsum_params *params);
+
+int shl_ref_cumsum_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_cumsum_params *params);
+
+int shl_ref_data_convert_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_siso_params *params);
+int shl_ref_data_convert_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_siso_params *params);
+
+int shl_ref_deconv2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv2d_params *params);
+
+int shl_ref_deconv2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                           struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_deconv2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_ref_depthwise_deconv2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                     struct csinn_conv2d_params *params);
+
+int shl_ref_deconv3d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                         struct csinn_conv3d_params *params);
+
+int shl_ref_deconv3d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                           struct csinn_conv3d_params *params);
+
+int shl_ref_depth_to_space_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_depth_to_space_params *params);
+
+int shl_ref_depth_to_space_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_depth_to_space_params *params);
+
+int shl_ref_div_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_div_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_elu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_relu_params *params);
+
+int shl_ref_elu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_ref_fsmn_f32(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+                     struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+                     struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+                     struct csinn_fsmn_params *params);
+
+int shl_ref_fsmn_quant(struct csinn_tensor *frame, struct csinn_tensor *l_filter,
+                       struct csinn_tensor *r_filter, struct csinn_tensor *frame_sequence,
+                       struct csinn_tensor *frame_counter, struct csinn_tensor *output,
+                       struct csinn_fsmn_params *params);
+
+int shl_ref_equal_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_equal_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_erf_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_erf_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_exp_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_exp_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_expand_dims_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_expand_dims_params *params);
+
+int shl_ref_expand_dims_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_expand_dims_params *params);
+
+int shl_ref_expm1_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_expm1_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_flatten(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_flatten_params *params);
+
+int shl_ref_flatten_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_flatten_params *params);
+
+int shl_ref_floor_divide_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                             struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_floor_divide_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                               struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_floor_mod_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_floor_mod_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_floor_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_floor_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_fullyconnected_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *weights, struct csinn_tensor *bias,
+                               struct csinn_fc_params *params);
+
+int shl_ref_fullyconnected_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                 struct csinn_fc_params *params);
+
+int shl_ref_gather_nd_f32(struct csinn_tensor *input, struct csinn_tensor *indices,
+                          struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int shl_ref_gather_nd_quant(struct csinn_tensor *input, struct csinn_tensor *indices,
+                            struct csinn_tensor *output, struct csinn_gather_nd_params *params);
+
+int shl_ref_gather_f32(struct csinn_tensor *input, struct csinn_tensor *indices,
+                       struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int shl_ref_gather_quant(struct csinn_tensor *input, struct csinn_tensor *indices,
+                         struct csinn_tensor *output, struct csinn_gather_params *params);
+
+int shl_ref_global_avgpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_ref_global_avgpool2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_ref_global_maxpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_ref_global_maxpool2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_ref_greater_equal_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                              struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_greater_equal_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_greater_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_greater_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_hard_sigmoid_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_sigmoid_params *params);
+
+int shl_ref_hard_sigmoid_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_sigmoid_params *params);
+
+int shl_ref_im2col_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_im2col_params *params);
+
+int shl_ref_im2col_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_im2col_params *params);
+
+int shl_ref_isnan_bool_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int shl_ref_l2_normalization_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_l2n_params *params);
+
+int shl_ref_l2_normalization_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_l2n_params *params);
+
+int shl_ref_l2pool_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_pool_params *params);
+
+int shl_ref_layer_norm_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                           struct csinn_layer_norm_params *params);
+
+int shl_ref_layer_norm_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *gamma, struct csinn_tensor *beta,
+                             struct csinn_layer_norm_params *params);
+
+int shl_ref_leaky_relu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_relu_params *params);
+
+int shl_ref_leaky_relu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_relu_params *params);
+
+int shl_ref_less_equal_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_less_equal_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                             struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_less_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_less_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                       struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_log_softmax_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_softmax_params *params);
+
+int shl_ref_log_softmax_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_softmax_params *params);
+
+int shl_ref_log_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_log_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_log1p_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_log1p_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_logical_and_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_logical_and_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                              struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_logical_not_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_siso_params *params);
+
+int shl_ref_logical_not_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_siso_params *params);
+
+int shl_ref_logical_or_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                           struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_logical_or_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                             struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_logical_xor_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_logical_xor_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                              struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_lrn_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_lrn_params *params);
+
+int shl_ref_lrn_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_lrn_params *params);
+
+int shl_ref_matmul_f32(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                       struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_ref_matmul_quant(struct csinn_tensor *mat0, struct csinn_tensor *mat1,
+                         struct csinn_tensor *output, struct csinn_matmul_params *params);
+
+int shl_ref_max_stride_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_max_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_maximum_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_maximum_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_maxpool2d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int shl_ref_maxpool2d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_ref_maxpool2d_locat_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+
+int shl_ref_maxpool2d_locat_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_ref_maxpool3d_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_pool_params *params);
+
+int shl_ref_maxpool3d_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_pool_params *params);
+
+int shl_ref_mean_stride_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_reduce_params *params);
+
+int shl_ref_mean_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_reduce_params *params);
+
+int shl_ref_mean_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_reduce_params *params);
+
+int shl_ref_min_stride_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_min_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_minimum_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_minimum_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_mod_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_mod_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_mul_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_mul_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_ndarray_size_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_ndarray_size_params *params);
+
+int shl_ref_ndarray_size_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_ndarray_size_params *params);
+
+int shl_ref_ndarray_size_i8(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_ndarray_size_params *params);
+
+int shl_ref_ndarray_size_i32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_ndarray_size_params *params);
+
+int shl_ref_negative_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_ref_negative_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int shl_ref_non_max_suppression_std(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                    struct csinn_tensor *output,
+                                    struct csinn_non_max_suppression_params *params);
+
+int shl_ref_not_equal_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                          struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_not_equal_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                            struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_not_u32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_not_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_ref_not_i8(struct csinn_tensor *input, struct csinn_tensor *output,
+                   struct csinn_siso_params *params);
+
+int shl_ref_or_u32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_or_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_or_i8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                  struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_pad_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_pad_params *params);
+
+int shl_ref_pad_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_pad_params *params);
+
+int shl_ref_power_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_power_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                        struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_prelu_f32(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                      struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_ref_prelu_quant(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                        struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+int shl_ref_prod_stride_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_reduce_params *params);
+
+int shl_ref_prod_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_reduce_params *params);
+
+int shl_ref_proposal_f32(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                         struct csinn_tensor *im_info, struct csinn_tensor *output,
+                         struct csinn_proposal_params *params);
+
+int shl_ref_proposal_quant(struct csinn_tensor *cls_prob, struct csinn_tensor *bbox_pred,
+                           struct csinn_tensor *im_info, struct csinn_tensor *output,
+                           struct csinn_proposal_params *params);
+
+int shl_ref_psroipooling_f32(struct csinn_tensor *data, struct csinn_tensor *rois,
+                             struct csinn_tensor *output, struct csinn_psroipooling_params *params);
+
+int shl_ref_psroipooling_quant(struct csinn_tensor *data, struct csinn_tensor *rois,
+                               struct csinn_tensor *output,
+                               struct csinn_psroipooling_params *params);
+
+int shl_ref_reduce_logsumexp_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_reduce_params *params);
+
+int shl_ref_reduce_logsumexp_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_reduce_params *params);
+
+int shl_ref_reduce_max_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_reduce_max_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_reduce_mean_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_reduce_params *params);
+
+int shl_ref_reduce_mean_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_reduce_params *params);
+
+int shl_ref_reduce_min_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_reduce_min_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_reduce_prod_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_reduce_params *params);
+
+int shl_ref_reduce_prod_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_reduce_params *params);
+
+int shl_ref_reduce_sum_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_reduce_sum_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_relu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_relu_params *params);
+
+int shl_ref_relu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_ref_relu1_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_ref_relu1_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_ref_relu6_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_ref_relu6_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_ref_relun_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_ref_relun_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_relu_params *params);
+
+int shl_ref_reshape(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_reshape_params *params);
+
+int shl_ref_reshape_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reshape_params *params);
+
+int shl_ref_resize_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_resize_params *params);
+
+int shl_ref_resize_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_resize_params *params);
+
+int shl_ref_reverse_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_reverse_params *params);
+
+int shl_ref_reverse_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_reverse_params *params);
+
+int shl_ref_roi_align_f32(struct csinn_tensor *data, struct csinn_tensor *rois,
+                          struct csinn_tensor *output, struct csinn_roi_align_params *params);
+
+int shl_ref_roipool_f32(struct csinn_tensor *data, struct csinn_tensor *rois,
+                        struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int shl_ref_roipool_quant(struct csinn_tensor *data, struct csinn_tensor *rois,
+                          struct csinn_tensor *output, struct csinn_roi_pool_params *params);
+
+int shl_ref_round_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_round_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_rsqrt_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_rsqrt_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_scatter_nd_f32(struct csinn_tensor *input, struct csinn_tensor *indices,
+                           struct csinn_tensor *updates, struct csinn_tensor *output,
+                           struct csinn_scatter_nd_params *params);
+
+int shl_ref_scatter_nd_quant(struct csinn_tensor *input, struct csinn_tensor *indices,
+                             struct csinn_tensor *updates, struct csinn_tensor *output,
+                             struct csinn_scatter_nd_params *params);
+
+int shl_ref_unsorted_segment_max_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                     struct csinn_tensor *output,
+                                     struct csinn_segment_params *params);
+
+int shl_ref_segment_max_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                            struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_max_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                       struct csinn_tensor *output,
+                                       struct csinn_segment_params *params);
+
+int shl_ref_segment_max_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                              struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_mean_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                      struct csinn_tensor *output,
+                                      struct csinn_segment_params *params);
+
+int shl_ref_segment_mean_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                             struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_mean_quant(struct csinn_tensor *input,
+                                        struct csinn_tensor *segment_ids,
+                                        struct csinn_tensor *output,
+                                        struct csinn_segment_params *params);
+
+int shl_ref_segment_mean_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                               struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_min_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                     struct csinn_tensor *output,
+                                     struct csinn_segment_params *params);
+
+int shl_ref_segment_min_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                            struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_min_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                       struct csinn_tensor *output,
+                                       struct csinn_segment_params *params);
+
+int shl_ref_segment_min_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                              struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_prod_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                      struct csinn_tensor *output,
+                                      struct csinn_segment_params *params);
+
+int shl_ref_segment_prod_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                             struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_prod_quant(struct csinn_tensor *input,
+                                        struct csinn_tensor *segment_ids,
+                                        struct csinn_tensor *output,
+                                        struct csinn_segment_params *params);
+
+int shl_ref_segment_prod_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                               struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_sum_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                     struct csinn_tensor *output,
+                                     struct csinn_segment_params *params);
+
+int shl_ref_segment_sum_f32(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                            struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_unsorted_segment_sum_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                                       struct csinn_tensor *output,
+                                       struct csinn_segment_params *params);
+
+int shl_ref_segment_sum_quant(struct csinn_tensor *input, struct csinn_tensor *segment_ids,
+                              struct csinn_tensor *output, struct csinn_segment_params *params);
+
+int shl_ref_select_f32(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                       struct csinn_tensor *input1, struct csinn_tensor *output,
+                       struct csinn_select_params *params);
+
+int shl_ref_select_u8(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                      struct csinn_tensor *input1, struct csinn_tensor *output,
+                      struct csinn_select_params *params);
+
+int shl_ref_select_i8(struct csinn_tensor *condition, struct csinn_tensor *input0,
+                      struct csinn_tensor *input1, struct csinn_tensor *output,
+                      struct csinn_select_params *params);
+
+int shl_ref_shape_i32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_shape_params *params);
+
+int shl_ref_shape_u8(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_shape_params *params);
+
+int shl_ref_shape_i8(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_shape_params *params);
+
+int shl_ref_shuffle_channel_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_shuffle_channel_params *params);
+
+int shl_ref_shuffle_channel_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_shuffle_channel_params *params);
+
+int shl_ref_sigmoid_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_sigmoid_params *params);
+
+int shl_ref_sigmoid_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_sigmoid_params *params);
+
+int shl_ref_sign_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_sign_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_sin_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_sin_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_sinh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_sinh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_slice_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_slice_params *params);
+
+int shl_ref_slice_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_slice_params *params);
+
+int shl_ref_softmax_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_softmax_params *params);
+
+int shl_ref_softmax_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_softmax_params *params);
+
+int shl_ref_softplus_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_ref_softplus_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int shl_ref_softrelu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_relu_params *params);
+
+int shl_ref_softrelu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_relu_params *params);
+
+int shl_ref_softsign_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_ref_softsign_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_siso_params *params);
+
+int shl_ref_space_to_batch_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_space_to_batch_params *params);
+
+int shl_ref_space_to_batch_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_space_to_batch_params *params);
+
+int shl_ref_space_to_depth_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_space_to_depth_params *params);
+
+int shl_ref_space_to_depth_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_space_to_depth_params *params);
+
+int shl_ref_split_f32(struct csinn_tensor *input, struct csinn_tensor **output,
+                      struct csinn_split_params *params);
+
+int shl_ref_split_quant(struct csinn_tensor *input, struct csinn_tensor **output,
+                        struct csinn_split_params *params);
+
+int shl_ref_sqrt_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_sqrt_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_square_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_squeeze(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_squeeze_params *params);
+
+int shl_ref_stack_f32(struct csinn_tensor **input, struct csinn_tensor *output,
+                      struct csinn_stack_params *params);
+
+int shl_ref_stack_quant(struct csinn_tensor **input, struct csinn_tensor *output,
+                        struct csinn_stack_params *params);
+
+int shl_ref_strided_slice_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_strided_slice_params *params);
+
+int shl_ref_strided_slice_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_strided_slice_params *params);
+
+int shl_ref_sub_f32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_sub_quant(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                      struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_sum_stride_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_reduce_params *params);
+
+int shl_ref_sum_stride_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_reduce_params *params);
+
+int shl_ref_tan_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                    struct csinn_siso_params *params);
+
+int shl_ref_tan_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_tanh_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_tanh_f64(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_siso_params *params);
+
+int shl_ref_tanh_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_siso_params *params);
+
+int shl_ref_threshold_relu_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_relu_params *params);
+
+int shl_ref_threshold_relu_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_relu_params *params);
+
+int shl_ref_tile_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                     struct csinn_tile_params *params);
+
+int shl_ref_tile_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_tile_params *params);
+
+int shl_ref_topk_f32(struct csinn_tensor *input, struct csinn_tensor *output1,
+                     struct csinn_tensor *output2, struct csinn_topk_params *params);
+
+int shl_ref_topk_quant(struct csinn_tensor *input, struct csinn_tensor *output1,
+                       struct csinn_tensor *output2, struct csinn_topk_params *params);
+
+int shl_ref_transpose(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_transpose_params *params);
+
+int shl_ref_transpose_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_transpose_params *params);
+
+int shl_ref_trunc_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_siso_params *params);
+
+int shl_ref_trunc_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                        struct csinn_siso_params *params);
+
+int shl_ref_unpooling_f32(struct csinn_tensor *input, struct csinn_tensor *mask,
+                          struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int shl_ref_unpooling_quant(struct csinn_tensor *input, struct csinn_tensor *mask,
+                            struct csinn_tensor *output, struct csinn_unpooling_params *params);
+
+int shl_ref_unstack_f32(struct csinn_tensor *input, struct csinn_tensor **output,
+                        struct csinn_unstack_params *params);
+
+int shl_ref_unstack_quant(struct csinn_tensor *input, struct csinn_tensor **output,
+                          struct csinn_unstack_params *params);
+
+int shl_ref_xor_u32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                    struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_xor_u8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_xor_i8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                   struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_ref_yuv_rgb_scale_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_siso_params *params);
+
+int shl_ref_yuv_rgb_scale_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_siso_params *params);
+
+int32_t shl_ref_max_internal_s32(int32_t a, int32_t b);
+int32_t shl_ref_min_internal_s32(int32_t a, int32_t b);
+int32_t shl_ref_get_index(int32_t *dim, int32_t index0, int32_t index1, int32_t index2,
+                          int32_t index3);
+int32_t shl_ref_get_index_5(int32_t *dim, int32_t index0, int32_t index1, int32_t index2,
+                            int32_t index3, int32_t index4);
+int32_t shl_ref_get_index_iter(int32_t *dim, int dim_count, int32_t *index);
+float shl_ref_get_scale(int32_t multiplier, int32_t shift);
+float shl_ref_dequantize_u8_to_f32(uint8_t input, struct csinn_quant_info *qinfo);
+float shl_ref_dequantize_i8_to_f32(int8_t input, struct csinn_quant_info *qinfo);
+uint8_t shl_ref_quantize_f32_to_u8(float input, struct csinn_quant_info *qinfo);
+int8_t shl_ref_quantize_f32_to_i8(float input, struct csinn_quant_info *qinfo);
+uint8_t shl_ref_quantize_channel_u8(int32_t data, struct csinn_tensor *input,
+                                    struct csinn_tensor *output, float wscale);
+int8_t shl_ref_quantize_channel_i8(int32_t data, struct csinn_tensor *input,
+                                   struct csinn_tensor *output, float wscale);
+float shl_ref_uint8_to_float(uint8_t i, struct csinn_tensor *t);
+float shl_ref_int8_to_float(int8_t i, struct csinn_tensor *t);
+int16_t shl_ref_float32_to_float16(float value);
+float shl_ref_float16_to_float32(int16_t value);
+int16_t shl_ref_float32_to_bfloat16(float value);
+float shl_ref_bfloat16_to_float32(int16_t value);
+struct csinn_tensor *shl_ref_nchw_to_nhwc_8(struct csinn_tensor *t);
+void shl_ref_nhwc_to_nchw_8(struct csinn_tensor *nt, struct csinn_tensor *t);
+struct csinn_tensor *shl_ref_deconv_kernel_nchw_to_nhwc_f32(struct csinn_tensor *t,
+                                                            int32_t permute[4]);
+struct csinn_tensor *shl_ref_nchw_to_nhwc_f32(struct csinn_tensor *t);
+void shl_ref_nhwc_to_nchw_f32(struct csinn_tensor *nt, struct csinn_tensor *t);
+int32_t shl_ref_get_reduction_index(int32_t k, const int32_t *strides, const int32_t *extents,
+                                    int32_t n);
+struct csinn_tensor *shl_ref_alloc_float_tensor(struct csinn_tensor *src);
+void shl_ref_free_float_tensor(struct csinn_tensor *src);
+struct csinn_tensor *shl_ref_convert_float_tensor(struct csinn_tensor *src);
+void shl_ref_conv_free_float_tensor(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *kernel, struct csinn_tensor *bias);
+struct csinn_tensor *shl_ref_tensor_transform_f32(struct csinn_tensor *input);
+int shl_ref_tensor_transform_free_f32(struct csinn_tensor *input);
+uint8_t *shl_ref_f32_to_input_dtype(uint32_t index, float *data, struct csinn_session *sess);
+
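+/*
+ * Shared context for the broadcast helpers below: bc is deliberately left
+ * unprototyped so it can point at any of the element-wise DISO kernels
+ * declared above, and shl_ref_diso_broadcast_base() presumably invokes it
+ * on the broadcast-expanded operands it derives from input0/input1.
+ */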
+struct shl_ref_diso_callback {
+    void (*bc)();
+    struct csinn_tensor *input0;
+    struct csinn_tensor *input1;
+    struct csinn_tensor *output;
+    int32_t *input_dim;
+};
+
+int shl_ref_diso_broadcast_base(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                                struct csinn_tensor *output, struct csinn_diso_params *params,
+                                struct shl_ref_diso_callback *cb);
+int shl_ref_broadcast_to_shape(struct csinn_tensor *input, struct csinn_tensor *output,
+                               int32_t *shape, int32_t shape_count);
+int shl_ref_broadcast_to_shape_f32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   int32_t *shape, int32_t shape_count);
+int shl_ref_broadcast_to_shape_quant(struct csinn_tensor *input, struct csinn_tensor *output,
+                                     int32_t *shape, int32_t shape_count);
+
+int shl_ref_siso_callback_base(struct csinn_tensor *input, struct csinn_tensor *output,
+                               void *params, void *cb);
+int shl_ref_diso_callback_base(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                               struct csinn_tensor *output, void *params, void *cb);
+int shl_ref_conv_callback_base(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_tensor *kernel, struct csinn_tensor *bias, void *params,
+                               void *cb);
+
+void shl_ref_nn_init(struct csinn_tensor *input, struct csinn_tensor *output);
+
+void shl_ref_nn_deinit(struct csinn_tensor *input, struct csinn_tensor *output);
+
+int shl_ref_flatten_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+
+int shl_ref_reshape_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+
+int shl_ref_transpose_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                           struct csinn_transpose_params *params);
+
+void asr_buffer_init(struct csinn_asr_buffer_t *buffer, size_t buffer_size, size_t data_length);
+
+void *asr_buffer_insert_front(struct csinn_asr_buffer_t *buffer, void *input, size_t len);
+
+void *asr_buffer_insert_back(struct csinn_asr_buffer_t *buffer, void *input, size_t len);
+
+void *asr_buffer_get_buffer(struct csinn_asr_buffer_t *buffer);
+
+void asr_buffer_reset(struct csinn_asr_buffer_t *buffer);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INCLUDE_SHL_REF_H_
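
The scalar quantize/dequantize helpers above pair with csinn_quant_info from csinn_data_structure.h. A minimal round-trip sketch, assuming the asymmetric scheme real = (q - zero_point) * scale and the scale/zero_point field names (both assumptions taken from the surrounding headers, not verified against the implementation):

    #include "shl_ref.h"

    /* Hypothetical u8 round trip; qinfo field names assumed. */
    void quant_roundtrip(void)
    {
        struct csinn_quant_info qinfo = {0};
        qinfo.scale = 0.05f;      /* assumed: real = (q - zero_point) * scale */
        qinfo.zero_point = 128;

        uint8_t q = shl_ref_quantize_f32_to_u8(0.7f, &qinfo);
        float r = shl_ref_dequantize_u8_to_f32(q, &qinfo);
        /* r should match 0.7f to within one quantization step (scale). */
        (void)r;
    }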

+ 77 - 0
lib/install_nn2/include/shl_ref_i805.h

@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_I805_REF_H_
+#define INCLUDE_SHL_I805_REF_H_
+
+#include "csi_nn.h"
+#include "shl_ref.h"
+
+int shl_i805_ref_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+int shl_i805_ref_conv2d_init_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+int shl_i805_ref_depthwise_conv2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                          struct csinn_conv2d_params *params);
+
+int shl_i805_ref_avgpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_i805_ref_maxpool2d_init_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_pool_params *params);
+
+int shl_i805_ref_fullyconnected_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                   struct csinn_fc_params *params);
+
+int shl_i805_ref_fullyconnected_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                    struct csinn_fc_params *params);
+
+int shl_i805_ref_softmax_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_softmax_params *params);
+
+int shl_i805_ref_softmax_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_softmax_params *params);
+
+int shl_i805_ref_relu_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_relu_params *params);
+
+int shl_i805_ref_relu_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_relu_params *params);
+
+int shl_i805_ref_sigmoid_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_sigmoid_params *params);
+
+int shl_i805_ref_sigmoid_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_sigmoid_params *params);
+
+int shl_i805_ref_tanh_q7(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_siso_params *params);
+
+int shl_i805_ref_tanh_q15(struct csinn_tensor *input, struct csinn_tensor *output,
+                          struct csinn_siso_params *params);
+
+#endif  // INCLUDE_SHL_I805_REF_H_
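
These fixed-point entry points mirror the shl_ref calling convention. A sketch of driving the q7 ReLU directly, assuming the csinn_alloc_session/csinn_alloc_tensor/csinn_alloc_params helpers declared in csi_nn.h and assuming the q7 kernels accept in-place int8 buffers (neither is confirmed by this header):

    #include "csi_nn.h"
    #include "shl_ref_i805.h"

    /* Hypothetical direct call into the q7 ReLU kernel; allocator names
     * and parameter wiring assumed from csi_nn.h. */
    int relu_q7_example(int8_t *data, int len)
    {
        struct csinn_session *sess = csinn_alloc_session();
        struct csinn_tensor *in = csinn_alloc_tensor(sess);
        struct csinn_tensor *out = csinn_alloc_tensor(sess);
        struct csinn_relu_params *params =
            csinn_alloc_params(sizeof(struct csinn_relu_params), sess);

        in->data = data;
        in->dim[0] = len;
        in->dim_count = 1;
        out->data = data;   /* in-place operation assumed to be permitted */
        out->dim[0] = len;
        out->dim_count = 1;

        int ret = shl_i805_ref_relu_q7(in, out, params);

        csinn_free_tensor(in);
        csinn_free_tensor(out);
        csinn_free_session(sess);
        return ret;
    }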

+ 732 - 0
lib/install_nn2/include/shl_thead_rvv.h

@@ -0,0 +1,732 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_RVV_H_
+#define INCLUDE_SHL_RVV_H_
+
+#if __riscv_vector
+#include <riscv_vector.h>
+
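+/* __riscv_v encodes the detected V-extension version as
+ * major * 1000000 + minor * 1000, so 1000000 is RVV 1.0 and
+ * 7000 is the draft 0.7.1 spec implemented by T-Head cores. */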
+#if (__riscv_v == 1000000)
+#define RVV_1_0_0
+#elif (__riscv_v == 7000)
+#define RVV_0_7_1
+#endif
+
+#ifdef __riscv_xtheadvdot
+#define XTHEADVDOT
+#define SHL_USE_DOT_INT8  // default: support int8 dot
+// #define SHL_USE_DOT_INT4     // easter eggs
+#endif  // __riscv_xtheadvdot
+
+#endif  // __riscv_vector
+
+#include "csi_nn.h"
+#include "shl_gref.h"
+#include "shl_ref.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/********************************** initialization ******************************/
+int shl_rvv_conv2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_conv2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+int shl_rvv_depthwise_conv2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+int shl_rvv_depthwise_conv2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+int shl_rvv_depthwise_conv2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+int shl_rvv_depthwise_conv2d_init_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                       struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                       struct csinn_conv2d_params *params);
+
+int shl_rvv_avgpool2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_avgpool2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_avgpool2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_global_avgpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_rvv_maxpool2d_init_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_maxpool2d_init_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_maxpool2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_pool_params *params);
+int shl_rvv_global_maxpool2d_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_rvv_fullyconnected_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                struct csinn_fc_params *params);
+
+int shl_rvv_data_convert_init(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_siso_params *params);
+
+/************************************ convolution *********************************/
+/*********************************** im2col + gemm ********************************/
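+/*
+ * im2col flattens every convolution window into a matrix row so the
+ * convolution reduces to a single GEMM; the reorder_kernel_* helpers
+ * pre-pack the weights into the layout that GEMM expects. The packn /
+ * pack1ton / packnto1 suffixes appear to describe the channel packing of
+ * input and output (n-packed on both sides, unpacked-to-packed, and
+ * packed-to-unpacked, respectively).
+ */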
+void shl_rvv_conv_im2col_gemm_reorder_kernel_fp32(struct csinn_tensor *kernel,
+                                                  struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_fp16(struct csinn_tensor *kernel,
+                                                  struct csinn_conv2d_params *params);
+
+int shl_rvv_conv_im2col_gemm_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packn_fp32(struct csinn_tensor *kernel,
+                                                        struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packn_fp16(struct csinn_tensor *kernel,
+                                                        struct csinn_conv2d_params *params);
+
+int shl_rvv_conv_im2col_gemm_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+
+void shl_rvv_conv_im2col_gemm_reorder_kernel_pack1ton_fp32(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_pack1ton_fp16(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+
+int shl_rvv_conv_im2col_gemm_pack1ton_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_pack1ton_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packnto1_fp32(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packnto1_fp16(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+
+int shl_rvv_conv_im2col_gemm_packnto1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_packnto1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+
+/******************************** conv2d1x1s1 + gemm ******************************/
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_fp32(struct csinn_tensor *kernel,
+                                                struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_fp16(struct csinn_tensor *kernel,
+                                                struct csinn_conv2d_params *params);
+
+int shl_rvv_conv1x1s1_gemm_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packn_fp32(struct csinn_tensor *kernel,
+                                                      struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packn_fp16(struct csinn_tensor *kernel,
+                                                      struct csinn_conv2d_params *params);
+
+int shl_rvv_conv1x1s1_gemm_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_pack1ton_fp32(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_pack1ton_fp16(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+
+int shl_rvv_conv1x1s1_gemm_pack1ton_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_pack1ton_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packnto1_fp32(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packnto1_fp16(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+
+int shl_rvv_conv1x1s1_gemm_packnto1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_packnto1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+
+/************************************* winograd ***********************************/
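+/*
+ * Naming appears to follow Winograd F(m, r) notation: b6f3s1 is an output
+ * tile of 6, a 3x3 filter, and stride 1 (likewise b4f3s1 with tile 4).
+ * The *_trans_kernel_* helpers transform the weights once, offline,
+ * before the packed forward passes below are run.
+ */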
+void shl_rvv_wg_b6f3s1_trans_kernel_packn_fp32(struct csinn_tensor *src_kernel,
+                                               struct csinn_tensor *dst_kernel);
+void shl_rvv_wg_b6f3s1_trans_kernel_packn_fp16(struct csinn_tensor *src_kernel,
+                                               struct csinn_tensor *dst_kernel);
+
+int shl_rvv_wg_b6f3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+int shl_rvv_wg_b6f3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+void shl_rvv_wg_b4f3s1_trans_kernel_packn_fp32(struct csinn_tensor *src_kernel,
+                                               struct csinn_tensor *dst_kernel);
+void shl_rvv_wg_b4f3s1_trans_kernel_packn_fp16(struct csinn_tensor *src_kernel,
+                                               struct csinn_tensor *dst_kernel);
+void shl_rvv_wg_b4f3s1_trans_kernel_packn_int8(struct csinn_tensor *src_kernel,
+                                               struct csinn_tensor *dst_kernel);
+
+int shl_rvv_wg_b4f3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+int shl_rvv_wg_b4f3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+int shl_rvv_wg_b4f3s1_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                 struct csinn_conv2d_params *params);
+
+/******************************* depthwise convolution ****************************/
+int shl_rvv_dwconv3x3s1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s1_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+
+void shl_rvv_dwconv_reorder_kernel_packn_fp32(struct csinn_tensor *kernel,
+                                              struct csinn_conv2d_params *params);
+void shl_rvv_dwconv_reorder_kernel_packn_fp16(struct csinn_tensor *kernel,
+                                              struct csinn_conv2d_params *params);
+void shl_rvv_dwconv_reorder_kernel_packn_int8(struct csinn_tensor *kernel,
+                                              struct csinn_conv2d_params *params);
+
+int shl_rvv_dwconv3x3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s1_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+int shl_rvv_dwconv3x3s2_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                   struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                   struct csinn_conv2d_params *params);
+
+int shl_rvv_dwconv_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+int shl_rvv_dwconv_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
+int shl_rvv_dwconv_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                              struct csinn_conv2d_params *params);
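The packn depthwise path appears to pair each dwconv entry with the matching reorder helper above. A sketch under the assumption that the kernel repack happens once, in place, before the first call:

    #include "shl_thead_rvv.h"

    static int dwconv3x3s1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
                                struct csinn_conv2d_params *params)
    {
        /* Assumed one-time, in-place repack into the packn layout. */
        shl_rvv_dwconv_reorder_kernel_packn_fp16(kernel, params);
        return shl_rvv_dwconv3x3s1_packn_fp16(input, output, kernel, bias, params);
    }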
+/*************************************** gemm *************************************/
+void shl_rvv_reorder_kernel_n8_fp32(float *a, float *sa, int m, int k, int ldx);
+void shl_rvv_reorder_input_z8_fp32(float *b, float *sb, int k, int n, int ldx);
+void shl_rvv_gemm_8x8_fp32(float *dst, const float *sa, const float *sb, float *bias, int m, int k,
+                           int n, int ldc);
+
+void shl_rvv256_reorder_input_z16_fp32(float *b, float *sb, int k, int n, int ldx);
+void shl_rvv256_gemm_8x16_fp32(float *dst, const float *sa, const float *sb, float *bias, int m,
+                               int k, int n, int ldc);
+
+void shl_rvv_reorder_kernel_n8_fp16(__fp16 *a, __fp16 *sa, int m, int k, int ldx);
+void shl_rvv_reorder_input_z16_fp16(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+void shl_rvv_gemm_8x16_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias, int m,
+                            int k, int n, int ldc);
+
+void shl_rvv256_reorder_kernel_n16_fp16(__fp16 *a, __fp16 *sa, int m, int k, int ldx);
+void shl_rvv256_reorder_input_z16_fp16(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+void shl_rvv256_gemm_16x16_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb, __fp16 *bias,
+                                int m, int k, int n, int ldc);
+
+void shl_rvv_reorder_kernel_n8_int8(int8_t *a, int8_t *sa, int m, int k, int ldx);
+void shl_rvv_reorder_input_z8_int8(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv_gemm_8x8_int32(int32_t *dst, const int8_t *sa, const int8_t *sb, int32_t *bias, int m,
+                            int k, int n, int ldc);
+void shl_rvv_gemm_8x8_int8(int8_t *dst, const int8_t *sa, const int8_t *sb, int32_t *bias, int m,
+                           int k, int n, int ldc, int32_t out_zp, int32_t *mult, int32_t *shift);
+
+void shl_rvv256_reorder_input_z16_int8(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv256_gemm_8x16_int32(int32_t *dst, const int8_t *sa, const int8_t *sb, int32_t *bias,
+                                int m, int k, int n, int ldc);
+
+void shl_rvv_reorder_input_n8_int4(int8_t *a, int8_t *sa, int m, int k, int ldx);
+void shl_rvv_reorder_kernel_n8_int4(int8_t *b, int8_t *sb, int n, int k, int ldx);
+void shl_rvv_gemm_8x8_int4(int8_t *dst, const int8_t *sa, const int8_t *sb, int m, int k, int n,
+                           int ldc, int32_t *bias, int32_t out_zp, int32_t *mult, int32_t *shift);
+
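These GEMM kernels follow a pack-then-compute pattern: reorder_kernel_* packs the A (weight) matrix, reorder_input_* packs the B (im2col) matrix, and the gemm consumes both packed buffers. A fp32 sketch; the reading dst[m x n] = sa[m x k] * sb[k x n] + bias, with ldx/ldc as row strides, is inferred from the signatures rather than stated here:

    #include <stdlib.h>
    #include "shl_thead_rvv.h"

    void gemm_fp32(float *dst, float *a, float *b, float *bias, int m, int k, int n)
    {
        float *sa = malloc(m * k * sizeof(float));   /* packed A */
        float *sb = malloc(k * n * sizeof(float));   /* packed B */
        shl_rvv_reorder_kernel_n8_fp32(a, sa, m, k, k);   /* ldx assumed = row stride */
        shl_rvv_reorder_input_z8_fp32(b, sb, k, n, n);
        shl_rvv_gemm_8x8_fp32(dst, sa, sb, bias, m, k, n, n);
        free(sa);
        free(sb);
    }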
+/************************************ gemm ncxhwx *********************************/
+void shl_rvv_reorder_kernel_packn_fp32(float *a, float *sa, int m, int k, int ldx);
+void shl_rvv_reorder_input_z8_packn_fp32(float *b, float *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_8xpack2n_fp32(float *dst, const float *sa, const float *sb, float *bias,
+                                       int m, int k, int n, int ldc);
+void shl_rvv_reorder_input_z12_packn_fp32(float *b, float *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_12xpack2n_fp32(float *dst, const float *sa, const float *sb, float *bias,
+                                        int m, int k, int n, int ldc);
+
+void shl_rvv_reorder_kernel_packn_fp16(__fp16 *a, __fp16 *sa, int m, int k, int ldx);
+void shl_rvv_reorder_input_z8_packn_fp16(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_8xpack2n_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb,
+                                       __fp16 *bias, int m, int k, int n, int ldc);
+void shl_rvv_reorder_input_z12_packn_fp16(__fp16 *b, __fp16 *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_12xpack2n_fp16(__fp16 *dst, const __fp16 *sa, const __fp16 *sb,
+                                        __fp16 *bias, int m, int k, int n, int ldc);
+
+void shl_rvv_reorder_input_z8_packn_int8(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_8xpackn_int8(int8_t *dst, const int8_t *sa, const int8_t *sb,
+                                      int32_t *bias, int m, int k, int n, int ldc, int32_t out_zp,
+                                      int32_t *mult, int32_t *shift);
+void shl_rvv_reorder_input_z12_packn_int8(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_12xpackn_int8(int8_t *dst, const int8_t *sa, const int8_t *sb,
+                                       int32_t *bias, int m, int k, int n, int ldc, int32_t out_zp,
+                                       int32_t *mult, int32_t *shift);
+
+void shl_rvv_reorder_input_z8_packn_int4(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_8xpackn_int4(int8_t *dst, const int8_t *sa, const int8_t *sb,
+                                      int32_t *bias, int m, int k, int n, int ldc, int32_t out_zp,
+                                      int32_t *mult, int32_t *shift);
+
+void shl_rvv_reorder_input_z12_packn_int4(int8_t *b, int8_t *sb, int k, int n, int ldx);
+void shl_rvv_ncxhwx_gemm_12xpackn_int4(int8_t *dst, const int8_t *sa, const int8_t *sb,
+                                       int32_t *bias, int m, int k, int n, int ldc, int32_t out_zp,
+                                       int32_t *mult, int32_t *shift);
+
+void shl_rvv_reorder_input_z12_pack1ton_fp32(float *b, float *sb, int inc, int maxk, int n,
+                                             int ldx);
+void shl_rvv_reorder_input_z12_pack1ton_fp16(__fp16 *b, __fp16 *sb, int inc, int maxk, int n,
+                                             int ldx);
+void shl_rvv_reorder_input_z12_pack1ton_int8(int8_t *b, int8_t *sb, int inc, int maxk, int n,
+                                             int ldx);
+
+/************************************ pooling *********************************/
+int shl_rvv_avgpool2x2s2_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_avgpool2x2s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_avgpool2x2s2_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_avgpool2x2s2_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s1_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s1_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_rvv_maxpool2x2s2_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_p1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                              struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_p1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_p1_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_p1_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_p1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                 struct csinn_pool_params *params);
+
+int shl_rvv_global_avgpool2d_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+int shl_rvv_global_avgpool2d_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_rvv_global_maxpool2d_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+int shl_rvv_global_maxpool2d_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_pool_params *params);
+
+int shl_rvv_maxpool2x2s2_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool2x2s2_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s2_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_maxpool3x3s1_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+
+int shl_rvv_avgpool2x2s2_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool2x2s2_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool2x2s2_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s1_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s1_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s2_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool3x3s1_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                    struct csinn_pool_params *params);
+int shl_rvv_avgpool_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+int shl_rvv_avgpool_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+int shl_rvv_avgpool_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+int shl_rvv_global_maxpool2d_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+int shl_rvv_global_maxpool2d_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+int shl_rvv_global_maxpool2d_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+int shl_rvv_global_avgpool2d_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+int shl_rvv_global_avgpool2d_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+int shl_rvv_global_avgpool2d_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_pool_params *params);
+
+int shl_rvv_maxpool_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+int shl_rvv_maxpool_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+int shl_rvv_maxpool_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                               struct csinn_pool_params *params);
+
+/************************************ fullyconnected *********************************/
+void shl_rvv_fc_gemv_transform_weight_fp32(struct csinn_tensor *weights);
+void shl_rvv_fc_gemv_transform_weight_fp16(struct csinn_tensor *weights);
+void shl_rvv_fc_gemv_transform_weight_int8(struct csinn_tensor *weights);
+
+int shl_rvv_fullyconnected_packn_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                      struct csinn_fc_params *params);
+int shl_rvv_fullyconnected_packn_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                      struct csinn_fc_params *params);
+int shl_rvv_fullyconnected_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                      struct csinn_fc_params *params);
+
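The fully-connected path mirrors the convolution path: a one-time weight transform followed by the packn GEMV. A fp32 sketch, assuming the transform rewrites the weight tensor in place:

    #include "shl_thead_rvv.h"

    static int fc_forward_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
                               struct csinn_tensor *weights, struct csinn_tensor *bias,
                               struct csinn_fc_params *params)
    {
        shl_rvv_fc_gemv_transform_weight_fp32(weights);  /* assumed in-place repack */
        return shl_rvv_fullyconnected_packn_fp32(input, output, weights, bias, params);
    }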
+/************************************ activation *********************************/
+int shl_rvv_relu_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+int shl_rvv_relu_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+int shl_rvv_relu_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                      struct csinn_relu_params *params);
+
+int shl_rvv_relu6_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+int shl_rvv_relu6_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+int shl_rvv_relu6_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                       struct csinn_relu_params *params);
+
+int shl_rvv_leaky_relu_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_relu_params *params);
+int shl_rvv_leaky_relu_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_relu_params *params);
+int shl_rvv_leaky_relu_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_relu_params *params);
+
+int shl_rvv_sigmoid_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_sigmoid_params *params);
+
+int shl_rvv_softmax_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_softmax_params *params);
+
+int shl_rvv_prelu_fp32(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                       struct csinn_tensor *output, struct csinn_prelu_params *params);
+int shl_rvv_prelu_fp16(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                       struct csinn_tensor *output, struct csinn_prelu_params *params);
+int shl_rvv_prelu_int8(struct csinn_tensor *input, struct csinn_tensor *alpha,
+                       struct csinn_tensor *output, struct csinn_prelu_params *params);
+
+/************************************ layout/memory transform *********************************/
+int shl_rvv_concat_fp32(struct csinn_tensor **input, struct csinn_tensor *output,
+                        struct csinn_concat_params *params);
+int shl_rvv_concat_fp16(struct csinn_tensor **input, struct csinn_tensor *output,
+                        struct csinn_concat_params *params);
+int shl_rvv_concat_int8(struct csinn_tensor **input, struct csinn_tensor *output,
+                        struct csinn_concat_params *params);
+
+int shl_rvv_reshape_fp32(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+int shl_rvv_reshape_fp16(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+int shl_rvv_reshape_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                         struct csinn_reshape_params *params);
+
+/************************************ basic math *********************************/
+int shl_rvv_add_fp32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+int shl_rvv_add_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+int shl_rvv_add_int8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_rvv_mul_fp32(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+int shl_rvv_mul_fp16(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+int shl_rvv_mul_int8(struct csinn_tensor *input0, struct csinn_tensor *input1,
+                     struct csinn_tensor *output, struct csinn_diso_params *params);
+
+int shl_rvv_sum_stride_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                            struct csinn_reduce_params *params);
+
+/************************************ utils *********************************/
+void shl_rvv_pad_input_fp32(const float *input, float *input_padded, int inc, int inh, int inw,
+                            int padded_h, int padded_w, int pad_top, int pad_left);
+void shl_rvv_pad_input_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh, int inw,
+                            int padded_h, int padded_w, int pad_top, int pad_left);
+void shl_rvv_pad_input_int8(const int8_t *input, int8_t *input_padded, int inc, int inh, int inw,
+                            int padded_h, int padded_w, int pad_top, int pad_left,
+                            int8_t pad_value);
+
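From the parameter names, the pad_input helpers copy a CHW tensor into a larger zero-padded buffer (the int8 variant takes an explicit pad_value) before the sliding-window kernels run. A sketch padding one pixel on each side; the buffer layout is my inference, not documented here:

    #include <stdlib.h>
    #include "shl_thead_rvv.h"

    /* Pad a [c][h][w] fp32 tensor by one pixel on every side. */
    float *pad1(const float *src, int c, int h, int w)
    {
        int ph = h + 2, pw = w + 2;
        float *dst = calloc((size_t)c * ph * pw, sizeof(float));
        shl_rvv_pad_input_fp32(src, dst, c, h, w, ph, pw, /*pad_top=*/1, /*pad_left=*/1);
        return dst;
    }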
+void shl_rvv_pad_input_packn_fp32(const float *input, float *input_padded, int inc, int inh,
+                                  int inw, int padded_h, int padded_w, int pad_top, int pad_left);
+void shl_rvv_pad_input_packn_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh,
+                                  int inw, int padded_h, int padded_w, int pad_top, int pad_left);
+void shl_rvv_pad_input_packn_int8(const int8_t *input, int8_t *input_padded, int inc, int inh,
+                                  int inw, int padded_h, int padded_w, int pad_top, int pad_left,
+                                  int8_t pad_value);
+
+void shl_rvv_pad_input_pack1ton_fp32(const float *input, float *input_padded, int inc, int inh,
+                                     int inw, int padded_h, int padded_w, int pad_top,
+                                     int pad_left);
+void shl_rvv_pad_input_pack1ton_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh,
+                                     int inw, int padded_h, int padded_w, int pad_top,
+                                     int pad_left);
+void shl_rvv_pad_input_pack1ton_int8(const int8_t *input, int8_t *input_padded, int inc, int inh,
+                                     int inw, int padded_h, int padded_w, int pad_top, int pad_left,
+                                     int8_t pad_value);
+
+void shl_rvv_reorder_input_pack1ton_fp32(const float *src, float *dst, int inc, int inh, int inw);
+void shl_rvv_reorder_input_pack1ton_fp16(const __fp16 *src, __fp16 *dst, int inc, int inh, int inw);
+void shl_rvv_reorder_input_pack1ton_int8(const int8_t *src, int8_t *dst, int inc, int inh, int inw);
+void shl_rvv_reorder_input_packnto1_fp32(const float *src, float *dst, int inc, int inh, int inw);
+void shl_rvv_reorder_input_packnto1_fp16(const __fp16 *src, __fp16 *dst, int inc, int inh, int inw);
+void shl_rvv_reorder_input_packnto1_int8(const int8_t *src, int8_t *dst, int inc, int inh, int inw);
+
+void shl_rvv_saturated_int8(int32_t *src, int8_t *dst, int32_t out_zp, int size);
+
+void shl_rvv_requantize(int32_t *src, int32_t multiplier, int32_t shift, int channel_size);
+
+void shl_rvv_pad_input_int4_trans_int8(const int8_t *input, int8_t *input_padded, int inc, int inh,
+                                       int inw, int padded_h, int padded_w, int pad_top,
+                                       int pad_left, int8_t pad_value);
+void shl_rvv_int4_to_int8(int8_t *src, int8_t *dst, int size);
+void shl_rvv_int8_to_int4(int8_t *src, int8_t *dst, int size);
+void shl_rvv_int4_trans_int8(int8_t *src, int8_t *dst, int size);
+void shl_rvv_saturated_int4(int32_t *src, int8_t *dst, int32_t out_zp, int size);
+
+int shl_rvv_data_convert_int8_to_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_siso_params *params);
+int shl_rvv_data_convert_int4_to_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_siso_params *params);
+
+struct csinn_callback *shl_cb_map_rvv(int op, int dtype);
+void shl_rvv_reg_op(enum csinn_dtype_enum dtype, enum csinn_op_enum op_name, void *init, void *exec,
+                    void *est);
+
+int csrr_vl();
+int csrr_vlenb();
+
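csrr_vl() and csrr_vlenb() read the RISC-V vector CSRs vl and vlenb (vector register width in bytes). A plausible connection to the packn layouts above, inferred rather than documented here, is that packn is the element count per vector register:

    /* Elements of each type that fit in one vector register;
     * e.g. vlenb == 16 on a VLEN=128 core -> packn 4 (fp32) / 8 (fp16). */
    static inline int packn_fp32(void) { return csrr_vlenb() / (int)sizeof(float); }
    static inline int packn_fp16(void) { return csrr_vlenb() / (int)sizeof(__fp16); }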
+enum avgpool_loc_enum {
+    AVGPOOL_LEFT_TOP = 0,
+    AVGPOOL_RIGHT_TOP,
+    AVGPOOL_LEFT_BOTTOM,
+    AVGPOOL_RIGHT_BOTTOM,
+    AVGPOOL_LEFT,
+    AVGPOOL_RIGHT,
+    AVGPOOL_TOP,
+    AVGPOOL_BOTTOM,
+    AVGPOOL_CENTER,
+};
+
+int shl_rvv_avgpool_get_window_size(struct csinn_pool_params *params, int idx_h_start,
+                                    int idx_h_end, int idx_w_start, int idx_w_end,
+                                    enum avgpool_loc_enum loc);
+
+#ifdef SHL_USE_DOT_INT8
+int shl_rvv_conv2d_init_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_int8(struct csinn_tensor *kernel,
+                                                  struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packn_int8(struct csinn_tensor *kernel,
+                                                        struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_pack1ton_int8(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_pack1ton_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packnto1_int8(struct csinn_tensor *kernel,
+                                                           struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_packnto1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                           struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                           struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_int8(struct csinn_tensor *kernel,
+                                                struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packn_int8(struct csinn_tensor *kernel,
+                                                      struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_packn_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_pack1ton_int8(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_pack1ton_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packnto1_int8(struct csinn_tensor *kernel,
+                                                         struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_packnto1_int8(struct csinn_tensor *input, struct csinn_tensor *output,
+                                         struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                         struct csinn_conv2d_params *params);
+void shl_rvv_fc_gemv_transform_weight_int8_dot(struct csinn_tensor *weights);
+int shl_rvv_fullyconnected_packn_int8_dot(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                          struct csinn_fc_params *params);
+#endif
+
+#ifdef SHL_USE_DOT_INT4
+int shl_rvv_conv2d_init_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                             struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                             struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_int4(struct csinn_tensor *kernel,
+                                                  struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                  struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                  struct csinn_conv2d_params *params);
+void shl_rvv_conv_im2col_gemm_reorder_kernel_packn_int4(struct csinn_tensor *kernel,
+                                                        struct csinn_conv2d_params *params);
+int shl_rvv_conv_im2col_gemm_packn_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                        struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                        struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_int4(struct csinn_tensor *kernel,
+                                                struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                struct csinn_conv2d_params *params);
+void shl_rvv_conv1x1s1_gemm_reorder_kernel_packn_int4(struct csinn_tensor *kernel,
+                                                      struct csinn_conv2d_params *params);
+int shl_rvv_conv1x1s1_gemm_packn_int4(struct csinn_tensor *input, struct csinn_tensor *output,
+                                      struct csinn_tensor *kernel, struct csinn_tensor *bias,
+                                      struct csinn_conv2d_params *params);
+void shl_rvv_fc_gemv_transform_weight_int4_dot(struct csinn_tensor *weights);
+int shl_rvv_fullyconnected_packn_int4_dot(struct csinn_tensor *input, struct csinn_tensor *output,
+                                          struct csinn_tensor *weights, struct csinn_tensor *bias,
+                                          struct csinn_fc_params *params);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INCLUDE_SHL_RVV_H_

+ 97 - 0
lib/install_nn2/include/shl_utils.h

@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* CSI-NN2 version 2.0.x */
+
+#ifndef INCLUDE_SHL_UTILS_H_
+#define INCLUDE_SHL_UTILS_H_
+
+#include <assert.h>
+#include <float.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#if (!defined SHL_BUILD_RTOS)
+#include <omp.h>
+#endif
+#include "csinn_data_structure.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void shl_get_top5(float *buf, uint32_t size, float *prob, uint32_t *cls);
+void shl_show_top5(struct csinn_tensor *output, struct csinn_session *sess);
+uint64_t shl_get_timespec();
+void shl_print_time_interval(uint64_t start, uint64_t end, const char *msg);
+void shl_statistical_mean_std(float *data, int sz);
+void shl_quantize_multiplier(double double_multiplier, int32_t *quantized_multiplier,
+                             int32_t *shift);
+
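shl_get_timespec() and shl_print_time_interval() form a simple wall-clock profiler, and shl_get_top5() extracts the five best scores and class ids from a flat output buffer. A usage sketch; the scores/n names are illustrative, and the printed units are whatever the helper chooses internally:

    #include <stdio.h>
    #include "shl_utils.h"

    void time_and_report(float *scores, uint32_t n)  /* scores/n: hypothetical output */
    {
        uint64_t start = shl_get_timespec();
        /* ... run one inference producing scores[0..n-1] ... */
        uint64_t end = shl_get_timespec();
        shl_print_time_interval(start, end, "inference");

        float prob[5];
        uint32_t cls[5];
        shl_get_top5(scores, n, prob, cls);
        for (int i = 0; i < 5; i++)
            printf("top%d: class %u score %f\n", i + 1, cls[i], prob[i]);
    }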
+void shl_register_runtime_callback(int api, void *cb);
+void shl_register_op_callback(int api, void *cb);
+int shl_op_callback_map(struct csinn_params_base *base, int op, int dtype);
+
+void *shl_get_p0_cb(struct csinn_params_base *base);
+void *shl_get_init_cb(struct csinn_params_base *base);
+
+enum csinn_rmode_enum shl_get_run_mode(struct csinn_params_base *base);
+
+struct shl_cb_op_list {
+    struct shl_cb_op_list *next;
+    enum csinn_dtype_enum dtype;
+    enum csinn_op_enum op_name;
+    struct csinn_callback *cb;
+};
+
+struct shl_cb_op_list *shl_cb_list_end(struct shl_cb_op_list *list);
+struct csinn_callback *shl_cb_list_match(struct shl_cb_op_list *list, enum csinn_dtype_enum dtype,
+                                         enum csinn_op_enum op_name);
+
+struct shl_bm_sections {
+    int32_t graph_offset;
+    int32_t graph_size;
+    int32_t params_offset;
+    int32_t params_size;
+    int32_t info_offset;
+    int32_t info_size;
+    int32_t debug_offset;
+    int32_t debug_size;
+};
+
+struct shl_binary_model_section_info {
+    int32_t section_num;
+    int32_t section_info_size;
+    int32_t reserve[6];
+    struct shl_bm_sections sections[127];
+};
+
+char *shl_bm_header_str();
+
+void shl_dump_bm_header(FILE *f);
+void shl_dump_bm_section_info(FILE *f, struct shl_binary_model_section_info *info);
+void shl_dump_bm_graph_info_section(FILE *f, struct csinn_session *sess);
+void shl_bm_session_load(struct csinn_session *dest, struct csinn_session *src);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INCLUDE_SHL_UTILS_H_

BIN
lib/install_nn2/lib/libshl_pnna.so


+ 1 - 0
lib/install_nn2/version

@@ -0,0 +1 @@
+2.0.20

+ 154 - 0
lib/plink/include/process_linker.h

@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2021 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef _PROCESS_LINKER_H_
+#define _PROCESS_LINKER_H_
+
+/* Maximum data descriptors in one packet */
+#define PLINK_MAX_DATA_DESCS 4
+
+/* Close all the connections from client. */
+/* Can be used as the second parameter of PLINK_close when the instance is created as SERVER */
+#define PLINK_CLOSE_ALL -1
+
+/* invalid file descriptor */
+#define PLINK_INVALID_FD -1
+
+#define DATA_HEADER_SIZE (sizeof(PlinkDescHdr))
+#define DATA_SIZE(type) (sizeof(type) - DATA_HEADER_SIZE)
+
+typedef void *PlinkHandle;
+typedef void PlinkDescriptor;
+typedef int PlinkChannelID;
+
+typedef enum _PlinkStatus
+{
+    PLINK_STATUS_OK = 0,
+    PLINK_STATUS_MORE_DATA = 1,     /* have more data to parse in the receive buffer */
+    PLINK_STATUS_TIMEOUT = 2,       /* wait timed out: no data received within the given time */
+    PLINK_STATUS_ERROR = -1,        /* general error */
+    PLINK_STATUS_WRONG_PARAMS = -2, /* wrong parameters */
+    PLINK_STATUS_NO_MEMORY = -3,    /* not enough memory */
+} PlinkStatus;
+
+/* plink mode */
+typedef enum _PlinkMode
+{
+    PLINK_MODE_SERVER = 0,      /* run plink as server; server should be launched before client */
+    PLINK_MODE_CLIENT,          /* run plink as client which can connect to server */
+    PLINK_MODE_MAX
+} PlinkMode;
+
+typedef struct _PlinkDescHdr
+{
+    unsigned int size;      /* data size, excluding this header */
+    int type;               /* type of this data descriptor */
+    int id;                 /* buffer id if it is a buffer descriptor; only values greater than 0 are valid */
+} PlinkDescHdr;
+
+/* data packet can be sent/received in one send/recv call */
+typedef struct _PlinkPacket
+{
+    int fd;                                         /* file descriptor; PLINK_INVALID_FD means it is not valid */
+    unsigned int timestamp;                         /* timestamp of this packet, i.e. the time it should be rendered */
+    int num;                                        /* number of valid data descriptor entries in list[] */
+    PlinkDescriptor *list[PLINK_MAX_DATA_DESCS];    /* list of pointers to data descriptors */
+} PlinkPacket;
+
+/**
+ * \brief Create a plink instance.
+ *
+ * Create a plink object with the specified name as server or client.
+ * When mode is PLINK_MODE_SERVER, a file of the specified name will be created.
+ *
+ * \param plink Pointer to the variable that receives the plink instance.
+ * \param name Socket file name.
+ * \param mode plink mode, server or client.
+ * \return PLINK_STATUS_OK on success,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_create(PlinkHandle *plink, const char *name, PlinkMode mode);
+
+/**
+ * \brief Create a connection between server and client
+ *
+ * Server calls this function to wait for and accept a connection.
+ * Client calls this function to connect to the server.
+ *
+ * \param plink Pointer to the plink instance.
+ * \param channel ID of the new connection. Valid for server only; should be 0 for client.
+ * \return PLINK_STATUS_OK on success,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_connect(PlinkHandle plink, PlinkChannelID *channel);
+
+/**
+ * \brief Send a packet
+ *
+ * Send a packet through the channel.
+ *
+ * \param plink Pointer to the plink instance.
+ * \param channel The channel on which to send this packet. Valid for server only; should be 0 for client.
+ * \param pkt Pointer to the packet to be sent.
+ * \return PLINK_STATUS_OK on success,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_send(PlinkHandle plink, PlinkChannelID channel, PlinkPacket *pkt);
+
+/**
+ * \brief Wait for data from channel
+ *
+ * This function returns once data has been received from the channel.
+ *
+ * \param plink Pointer to the plink instance.
+ * \param channel The channel on which to wait for data. Valid for server only; should be 0 for client.
+ * \param timeout_ms Timeout in milliseconds.
+ * \return PLINK_STATUS_OK on success,
+ * \return PLINK_STATUS_TIMEOUT if no data is received within timeout_ms,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_wait(PlinkHandle plink, PlinkChannelID channel, int timeout_ms);
+
+/**
+ * \brief Receive data
+ *
+ * Receive data from the channel.
+ * Data descriptors of the packet are stored in the internal buffer, 
+ * and may be overwritten in the next PLINK_recv call. 
+ *
+ * \param plink Pointer to the plink instance.
+ * \param channel The channel from which to receive data. Valid for server only; should be 0 for client.
+ * \param pkt Pointer to the packet that receives the incoming data.
+ * \return PLINK_STATUS_OK on success,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_recv(PlinkHandle plink, PlinkChannelID channel, PlinkPacket *pkt);
+
+/**
+ * \brief Close connections
+ *
+ * Close connections. Server can set channel to PLINK_CLOSE_ALL to close all connections.
+ *
+ * \param plink Pointer to the plink instance.
+ * \param channel The connection to be closed. Valid for server only; should be 0 for client.
+ * \return PLINK_STATUS_OK on success,
+ * \return other values on failure.
+ */
+PlinkStatus PLINK_close(PlinkHandle plink, PlinkChannelID channel);
+
+#endif /* !_PROCESS_LINKER_H_ */
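Read together, the comments above define the lifecycle create -> connect -> send/wait+recv -> close. A minimal client-side sketch built only from these declarations (the socket path and timeout are illustrative):

    #include "process_linker.h"

    int run_client(const char *sock_path)
    {
        PlinkHandle plink = NULL;
        PlinkChannelID channel = 0;   /* clients always use channel 0 */
        PlinkPacket pkt;

        if (PLINK_create(&plink, sock_path, PLINK_MODE_CLIENT) != PLINK_STATUS_OK)
            return -1;
        if (PLINK_connect(plink, &channel) != PLINK_STATUS_OK)
            return -1;

        /* Block up to 1000 ms for the server, then fetch the packet. */
        if (PLINK_wait(plink, channel, 1000) == PLINK_STATUS_OK &&
            PLINK_recv(plink, channel, &pkt) == PLINK_STATUS_OK) {
            /* pkt.list[0..pkt.num-1] point into plink's internal buffer;
             * consume them before the next PLINK_recv call. */
        }
        return PLINK_close(plink, channel) == PLINK_STATUS_OK ? 0 : -1;
    }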

+ 164 - 0
lib/plink/include/process_linker_types.h

@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2021 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef _PROCESS_LINKER_TYPES_H_
+#define _PROCESS_LINKER_TYPES_H_
+
+#include "process_linker.h"
+
+/* Setting PlinkMsg.msg to this exit code requests closing the connection */
+#define PLINK_EXIT_CODE -1
+
+/* image/video color format */
+typedef enum _PlinkColorFormat
+{
+    PLINK_COLOR_FormatUnused,
+    PLINK_COLOR_FormatMonochrome,
+    PLINK_COLOR_FormatYUV420Planar,
+    PLINK_COLOR_FormatYUV420SemiPlanar,
+    PLINK_COLOR_FormatYUV420SemiPlanarP010,
+    PLINK_COLOR_FormatYUV422Planar,
+    PLINK_COLOR_FormatYUV422SemiPlanar,
+    PLINK_COLOR_Format32bitBGRA8888,
+    PLINK_COLOR_Format32bitARGB8888,
+    PLINK_COLOR_Format24BitRGB888,
+    PLINK_COLOR_Format24BitRGB888Planar,
+    PLINK_COLOR_Format24BitBGR888,
+    PLINK_COLOR_Format24BitBGR888Planar,
+    PLINK_COLOR_FormatRawBayer8bit,
+    PLINK_COLOR_FormatRawBayer10bit,
+    PLINK_COLOR_FormatMax
+} PlinkColorFormat;
+
+/* Data descriptor type */
+typedef enum _PlinkDescType
+{
+    PLINK_TYPE_1D_BUFFER = 0,   /* PlinkBufferInfo */
+    PLINK_TYPE_2D_YUV,          /* PlinkYuvInfo */
+    PLINK_TYPE_2D_RGB,          /* PlinkRGBInfo */
+    PLINK_TYPE_OBJECT,          /* PlinkObjectInfo */
+    PLINK_TYPE_MESSAGE,         /* PlinkMsg */
+    PLINK_TYPE_TIME,            /* PlinkTimeInfo */
+    PLINK_TYPE_MAX
+} PlinkDescType;
+
+/* time type */
+typedef enum _PlinkTimeType
+{
+    PLINK_TIME_START = 0,       /* start time */
+    PLINK_TIME_CALIBRATION,     /* time delta for calibration */
+    PLINK_TIME_MAX
+} PlinkTimeType;
+
+/* 1D buffer */
+typedef struct _PlinkBufferInfo
+{
+    PlinkDescHdr header;
+    unsigned long long bus_address;
+    unsigned int offset;
+    unsigned int size;
+} PlinkBufferInfo;
+
+/* 2D YUV surface */
+typedef struct _PlinkYuvInfo
+{
+    PlinkDescHdr header;
+    PlinkColorFormat format;
+    unsigned long long bus_address_y;
+    unsigned long long bus_address_u;
+    unsigned long long bus_address_v;
+    unsigned int offset_y;
+    unsigned int offset_u;
+    unsigned int offset_v;
+    unsigned int pic_width;
+    unsigned int pic_height;
+    unsigned int stride_y;
+    unsigned int stride_u;
+    unsigned int stride_v;
+} PlinkYuvInfo;
+
+/* 2D RGB surface */
+typedef struct _PlinkRGBInfo
+{
+    PlinkDescHdr header;
+    PlinkColorFormat format;
+    unsigned long long bus_address_r;
+    unsigned long long bus_address_g;
+    unsigned long long bus_address_b;
+    unsigned long long bus_address_a;
+    unsigned int offset_r;
+    unsigned int offset_g;
+    unsigned int offset_b;
+    unsigned int offset_a;
+    unsigned int img_width;
+    unsigned int img_height;
+    unsigned int stride_r;
+    unsigned int stride_g;
+    unsigned int stride_b;
+    unsigned int stride_a;
+} PlinkRGBInfo;
+
+/* Feature map buffer after NPU inference */
+typedef struct _PlinkBox
+{
+    float x1;
+    float y1;
+    float x2;
+    float y2;
+} PlinkBox;
+
+typedef struct _PlinkLandmark
+{
+    float x[5];
+    float y[5];
+} PlinkLandmark;
+
+typedef struct _PlinkObjectDetect
+{
+    float score;
+    PlinkBox box;
+    PlinkLandmark landmark;
+} PlinkObjectDetect;
+
+typedef struct _PlinkObjectInfo
+{
+    PlinkDescHdr header;
+    unsigned long long bus_address;
+    unsigned int object_cnt;
+} PlinkObjectInfo;
+
+/* Used to send message */
+typedef struct _PlinkMsg
+{
+    PlinkDescHdr header;
+    int msg;                /* When greater than 0, it is the id of a buffer that can be released */
+                            /* When set to 0, it means a buffer can be released but its id is unknown */
+                            /* When set to PLINK_EXIT_CODE, it requests closing the connection */
+                            /* Other values are reserved */
+} PlinkMsg;
+
+/* time information */
+typedef struct _PlinkTimeInfo
+{
+    PlinkDescHdr header;
+    PlinkTimeType type;
+    long long seconds;
+    long long useconds;
+} PlinkTimeInfo;
+
+#endif /* !_PROCESS_LINKER_TYPES_H_ */
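Every descriptor begins with PlinkDescHdr, and DATA_SIZE() (from process_linker.h) fills header.size with the payload size excluding the header. A sketch that wraps one 1-D buffer descriptor in a packet (the id and addresses are placeholders):

    #include "process_linker_types.h"

    PlinkPacket make_buffer_packet(unsigned long long bus_addr, unsigned int size)
    {
        static PlinkBufferInfo buf;               /* must stay alive until sent */
        buf.header.type = PLINK_TYPE_1D_BUFFER;
        buf.header.size = DATA_SIZE(PlinkBufferInfo);
        buf.header.id   = 1;                      /* placeholder buffer id (> 0) */
        buf.bus_address = bus_addr;
        buf.offset      = 0;
        buf.size        = size;

        PlinkPacket pkt = {0};
        pkt.fd  = PLINK_INVALID_FD;               /* no file descriptor attached */
        pkt.num = 1;
        pkt.list[0] = (PlinkDescriptor *)&buf;
        return pkt;
    }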

BIN
lib/plink/lib/libplink.so


+ 56 - 0
lib/vmem/include/video_mem.h

@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021-2022 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#ifndef _VIDEO_MEM_H_
+#define _VIDEO_MEM_H_
+
+/* No special requirements. */
+#define VMEM_FLAG_NONE                  0x00000000
+/* Physically contiguous. */
+#define VMEM_FLAG_CONTIGUOUS            0x00000001
+/* Physically non-contiguous. */
+#define VMEM_FLAG_NON_CONTIGUOUS        0x00000002
+/* Requires a 32-bit address. */
+#define VMEM_FLAG_4GB_ADDR              0x00000004
+
+typedef enum _VmemStatus
+{
+    VMEM_STATUS_OK = 0,
+    VMEM_STATUS_ERROR = -1,     /* general error */
+    VMEM_STATUS_NO_MEMORY = -2, /* not enough memory to allocate buffer */
+} VmemStatus;
+
+typedef struct _VmemParams
+{
+    int size;
+    int flags;
+    unsigned int phy_address;
+    void *vir_address;
+    int fd;
+} VmemParams;
+
+VmemStatus VMEM_create(void **vmem);
+VmemStatus VMEM_allocate(void *vmem, VmemParams *params);
+VmemStatus VMEM_mmap(void *vmem, VmemParams *params);
+VmemStatus VMEM_free(void *vmem, VmemParams *params);
+VmemStatus VMEM_destroy(void *vmem);
+
+VmemStatus VMEM_export(void *vmem, VmemParams *params);
+VmemStatus VMEM_import(void *vmem, VmemParams *params);
+VmemStatus VMEM_release(void *vmem, VmemParams *params);
+
+#endif /* !_VIDEO_MEM_H_ */
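The VMEM calls compose into create -> allocate -> mmap -> use -> free -> destroy. A sketch that allocates a physically contiguous buffer and zeroes it through the CPU mapping (error paths abbreviated; the mmap semantics are inferred from the names):

    #include <string.h>
    #include "video_mem.h"

    int vmem_demo(int size)
    {
        void *vmem = NULL;
        VmemParams params;

        memset(&params, 0, sizeof(params));
        params.size  = size;
        params.flags = VMEM_FLAG_CONTIGUOUS;

        if (VMEM_create(&vmem) != VMEM_STATUS_OK)
            return -1;
        if (VMEM_allocate(vmem, &params) != VMEM_STATUS_OK ||
            VMEM_mmap(vmem, &params) != VMEM_STATUS_OK) {
            VMEM_destroy(vmem);
            return -1;
        }

        /* params.phy_address is the device-visible bus address,
         * params.vir_address the CPU mapping of the same memory. */
        memset(params.vir_address, 0, (size_t)size);

        VMEM_free(vmem, &params);
        VMEM_destroy(vmem);
        return 0;
    }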

BIN
lib/vmem/lib/libvmem.so


BIN
model/install/face_detect/shl.hhb.bm


BIN
model/install/face_detect/shl.hhb.fd.bm


+ 2046 - 0
test/.debug

@@ -0,0 +1,2046 @@
+# filename:lineno [module]function flags format
+init/main.c:855 [main]initcall_blacklisted =p "initcall %s blacklisted\012"
+init/main.c:816 [main]initcall_blacklist =p "blacklisting initcall %s\012"
+init/initramfs.c:477 [initramfs]unpack_to_rootfs =_ "Detected %s compressed data\012"
+arch/riscv/kernel/module.c:309 [module]apply_relocate_add =_ "Applying relocate section %u to %u\012"
+arch/riscv/kernel/machine_kexec.c:38 [machine_kexec]_kexec_image_info =_ "%s:%d:\012"
+arch/riscv/kernel/machine_kexec.c:39 [machine_kexec]_kexec_image_info =_ "  kexec kimage info:\012"
+arch/riscv/kernel/machine_kexec.c:40 [machine_kexec]_kexec_image_info =_ "    type:        %d\012"
+arch/riscv/kernel/machine_kexec.c:41 [machine_kexec]_kexec_image_info =_ "    start:       0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:42 [machine_kexec]_kexec_image_info =_ "    head:        0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:43 [machine_kexec]_kexec_image_info =_ "    nr_segments: %lu\012"
+arch/riscv/kernel/machine_kexec.c:51 [machine_kexec]_kexec_image_info =_ "      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\012"
+arch/riscv/kernel/machine_kexec.c:88 [machine_kexec]machine_kexec =_ "%s:%d: control_code_page:        0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:90 [machine_kexec]machine_kexec =_ "%s:%d: reboot_code_buffer_phys:  0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:92 [machine_kexec]machine_kexec =_ "%s:%d: reboot_code_buffer:       0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:94 [machine_kexec]machine_kexec =_ "%s:%d: relocate_new_kernel:      0x%lx\012"
+arch/riscv/kernel/machine_kexec.c:97 [machine_kexec]machine_kexec =_ "%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\012"
+arch/riscv/mm/init.c:46 [init]reserve_crashkernel =_ "crash_base: 0x%llx\012"
+kernel/params.c:177 [params]parse_args =_ "doing %s, parsing ARGS: '%s'\012"
+kernel/params.c:140 [params]parse_one =_ "handling %s with %p\012"
+kernel/params.c:152 [params]parse_one =_ "doing %s: %s='%s'\012"
+kernel/params.c:156 [params]parse_one =_ "Unknown argument '%s'\012"
+kernel/async.c:120 [async]async_run_entry_fn =_ "calling  %lli_%pS @ %i\012"
+kernel/async.c:130 [async]async_run_entry_fn =_ "initcall %lli_%pS returned 0 after %lld usecs\012"
+kernel/async.c:293 [async]async_synchronize_cookie_domain =_ "async_waiting @ %i\012"
+kernel/async.c:305 [async]async_synchronize_cookie_domain =_ "async_continuing @ %i after %lli usec\012"
+kernel/irq/manage.c:768 [manage]__irq_set_trigger =_ "No set_type function for IRQ %d (%s)\012"
+kernel/irq/irqdomain.c:457 [irqdomain]irq_set_default_host =_ "Default domain set to @0x%p\012"
+kernel/irq/irqdomain.c:260 [irqdomain]irq_domain_remove =_ "Removed domain %s\012"
+kernel/irq/irqdomain.c:591 [irqdomain]irq_domain_associate_many =_ "%s(%s, irqbase=%i, hwbase=%i, count=%i)\012"
+kernel/irq/irqdomain.c:620 [irqdomain]irq_create_direct_mapping =_ "create_direct virq allocation failed\012"
+kernel/irq/irqdomain.c:629 [irqdomain]irq_create_direct_mapping =_ "create_direct obtained virq %d\012"
+kernel/irq/irqdomain.c:230 [irqdomain]__irq_domain_add =_ "Added domain %s\012"
+kernel/irq/irqdomain.c:656 [irqdomain]irq_create_mapping =_ "irq_create_mapping(0x%p, 0x%lx)\012"
+kernel/irq/irqdomain.c:665 [irqdomain]irq_create_mapping =_ "-> using domain @%p\012"
+kernel/irq/irqdomain.c:672 [irqdomain]irq_create_mapping =_ "-> existing mapping on virq %d\012"
+kernel/irq/irqdomain.c:679 [irqdomain]irq_create_mapping =_ "-> virq allocation failed\012"
+kernel/irq/irqdomain.c:689 [irqdomain]irq_create_mapping =_ "irq %lu on domain %s mapped to virtual irq %u\012"
+kernel/time/sched_clock.c:237 [sched_clock]sched_clock_register =_ "Registered %pS as sched_clock source\012"
+kernel/module.c:898 [module]module_unload_free =_ "%s unusing %s\012"
+kernel/module.c:605 [module]find_symbol =_ "Failed to find symbol %s\012"
+kernel/module.c:837 [module]already_uses =_ "%s uses %s!\012"
+kernel/module.c:841 [module]already_uses =_ "%s does not use %s!\012"
+kernel/module.c:856 [module]add_module_usage =_ "Allocating new usage for %s.\012"
+kernel/module.c:1004 [module]__do_sys_delete_module =_ "%s already dying\012"
+kernel/module.c:2437 [module]layout_sections =_ "Core section allocation order:\012"
+kernel/module.c:2449 [module]layout_sections =_ "\011%s\012"
+kernel/module.c:2470 [module]layout_sections =_ "Init section allocation order:\012"
+kernel/module.c:2483 [module]layout_sections =_ "\011%s\012"
+kernel/module.c:2702 [module]layout_symtab =_ "\011%s\012"
+kernel/module.c:2729 [module]layout_symtab =_ "\011%s\012"
+kernel/module.c:3290 [module]move_module =_ "final section addresses:\012"
+kernel/module.c:3309 [module]move_module =_ "\0110x%lx %s\012"
+kernel/module.c:2316 [module]simplify_symbols =_ "Common symbol: %s\012"
+kernel/module.c:2325 [module]simplify_symbols =_ "Absolute symbol: 0x%08lx\012"
+kernel/module.c:3990 [module]__do_sys_init_module =_ "init_module: umod=%p, len=%lu, uargs=%p\012"
+kernel/module.c:4010 [module]__do_sys_finit_module =_ "finit_module: fd=%d, uargs=%p, flags=%i\012"
+kernel/cgroup/cgroup.c:5636 [cgroup]cgroup_init_subsys =_ "Initializing cgroup subsys %s\012"
+mm/util.c:449 [util]__account_locked_vm =_ "%s: [%d] caller %ps %c%lu %lu/%lu%s\012"
+mm/page_alloc.c:6845 [page_alloc]alloc_node_mem_map =_ "%s: node %d, pgdat %08lx, node_mem_map %08lx\012"
+mm/swapfile.c:1050 [swapfile]get_swap_pages =_ "scan_swap_map of si %d failed to find offset\012"
+fs/notify/fsnotify.c:229 [fsnotify]send_to_group =_ "%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x data=%p data_is=%d cookie=%d\012"
+fs/notify/notification.c:92 [notification]fsnotify_add_event =_ "%s: group=%p event=%p\012"
+fs/notify/notification.c:153 [notification]fsnotify_remove_first_event =_ "%s: group=%p\012"
+fs/notify/inotify/inotify_fsnotify.c:88 [inotify_fsnotify]inotify_handle_event =_ "%s: group=%p inode=%p mask=%x\012"
+fs/notify/inotify/inotify_user.c:268 [inotify_user]inotify_release =_ "%s: group=%p\012"
+fs/notify/inotify/inotify_user.c:143 [inotify_user]get_one_event =_ "%s: group=%p event=%p\012"
+fs/notify/inotify/inotify_user.c:230 [inotify_user]inotify_read =_ "%s: group=%p kevent=%p\012"
+fs/notify/inotify/inotify_user.c:172 [inotify_user]copy_event_to_user =_ "%s: group=%p event=%p\012"
+fs/notify/inotify/inotify_user.c:288 [inotify_user]inotify_ioctl =_ "%s: group=%p cmd=%u\012"
+fs/aio.c:1464 [aio]aio_prep_rw =_ "aio ioprio check cap error: %d\012"
+fs/aio.c:1119 [aio]aio_complete =_ "%p[%u]: %p: %p %Lx %Lx %Lx\012"
+fs/aio.c:1139 [aio]aio_complete =_ "added to ring %p at [%u]\012"
+fs/aio.c:313 [aio]aio_free_ring =_ "pid(%d) [%d] page->count=%d\012"
+fs/aio.c:586 [aio]free_ioctx =_ "freeing %p\012"
+fs/aio.c:1202 [aio]aio_read_events_ring =_ "h%u t%u m%u\012"
+fs/aio.c:1246 [aio]aio_read_events_ring =_ "%li  h%u t%u\012"
+fs/aio.c:1860 [aio]io_submit_one =_ "EINVAL: reserve field set\012"
+fs/aio.c:1870 [aio]io_submit_one =_ "EINVAL: overflow check\012"
+fs/aio.c:1818 [aio]__io_submit_one =_ "EFAULT: aio_key\012"
+fs/aio.c:1843 [aio]__io_submit_one =_ "invalid aio operation %d\012"
+fs/aio.c:1325 [aio]__do_sys_io_setup =_ "EINVAL: ctx %lu nr_events %u\012"
+fs/aio.c:727 [aio]ioctx_alloc =_ "ENOMEM: nr_events too high\012"
+fs/aio.c:506 [aio]aio_setup_ring =_ "pid(%d) page[%d]->count=%d\012"
+fs/aio.c:520 [aio]aio_setup_ring =_ "attempting mmap of %lu bytes\012"
+fs/aio.c:538 [aio]aio_setup_ring =_ "mmap address: 0x%08lx\012"
+fs/aio.c:791 [aio]ioctx_alloc =_ "allocated ioctx %p[%ld]: mm=%p mask=0x%x\012"
+fs/aio.c:807 [aio]ioctx_alloc =_ "error allocating ioctx %d\012"
+fs/aio.c:1407 [aio]__do_sys_io_destroy =_ "EINVAL: invalid context id\012"
+fs/aio.c:1920 [aio]__do_sys_io_submit =_ "EINVAL: invalid context id\012"
+fs/kernfs/mount.c:239 [mount]kernfs_fill_super =_ "kernfs: could not get root inode\012"
+fs/kernfs/mount.c:246 [mount]kernfs_fill_super =_ "%s: could not get root dentry!\012"
+fs/kernfs/dir.c:1292 [dir]__kernfs_remove =_ "kernfs %s: removing\012"
+fs/ubifs/shrinker.c:295 [ubifs]ubifs_shrink_scan =_ "UBIFS DBG tnc (pid %d): no clean znodes, kick a thread\012"
+fs/ubifs/shrinker.c:303 [ubifs]ubifs_shrink_scan =_ "UBIFS DBG tnc (pid %d): not enough old znodes, try to free young ones\012"
+fs/ubifs/shrinker.c:308 [ubifs]ubifs_shrink_scan =_ "UBIFS DBG tnc (pid %d): not enough young znodes, free all\012"
+fs/ubifs/shrinker.c:312 [ubifs]ubifs_shrink_scan =_ "UBIFS DBG tnc (pid %d): freed nothing, but contention\012"
+fs/ubifs/shrinker.c:317 [ubifs]ubifs_shrink_scan =_ "UBIFS DBG tnc (pid %d): %lu znodes were freed, requested %lu\012"
+fs/ubifs/journal.c:139 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): no free space in jhead %s, run GC\012"
+fs/ubifs/journal.c:155 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): GC couldn't make a free LEB for jhead %s\012"
+fs/ubifs/journal.c:157 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): retry (%d)\012"
+fs/ubifs/journal.c:161 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): return -ENOSPC\012"
+fs/ubifs/journal.c:166 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): got LEB %d for jhead %s\012"
+fs/ubifs/journal.c:176 [ubifs]reserve_space =_ "UBIFS DBG jnl (pid %d): return LEB %d back, already have LEB %d:%d\012"
+fs/ubifs/journal.c:331 [ubifs]make_reservation =_ "UBIFS DBG jnl (pid %d): no space, retry\012"
+fs/ubifs/journal.c:363 [ubifs]make_reservation =_ "UBIFS DBG jnl (pid %d): -EAGAIN, commit and retry (retried %d times)\012"
+fs/ubifs/journal.c:279 [ubifs]write_head =_ "UBIFS DBG jnl (pid %d): jhead %s, LEB %d:%d, len %d\012"
+fs/ubifs/journal.c:734 [ubifs]ubifs_jnl_write_data =_ "UBIFS DBG jnl (pid %d): ino %lu, blk %u, len %d, key %s\012"
+fs/ubifs/journal.c:850 [ubifs]ubifs_jnl_write_inode =_ "UBIFS DBG jnl (pid %d): ino %lu, nlink %u\012"
+fs/ubifs/journal.c:1518 [ubifs]ubifs_jnl_truncate =_ "UBIFS DBG jnl (pid %d): ino %lu, size %lld -> %lld\012"
+fs/ubifs/journal.c:1545 [ubifs]ubifs_jnl_truncate =_ "UBIFS DBG jnl (pid %d): last block key %s\012"
+fs/ubifs/journal.c:1797 [ubifs]ubifs_jnl_change_xattr =_ "UBIFS DBG jnl (pid %d): ino %lu, ino %lu\012"
+fs/ubifs/file.c:1316 [ubifs]ubifs_fsync =_ "UBIFS DBG gen (pid %d): syncing inode %lu\012"
+fs/ubifs/file.c:110 [ubifs]do_readpage =_ "UBIFS DBG gen (pid %d): ino %lu, pg %lu, i_size %lld, flags %#lx\012"
+fs/ubifs/file.c:163 [ubifs]do_readpage =_ "UBIFS DBG gen (pid %d): hole\012"
+fs/ubifs/file.c:1521 [ubifs]ubifs_vm_page_mkwrite =_ "UBIFS DBG gen (pid %d): ino %lu, pg %lu, i_size %lld\012"
+fs/ubifs/file.c:1016 [ubifs]ubifs_writepage =_ "UBIFS DBG gen (pid %d): ino %lu, pg %lu, pg flags %#lx\012"
+fs/ubifs/file.c:617 [ubifs]populate_page =_ "UBIFS DBG gen (pid %d): ino %lu, pg %lu, i_size %lld, flags %#lx\012"
+fs/ubifs/file.c:689 [ubifs]populate_page =_ "UBIFS DBG gen (pid %d): hole\012"
+fs/ubifs/file.c:547 [ubifs]ubifs_write_end =_ "UBIFS DBG gen (pid %d): ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld\012"
+fs/ubifs/file.c:560 [ubifs]ubifs_write_end =_ "UBIFS DBG gen (pid %d): copied %d instead of %d, read page and repeat\012"
+fs/ubifs/file.c:229 [ubifs]write_begin_slow =_ "UBIFS DBG gen (pid %d): ino %lu, pos %llu, len %u, i_size %lld\012"
+fs/ubifs/file.c:1267 [ubifs]ubifs_setattr =_ "UBIFS DBG gen (pid %d): ino %lu, mode %#x, ia_valid %#x\012"
+fs/ubifs/file.c:1117 [ubifs]do_truncation =_ "UBIFS DBG gen (pid %d): ino %lu, size %lld -> %lld\012"
+fs/ubifs/file.c:1228 [ubifs]do_setattr =_ "UBIFS DBG gen (pid %d): size %lld -> %lld\012"
+fs/ubifs/dir.c:781 [ubifs]ubifs_unlink =_ "UBIFS DBG gen (pid %d): dent '%pd' from ino %lu (nlink %d) in dir ino %lu\012"
+fs/ubifs/dir.c:706 [ubifs]ubifs_link =_ "UBIFS DBG gen (pid %d): dent '%pd' to ino %lu (nlink %d) in dir ino %lu\012"
+fs/ubifs/dir.c:506 [ubifs]ubifs_readdir =_ "UBIFS DBG gen (pid %d): dir ino %lu, f_pos %#llx\012"
+fs/ubifs/dir.c:587 [ubifs]ubifs_readdir =_ "UBIFS DBG gen (pid %d): ino %llu, new f_pos %#x\012"
+fs/ubifs/dir.c:209 [ubifs]ubifs_lookup =_ "UBIFS DBG gen (pid %d): '%pd' in dir ino %lu\012"
+fs/ubifs/dir.c:242 [ubifs]ubifs_lookup =_ "UBIFS DBG gen (pid %d): not found\012"
+fs/ubifs/dir.c:367 [ubifs]do_tmpfile =_ "UBIFS DBG gen (pid %d): dent '%pd', mode %#hx in dir ino %lu\012"
+fs/ubifs/dir.c:1033 [ubifs]ubifs_mknod =_ "UBIFS DBG gen (pid %d): dent '%pd' in dir ino %lu\012"
+fs/ubifs/dir.c:958 [ubifs]ubifs_mkdir =_ "UBIFS DBG gen (pid %d): dent '%pd', mode %#hx in dir ino %lu\012"
+fs/ubifs/dir.c:1118 [ubifs]ubifs_symlink =_ "UBIFS DBG gen (pid %d): dent '%pd', target '%s' in dir ino %lu\012"
+fs/ubifs/dir.c:298 [ubifs]ubifs_create =_ "UBIFS DBG gen (pid %d): dent '%pd', mode %#hx in dir ino %lu\012"
+fs/ubifs/dir.c:1279 [ubifs]do_rename =_ "UBIFS DBG gen (pid %d): dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x\012"
+fs/ubifs/dir.c:884 [ubifs]ubifs_rmdir =_ "UBIFS DBG gen (pid %d): directory '%pd', ino %lu in dir ino %lu\012"
+fs/ubifs/super.c:399 [ubifs]ubifs_statfs =_ "UBIFS DBG gen (pid %d): free space %lld bytes (%lld blocks)\012"
+fs/ubifs/super.c:345 [ubifs]ubifs_evict_inode =_ "UBIFS DBG gen (pid %d): inode %lu, mode %#x\012"
+fs/ubifs/super.c:305 [ubifs]ubifs_write_inode =_ "UBIFS DBG gen (pid %d): inode %lu, mode %#x, nlink %u\012"
+fs/ubifs/super.c:387 [ubifs]ubifs_dirty_inode =_ "UBIFS DBG gen (pid %d): inode %lu\012"
+fs/ubifs/super.c:1619 [ubifs]ubifs_umount =_ "UBIFS DBG gen (pid %d): un-mounting UBI device %d, volume %d\012"
+fs/ubifs/super.c:1960 [ubifs]ubifs_remount_fs =_ "UBIFS DBG gen (pid %d): old flags %#lx, new flags %#x\012"
+fs/ubifs/super.c:1785 [ubifs]ubifs_remount_rw =_ "UBIFS DBG gen (pid %d): re-mounted read-write\012"
+fs/ubifs/super.c:1991 [ubifs]ubifs_remount_fs =_ "UBIFS DBG gen (pid %d): disable bulk-read\012"
+fs/ubifs/super.c:98 [ubifs]ubifs_iget =_ "UBIFS DBG gen (pid %d): inode %lu\012"
+fs/ubifs/super.c:2231 [ubifs]ubifs_mount =_ "UBIFS DBG gen (pid %d): name %s, flags %#x\012"
+fs/ubifs/super.c:2252 [ubifs]ubifs_mount =_ "UBIFS DBG gen (pid %d): opened ubi%d_%d\012"
+fs/ubifs/super.c:2265 [ubifs]ubifs_mount =_ "UBIFS DBG gen (pid %d): this ubi volume is already mounted\012"
+fs/ubifs/super.c:1527 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): default compressor:  %s\012"
+fs/ubifs/super.c:1529 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): data journal heads:  %d\012"
+fs/ubifs/super.c:1531 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): log LEBs:            %d (%d - %d)\012"
+fs/ubifs/super.c:1533 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): LPT area LEBs:       %d (%d - %d)\012"
+fs/ubifs/super.c:1535 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): orphan area LEBs:    %d (%d - %d)\012"
+fs/ubifs/super.c:1537 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): main area LEBs:      %d (%d - %d)\012"
+fs/ubifs/super.c:1538 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): index LEBs:          %d\012"
+fs/ubifs/super.c:1541 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): total index bytes:   %lld (%lld KiB, %lld MiB)\012"
+fs/ubifs/super.c:1542 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): key hash type:       %d\012"
+fs/ubifs/super.c:1543 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): tree fanout:         %d\012"
+fs/ubifs/super.c:1544 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): reserved GC LEB:     %d\012"
+fs/ubifs/super.c:1545 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. znode size      %d\012"
+fs/ubifs/super.c:1546 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. index node size %d\012"
+fs/ubifs/super.c:1548 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): node sizes:          data %zu, inode %zu, dentry %zu\012"
+fs/ubifs/super.c:1550 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): node sizes:          trun %zu, sb %zu, master %zu\012"
+fs/ubifs/super.c:1552 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): node sizes:          ref %zu, cmt. start %zu, orph %zu\012"
+fs/ubifs/super.c:1555 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. node sizes:     data %zu, inode %zu dentry %zu, idx %d\012"
+fs/ubifs/super.c:1556 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): dead watermark:      %d\012"
+fs/ubifs/super.c:1557 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): dark watermark:      %d\012"
+fs/ubifs/super.c:1558 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): LEB overhead:        %d\012"
+fs/ubifs/super.c:1561 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. dark space:     %lld (%lld KiB, %lld MiB)\012"
+fs/ubifs/super.c:1564 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): maximum bud bytes:   %lld (%lld KiB, %lld MiB)\012"
+fs/ubifs/super.c:1567 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): BG commit bud bytes: %lld (%lld KiB, %lld MiB)\012"
+fs/ubifs/super.c:1569 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): current bud bytes    %lld (%lld KiB, %lld MiB)\012"
+fs/ubifs/super.c:1570 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. seq. number:    %llu\012"
+fs/ubifs/super.c:1571 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): commit number:       %llu\012"
+fs/ubifs/super.c:1572 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max. xattrs per inode: %d\012"
+fs/ubifs/super.c:1573 [ubifs]mount_ubifs =_ "UBIFS DBG gen (pid %d): max orphans:           %d\012"
+fs/ubifs/sb.c:803 [ubifs]fixup_leb =_ "UBIFS DBG mnt (pid %d): unmap empty LEB %d\012"
+fs/ubifs/sb.c:807 [ubifs]fixup_leb =_ "UBIFS DBG mnt (pid %d): fixup LEB %d, data len %d\012"
+fs/ubifs/sb.c:157 [ubifs]create_default_filesystem =_ "UBIFS DBG gen (pid %d): LEB Properties Tree created (LEBs %d-%d)\012"
+fs/ubifs/sb.c:220 [ubifs]create_default_filesystem =_ "UBIFS DBG gen (pid %d): default superblock created at LEB 0:0\012"
+fs/ubifs/sb.c:269 [ubifs]create_default_filesystem =_ "UBIFS DBG gen (pid %d): default master node created at LEB %d:0\012"
+fs/ubifs/sb.c:285 [ubifs]create_default_filesystem =_ "UBIFS DBG gen (pid %d): default root indexing node created LEB %d:0\012"
+fs/ubifs/sb.c:311 [ubifs]create_default_filesystem =_ "UBIFS DBG gen (pid %d): root inode created at LEB %d:0\012"
+fs/ubifs/sb.c:765 [ubifs]ubifs_read_superblock =_ "UBIFS DBG mnt (pid %d): Auto resizing from %d LEBs to %d LEBs\012"
+fs/ubifs/io.c:478 [ubifs]wbuf_timer_callback_nolock =_ "UBIFS DBG io (pid %d): jhead %s\012"
+fs/ubifs/io.c:547 [ubifs]ubifs_wbuf_sync_nolock =_ "UBIFS DBG io (pid %d): LEB %d:%d, %d bytes, jhead %s\012"
+fs/ubifs/io.c:615 [ubifs]ubifs_wbuf_seek_nolock =_ "UBIFS DBG io (pid %d): LEB %d:%d, jhead %s\012"
+fs/ubifs/io.c:660 [ubifs]ubifs_bg_wbufs_sync =_ "UBIFS DBG io (pid %d): synchronize\012"
+fs/ubifs/io.c:725 [ubifs]ubifs_wbuf_write_nolock =_ "UBIFS DBG io (pid %d): %d bytes (%s) to jhead %s wbuf at LEB %d:%d\012"
+fs/ubifs/io.c:758 [ubifs]ubifs_wbuf_write_nolock =_ "UBIFS DBG io (pid %d): flush jhead %s wbuf to LEB %d:%d\012"
+fs/ubifs/io.c:793 [ubifs]ubifs_wbuf_write_nolock =_ "UBIFS DBG io (pid %d): flush jhead %s wbuf to LEB %d:%d\012"
+fs/ubifs/io.c:813 [ubifs]ubifs_wbuf_write_nolock =_ "UBIFS DBG io (pid %d): write %d bytes to LEB %d:%d\012"
+fs/ubifs/io.c:835 [ubifs]ubifs_wbuf_write_nolock =_ "UBIFS DBG io (pid %d): write %d bytes to LEB %d:%d\012"
+fs/ubifs/io.c:506 [ubifs]new_wbuf_timer_nolock =_ "UBIFS DBG io (pid %d): set timer for jhead %s, %llu-%llu millisecs\012"
+fs/ubifs/io.c:909 [ubifs]ubifs_write_node_hmac =_ "UBIFS DBG io (pid %d): LEB %d:%d, %s, length %d (aligned %d)\012"
+fs/ubifs/io.c:1047 [ubifs]ubifs_read_node =_ "UBIFS DBG io (pid %d): LEB %d:%d, %s, length %d\012"
+fs/ubifs/io.c:972 [ubifs]ubifs_read_node_wbuf =_ "UBIFS DBG io (pid %d): LEB %d:%d, %s, length %d, jhead %s\012"
+fs/ubifs/tnc.c:1726 [ubifs]validate_data_node =_ "UBIFS DBG tnc (pid %d): looked for key %s\012"
+fs/ubifs/tnc.c:1727 [ubifs]validate_data_node =_ "UBIFS DBG tnc (pid %d): found node's key %s\012"
+fs/ubifs/tnc.c:1313 [ubifs]lookup_level0_dirty =_ "UBIFS DBG tnc (pid %d): search and dirty key %s\012"
+fs/ubifs/tnc.c:1359 [ubifs]lookup_level0_dirty =_ "UBIFS DBG tnc (pid %d): found %d, lvl %d, n %d\012"
+fs/ubifs/tnc.c:1370 [ubifs]lookup_level0_dirty =_ "UBIFS DBG tnc (pid %d): found 0, lvl %d, n -1\012"
+fs/ubifs/tnc.c:1377 [ubifs]lookup_level0_dirty =_ "UBIFS DBG tnc (pid %d): found 0, lvl %d, n -1\012"
+fs/ubifs/tnc.c:1387 [ubifs]lookup_level0_dirty =_ "UBIFS DBG tnc (pid %d): found 1, lvl %d, n %d\012"
+fs/ubifs/tnc.c:502 [ubifs]fallible_read_node =_ "UBIFS DBG tnc (pid %d): LEB %d:%d, key %s\012"
+fs/ubifs/tnc.c:451 [ubifs]try_read_node =_ "UBIFS DBG io (pid %d): LEB %d:%d, %s, length %d\012"
+fs/ubifs/tnc.c:516 [ubifs]fallible_read_node =_ "UBIFS DBG mnt (pid %d): dangling branch LEB %d:%d len %d, key %s\012"
+fs/ubifs/tnc.c:994 [ubifs]fallible_resolve_collision =_ "UBIFS DBG mnt (pid %d): dangling match LEB %d:%d len %d key %s\012"
+fs/ubifs/tnc.c:2095 [ubifs]tnc_insert =_ "UBIFS DBG tnc (pid %d): inserted at %d level %d, key %s\012"
+fs/ubifs/tnc.c:2110 [ubifs]tnc_insert =_ "UBIFS DBG tnc (pid %d): splitting level %d, key %s\012"
+fs/ubifs/tnc.c:2190 [ubifs]tnc_insert =_ "UBIFS DBG tnc (pid %d): moving %d, keeping %d\012"
+fs/ubifs/tnc.c:2204 [ubifs]tnc_insert =_ "UBIFS DBG tnc (pid %d): inserting at %d level %d, key %s\012"
+fs/ubifs/tnc.c:2228 [ubifs]tnc_insert =_ "UBIFS DBG tnc (pid %d): creating new zroot at level %d\012"
+fs/ubifs/tnc.c:2512 [ubifs]tnc_delete =_ "UBIFS DBG tnc (pid %d): deleting key %s\012"
+fs/ubifs/tnc.c:1177 [ubifs]ubifs_lookup_level0 =_ "UBIFS DBG tnc (pid %d): search key %s\012"
+fs/ubifs/tnc.c:1215 [ubifs]ubifs_lookup_level0 =_ "UBIFS DBG tnc (pid %d): found %d, lvl %d, n %d\012"
+fs/ubifs/tnc.c:1264 [ubifs]ubifs_lookup_level0 =_ "UBIFS DBG tnc (pid %d): found 0, lvl %d, n -1\012"
+fs/ubifs/tnc.c:1276 [ubifs]ubifs_lookup_level0 =_ "UBIFS DBG tnc (pid %d): found 1, lvl %d, n %d\012"
+fs/ubifs/tnc.c:1271 [ubifs]ubifs_lookup_level0 =_ "UBIFS DBG tnc (pid %d): found 0, lvl %d, n -1\012"
+fs/ubifs/tnc.c:1653 [ubifs]read_wbuf =_ "UBIFS DBG io (pid %d): LEB %d:%d, length %d\012"
+fs/ubifs/tnc.c:1780 [ubifs]ubifs_tnc_bulk_read =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc.c:1815 [ubifs]do_lookup_nm =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc.c:1829 [ubifs]do_lookup_nm =_ "UBIFS DBG tnc (pid %d): rc returned %d, znode %p, n %d\012"
+fs/ubifs/tnc.c:2281 [ubifs]ubifs_tnc_add =_ "UBIFS DBG tnc (pid %d): %d:%d, len %d, key %s\012"
+fs/ubifs/tnc.c:2333 [ubifs]ubifs_tnc_replace =_ "UBIFS DBG tnc (pid %d): old LEB %d:%d, new LEB %d:%d, len %d, key %s\012"
+fs/ubifs/tnc.c:2357 [ubifs]ubifs_tnc_replace =_ "UBIFS DBG tnc (pid %d): rc returned %d, znode %p, n %d, LEB %d:%d\012"
+fs/ubifs/tnc.c:2622 [ubifs]ubifs_tnc_remove =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc.c:2653 [ubifs]ubifs_tnc_remove_nm =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc.c:2664 [ubifs]ubifs_tnc_remove_nm =_ "UBIFS DBG tnc (pid %d): rc returned %d, znode %p, n %d\012"
+fs/ubifs/tnc.c:2417 [ubifs]ubifs_tnc_add_nm =_ "UBIFS DBG tnc (pid %d): LEB %d:%d, key %s\012"
+fs/ubifs/tnc.c:2430 [ubifs]ubifs_tnc_add_nm =_ "UBIFS DBG tnc (pid %d): rc returned %d, znode %p, n %d\012"
+fs/ubifs/tnc.c:2836 [ubifs]ubifs_tnc_remove_range =_ "UBIFS DBG tnc (pid %d): removing key %s\012"
+fs/ubifs/tnc.c:2958 [ubifs]ubifs_tnc_next_ent =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc.c:2975 [ubifs]ubifs_tnc_next_ent =_ "UBIFS DBG tnc (pid %d): rc returned %d, znode %p, n %d\012"
+fs/ubifs/tnc.c:2872 [ubifs]ubifs_tnc_remove_ino =_ "UBIFS DBG tnc (pid %d): ino %lu\012"
+fs/ubifs/tnc.c:2893 [ubifs]ubifs_tnc_remove_ino =_ "UBIFS DBG tnc (pid %d): xent '%s', ino %lu\012"
+fs/ubifs/master.c:403 [ubifs]ubifs_read_master =_ "UBIFS DBG mnt (pid %d): Auto resizing (master) from %d LEBs to %d LEBs\012"
+fs/ubifs/scan.c:66 [ubifs]ubifs_scan_a_node =_ "UBIFS DBG scan (pid %d): hit empty space at LEB %d:%d\012"
+fs/ubifs/scan.c:33 [ubifs]scan_padding_bytes =_ "UBIFS DBG scan (pid %d): not a node\012"
+fs/ubifs/scan.c:41 [ubifs]scan_padding_bytes =_ "UBIFS DBG scan (pid %d): %d padding bytes\012"
+fs/ubifs/scan.c:77 [ubifs]ubifs_scan_a_node =_ "UBIFS DBG scan (pid %d): scanning %s at LEB %d:%d\012"
+fs/ubifs/scan.c:107 [ubifs]ubifs_scan_a_node =_ "UBIFS DBG scan (pid %d): %d bytes padded at LEB %d:%d, offset now %d\012"
+fs/ubifs/scan.c:131 [ubifs]ubifs_start_scan =_ "UBIFS DBG scan (pid %d): scan LEB %d:%d\012"
+fs/ubifs/scan.c:166 [ubifs]ubifs_end_scan =_ "UBIFS DBG scan (pid %d): stop scanning LEB %d at offset %d\012"
+fs/ubifs/scan.c:270 [ubifs]ubifs_scan =_ "UBIFS DBG scan (pid %d): look at LEB %d:%d (%d bytes left)\012"
+fs/ubifs/replay.c:389 [ubifs]insert_node =_ "UBIFS DBG mnt (pid %d): add LEB %d:%d, key %s\012"
+fs/ubifs/replay.c:1210 [ubifs]ubifs_replay_journal =_ "UBIFS DBG mnt (pid %d): start replaying the journal\012"
+fs/ubifs/replay.c:1005 [ubifs]replay_log_leb =_ "UBIFS DBG mnt (pid %d): replay log LEB %d:%d\012"
+fs/ubifs/replay.c:1049 [ubifs]replay_log_leb =_ "UBIFS DBG mnt (pid %d): commit start sqnum %llu\012"
+fs/ubifs/replay.c:910 [ubifs]add_replay_bud =_ "UBIFS DBG mnt (pid %d): add replay bud LEB %d:%d, head %d\012"
+fs/ubifs/replay.c:689 [ubifs]replay_bud =_ "UBIFS DBG mnt (pid %d): replay bud LEB %d, head %d, offs %d, is_last %d\012"
+fs/ubifs/replay.c:439 [ubifs]insert_dent =_ "UBIFS DBG mnt (pid %d): add LEB %d:%d, key %s\012"
+fs/ubifs/replay.c:839 [ubifs]replay_bud =_ "UBIFS DBG mnt (pid %d): bud LEB %d replied: dirty %d, free %d\012"
+fs/ubifs/replay.c:247 [ubifs]apply_replay_entry =_ "UBIFS DBG mnt (pid %d): LEB %d:%d len %d deletion %d sqnum %llu key %s\012"
+fs/ubifs/replay.c:123 [ubifs]set_bud_lprops =_ "UBIFS DBG mnt (pid %d): bud LEB %d was GC'd (%d free, %d dirty)\012"
+fs/ubifs/replay.c:125 [ubifs]set_bud_lprops =_ "UBIFS DBG gc (pid %d): bud LEB %d was GC'd (%d free, %d dirty)\012"
+fs/ubifs/replay.c:137 [ubifs]set_bud_lprops =_ "UBIFS DBG mnt (pid %d): LEB %d lp: %d free %d dirty replay: %d free %d dirty\012"
+fs/ubifs/replay.c:1261 [ubifs]ubifs_replay_journal =_ "UBIFS DBG mnt (pid %d): finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu\012"
+fs/ubifs/log.c:147 [ubifs]ubifs_add_bud =_ "UBIFS DBG log (pid %d): LEB %d:%d, jhead %s, bud_bytes %lld\012"
+fs/ubifs/log.c:189 [ubifs]ubifs_add_bud_to_log =_ "UBIFS DBG log (pid %d): not enough log space - %lld, required %d\012"
+fs/ubifs/log.c:206 [ubifs]ubifs_add_bud_to_log =_ "UBIFS DBG log (pid %d): bud bytes %lld (%lld max), require commit\012"
+fs/ubifs/log.c:220 [ubifs]ubifs_add_bud_to_log =_ "UBIFS DBG log (pid %d): bud bytes %lld (%lld max), initiate BG commit\012"
+fs/ubifs/log.c:261 [ubifs]ubifs_add_bud_to_log =_ "UBIFS DBG log (pid %d): write ref LEB %d:%d\012"
+fs/ubifs/log.c:401 [ubifs]ubifs_log_start_commit =_ "UBIFS DBG log (pid %d): add ref to LEB %d:%d for jhead %s\012"
+fs/ubifs/log.c:433 [ubifs]ubifs_log_start_commit =_ "UBIFS DBG log (pid %d): writing commit start at LEB %d:0, len %d\012"
+fs/ubifs/log.c:322 [ubifs]remove_buds =_ "UBIFS DBG log (pid %d): preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld\012"
+fs/ubifs/log.c:328 [ubifs]remove_buds =_ "UBIFS DBG log (pid %d): remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld\012"
+fs/ubifs/log.c:478 [ubifs]ubifs_log_end_commit =_ "UBIFS DBG log (pid %d): old tail was LEB %d:0, new tail is LEB %d:0\012"
+fs/ubifs/log.c:533 [ubifs]ubifs_log_post_commit =_ "UBIFS DBG log (pid %d): unmap log LEB %d\012"
+fs/ubifs/log.c:651 [ubifs]ubifs_consolidate_log =_ "UBIFS DBG rcvry (pid %d): log tail LEB %d, log head LEB %d\012"
+fs/ubifs/log.c:721 [ubifs]ubifs_consolidate_log =_ "UBIFS DBG rcvry (pid %d): new log head at %d:%d\012"
+fs/ubifs/commit.c:377 [ubifs]wait_for_commit =_ "UBIFS DBG cmt (pid %d): pid %d goes sleep\012"
+fs/ubifs/commit.c:388 [ubifs]wait_for_commit =_ "UBIFS DBG cmt (pid %d): commit finished, pid %d woke up\012"
+fs/ubifs/commit.c:333 [ubifs]ubifs_commit_required =_ "UBIFS DBG cmt (pid %d): old: %s, new: %s\012"
+fs/ubifs/commit.c:338 [ubifs]ubifs_commit_required =_ "UBIFS DBG cmt (pid %d): old: %s, new: %s\012"
+fs/ubifs/commit.c:361 [ubifs]ubifs_request_bg_commit =_ "UBIFS DBG cmt (pid %d): old: %s, new: %s\012"
+fs/ubifs/commit.c:473 [ubifs]ubifs_gc_should_commit =_ "UBIFS DBG cmt (pid %d): commit required now\012"
+fs/ubifs/commit.c:476 [ubifs]ubifs_gc_should_commit =_ "UBIFS DBG cmt (pid %d): commit not requested\012"
+fs/ubifs/commit.c:103 [ubifs]do_commit =_ "UBIFS DBG cmt (pid %d): start\012"
+fs/ubifs/commit.c:209 [ubifs]do_commit =_ "UBIFS DBG cmt (pid %d): commit end\012"
+fs/ubifs/gc.c:77 [ubifs]switch_gc_head =_ "UBIFS DBG gc (pid %d): switch GC head from LEB %d:%d to LEB %d (waste %d bytes)\012"
+fs/ubifs/gc.c:508 [ubifs]ubifs_garbage_collect_leb =_ "UBIFS DBG gc (pid %d): LEB %d is free, return it\012"
+fs/ubifs/gc.c:552 [ubifs]ubifs_garbage_collect_leb =_ "UBIFS DBG gc (pid %d): indexing LEB %d (free %d, dirty %d)\012"
+fs/ubifs/gc.c:588 [ubifs]ubifs_garbage_collect_leb =_ "UBIFS DBG gc (pid %d): data LEB %d (free %d, dirty %d)\012"
+fs/ubifs/gc.c:711 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): soft limit, some index LEBs GC'ed, -EAGAIN\012"
+fs/ubifs/gc.c:722 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): hard limit, -ENOSPC\012"
+fs/ubifs/gc.c:737 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): no more dirty LEBs\012"
+fs/ubifs/gc.c:743 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): found LEB %d: free %d, dirty %d, sum %d (min. space %d)\012"
+fs/ubifs/gc.c:768 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): LEB %d freed, return\012"
+fs/ubifs/gc.c:780 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): indexing LEB %d freed, continue\012"
+fs/ubifs/gc.c:787 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): LEB %d retained, freed %d bytes\012"
+fs/ubifs/gc.c:797 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): did not make progress\012"
+fs/ubifs/gc.c:816 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): try again\012"
+fs/ubifs/gc.c:823 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): set min. space to %d\012"
+fs/ubifs/gc.c:827 [ubifs]ubifs_garbage_collect =_ "UBIFS DBG gc (pid %d): no space, some index LEBs GC'ed, -EAGAIN\012"
+fs/ubifs/gc.c:949 [ubifs]ubifs_gc_end_commit =_ "UBIFS DBG gc (pid %d): LEB %d\012"
+fs/ubifs/orphan.c:132 [ubifs]orphan_delete =_ "UBIFS DBG gen (pid %d): deleted twice ino %lu\012"
+fs/ubifs/orphan.c:140 [ubifs]orphan_delete =_ "UBIFS DBG gen (pid %d): delete later ino %lu\012"
+fs/ubifs/orphan.c:92 [ubifs]orphan_add =_ "UBIFS DBG gen (pid %d): ino %lu\012"
+fs/ubifs/orphan.c:248 [ubifs]ubifs_orphan_start_commit =_ "UBIFS DBG cmt (pid %d): %d orphans to commit\012"
+fs/ubifs/orphan.c:438 [ubifs]consolidate =_ "UBIFS DBG cmt (pid %d): there is space for %d orphans and there are %d\012"
+fs/ubifs/orphan.c:517 [ubifs]erase_deleted =_ "UBIFS DBG gen (pid %d): deleting orphan ino %lu\012"
+fs/ubifs/orphan.c:1035 [ubifs]dbg_check_orphans =_ "UBIFS DBG cmt (pid %d): last inode number is %lu\012"
+fs/ubifs/orphan.c:1036 [ubifs]dbg_check_orphans =_ "UBIFS DBG cmt (pid %d): total number of inodes is %lu\012"
+fs/ubifs/orphan.c:1037 [ubifs]dbg_check_orphans =_ "UBIFS DBG cmt (pid %d): total number of leaf nodes is %llu\012"
+fs/ubifs/orphan.c:754 [ubifs]kill_orphans =_ "UBIFS DBG rcvry (pid %d): no orphans\012"
+fs/ubifs/orphan.c:771 [ubifs]kill_orphans =_ "UBIFS DBG rcvry (pid %d): LEB %d\012"
+fs/ubifs/orphan.c:674 [ubifs]do_kill_orphans =_ "UBIFS DBG rcvry (pid %d): out of date LEB %d\012"
+fs/ubifs/orphan.c:700 [ubifs]do_kill_orphans =_ "UBIFS DBG rcvry (pid %d): deleting orphaned inode %lu\012"
+fs/ubifs/orphan.c:607 [ubifs]insert_dead_orphan =_ "UBIFS DBG mnt (pid %d): ino %lu, new %d, tot %d\012"
+fs/ubifs/orphan.c:718 [ubifs]do_kill_orphans =_ "UBIFS DBG rcvry (pid %d): last orph node for commit %llu at %d:%d\012"
+fs/ubifs/budget.c:454 [ubifs]ubifs_budget_space =_ "UBIFS DBG budg (pid %d): no space\012"
+fs/ubifs/budget.c:334 [ubifs]do_budget_space =_ "UBIFS DBG budg (pid %d): out of indexing space: min_idx_lebs %d (old %d), rsvd_idx_lebs %d\012"
+fs/ubifs/budget.c:343 [ubifs]do_budget_space =_ "UBIFS DBG budg (pid %d): out of data space: available %lld, outstanding %lld\012"
+fs/ubifs/budget.c:479 [ubifs]ubifs_budget_space =_ "UBIFS DBG budg (pid %d): no space for fast budgeting\012"
+fs/ubifs/budget.c:131 [ubifs]make_free_space =_ "UBIFS DBG budg (pid %d): liability %lld, run write-back\012"
+fs/ubifs/budget.c:138 [ubifs]make_free_space =_ "UBIFS DBG budg (pid %d): new liability %lld (not shrunk)\012"
+fs/ubifs/budget.c:141 [ubifs]make_free_space =_ "UBIFS DBG budg (pid %d): Run GC\012"
+fs/ubifs/budget.c:78 [ubifs]run_gc =_ "UBIFS DBG budg (pid %d): GC freed LEB %d\012"
+fs/ubifs/budget.c:150 [ubifs]make_free_space =_ "UBIFS DBG budg (pid %d): Run commit (retries %d)\012"
+fs/ubifs/budget.c:486 [ubifs]ubifs_budget_space =_ "UBIFS DBG budg (pid %d): try again\012"
+fs/ubifs/budget.c:491 [ubifs]ubifs_budget_space =_ "UBIFS DBG budg (pid %d): -ENOSPC, but anyway try once again\012"
+fs/ubifs/budget.c:494 [ubifs]ubifs_budget_space =_ "UBIFS DBG budg (pid %d): FS is full, -ENOSPC\012"
+fs/ubifs/find.c:311 [ubifs]ubifs_find_dirty_leb =_ "UBIFS DBG find (pid %d): scanning LPT for a dirty LEB\012"
+fs/ubifs/find.c:322 [ubifs]ubifs_find_dirty_leb =_ "UBIFS DBG find (pid %d): found LEB %d, free %d, dirty %d, flags %#x\012"
+fs/ubifs/find.c:487 [ubifs]ubifs_find_free_space =_ "UBIFS DBG find (pid %d): min_space %d\012"
+fs/ubifs/find.c:564 [ubifs]ubifs_find_free_space =_ "UBIFS DBG find (pid %d): found LEB %d, free %d\012"
+fs/ubifs/find.c:699 [ubifs]ubifs_find_free_leb_for_idx =_ "UBIFS DBG find (pid %d): found LEB %d, free %d, dirty %d, flags %#x\012"
+fs/ubifs/find.c:758 [ubifs]ubifs_save_dirty_idx_lnums =_ "UBIFS DBG find (pid %d): found %d dirty index LEBs\012"
+fs/ubifs/find.c:763 [ubifs]ubifs_save_dirty_idx_lnums =_ "UBIFS DBG find (pid %d): dirtiest index LEB is %d with dirty %d and free %d\012"
+fs/ubifs/find.c:927 [ubifs]find_dirtiest_idx_leb =_ "UBIFS DBG find (pid %d): LEB %d, dirty %d and free %d flags %#x\012"
+fs/ubifs/find.c:862 [ubifs]find_dirty_idx_leb =_ "UBIFS DBG find (pid %d): found dirty LEB %d, free %d, dirty %d, flags %#x\012"
+fs/ubifs/find.c:897 [ubifs]get_idx_gc_leb =_ "UBIFS DBG find (pid %d): LEB %d, dirty %d and free %d flags %#x\012"
+fs/ubifs/tnc_commit.c:723 [ubifs]free_unused_idx_lebs =_ "UBIFS DBG cmt (pid %d): LEB %d\012"
+fs/ubifs/tnc_commit.c:149 [ubifs]fill_gap =_ "UBIFS DBG gc (pid %d): LEB %d:%d to %d len %d nodes written %d wasted bytes %d\012"
+fs/ubifs/tnc_commit.c:650 [ubifs]get_znodes_to_commit =_ "UBIFS DBG cmt (pid %d): no znodes to commit\012"
+fs/ubifs/tnc_commit.c:669 [ubifs]get_znodes_to_commit =_ "UBIFS DBG cmt (pid %d): committing %d znodes\012"
+fs/ubifs/tnc_commit.c:690 [ubifs]alloc_idx_lebs =_ "UBIFS DBG cmt (pid %d): need about %d empty LEBS for TNC commit\012"
+fs/ubifs/tnc_commit.c:701 [ubifs]alloc_idx_lebs =_ "UBIFS DBG cmt (pid %d): LEB %d\012"
+fs/ubifs/tnc_commit.c:360 [ubifs]layout_in_gaps =_ "UBIFS DBG gc (pid %d): %d znodes to write\012"
+fs/ubifs/tnc_commit.c:240 [ubifs]layout_leb_in_gaps =_ "UBIFS DBG gc (pid %d): LEB %d\012"
+fs/ubifs/tnc_commit.c:297 [ubifs]layout_leb_in_gaps =_ "UBIFS DBG gc (pid %d): LEB %d wrote %d index nodes\012"
+fs/ubifs/tnc_commit.c:321 [ubifs]layout_leb_in_gaps =_ "UBIFS DBG gc (pid %d): LEB %d wrote %d index nodes\012"
+fs/ubifs/tnc_commit.c:394 [ubifs]layout_in_gaps =_ "UBIFS DBG gc (pid %d): %d znodes remaining, need %d LEBs, have %d\012"
+fs/ubifs/tnc_commit.c:806 [ubifs]ubifs_tnc_start_commit =_ "UBIFS DBG cmt (pid %d): number of index LEBs %d\012"
+fs/ubifs/tnc_commit.c:807 [ubifs]ubifs_tnc_start_commit =_ "UBIFS DBG cmt (pid %d): size of index %llu\012"
+fs/ubifs/tnc_commit.c:1064 [ubifs]return_gap_lebs =_ "UBIFS DBG cmt (pid %d): \012"
+fs/ubifs/tnc_commit.c:1100 [ubifs]ubifs_tnc_end_commit =_ "UBIFS DBG cmt (pid %d): TNC height is %d\012"
+fs/ubifs/lpt.c:473 [ubifs]set_ltab =_ "UBIFS DBG lp (pid %d): LEB %d free %d dirty %d to %d %d\012"
+fs/ubifs/lpt.c:457 [ubifs]ubifs_add_lpt_dirt =_ "UBIFS DBG lp (pid %d): LEB %d add %d to %d\012"
+fs/ubifs/lpt.c:842 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): space_bits %d\012"
+fs/ubifs/lpt.c:843 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lpt_lnum_bits %d\012"
+fs/ubifs/lpt.c:844 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lpt_offs_bits %d\012"
+fs/ubifs/lpt.c:845 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lpt_spc_bits %d\012"
+fs/ubifs/lpt.c:846 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): pcnt_bits %d\012"
+fs/ubifs/lpt.c:847 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lnum_bits %d\012"
+fs/ubifs/lpt.c:848 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): pnode_sz %d\012"
+fs/ubifs/lpt.c:849 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): nnode_sz %d\012"
+fs/ubifs/lpt.c:850 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): ltab_sz %d\012"
+fs/ubifs/lpt.c:851 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lsave_sz %d\012"
+fs/ubifs/lpt.c:852 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lsave_cnt %d\012"
+fs/ubifs/lpt.c:853 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): lpt_hght %d\012"
+fs/ubifs/lpt.c:854 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): big_lpt %d\012"
+fs/ubifs/lpt.c:855 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): LPT root is at %d:%d\012"
+fs/ubifs/lpt.c:856 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): LPT head is at %d:%d\012"
+fs/ubifs/lpt.c:857 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): LPT ltab is at %d:%d\012"
+fs/ubifs/lpt.c:859 [ubifs]ubifs_create_dflt_lpt =_ "UBIFS DBG lp (pid %d): LPT lsave is at %d:%d\012"
+fs/ubifs/lpt.c:1500 [ubifs]ubifs_lpt_lookup =_ "UBIFS DBG lp (pid %d): LEB %d, free %d, dirty %d, flags %d\012"
+fs/ubifs/lpt.c:1640 [ubifs]ubifs_lpt_lookup_dirty =_ "UBIFS DBG lp (pid %d): LEB %d, free %d, dirty %d, flags %d\012"
+fs/ubifs/lpt.c:1815 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): space_bits %d\012"
+fs/ubifs/lpt.c:1816 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lpt_lnum_bits %d\012"
+fs/ubifs/lpt.c:1817 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lpt_offs_bits %d\012"
+fs/ubifs/lpt.c:1818 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lpt_spc_bits %d\012"
+fs/ubifs/lpt.c:1819 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): pcnt_bits %d\012"
+fs/ubifs/lpt.c:1820 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lnum_bits %d\012"
+fs/ubifs/lpt.c:1821 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): pnode_sz %d\012"
+fs/ubifs/lpt.c:1822 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): nnode_sz %d\012"
+fs/ubifs/lpt.c:1823 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): ltab_sz %d\012"
+fs/ubifs/lpt.c:1824 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lsave_sz %d\012"
+fs/ubifs/lpt.c:1825 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lsave_cnt %d\012"
+fs/ubifs/lpt.c:1826 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): lpt_hght %d\012"
+fs/ubifs/lpt.c:1827 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): big_lpt %d\012"
+fs/ubifs/lpt.c:1828 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): LPT root is at %d:%d\012"
+fs/ubifs/lpt.c:1829 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): LPT head is at %d:%d\012"
+fs/ubifs/lpt.c:1830 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): LPT ltab is at %d:%d\012"
+fs/ubifs/lpt.c:1832 [ubifs]lpt_init_rd =_ "UBIFS DBG lp (pid %d): LPT lsave is at %d:%d\012"
+fs/ubifs/lprops.c:532 [ubifs]ubifs_change_lp =_ "UBIFS DBG lp (pid %d): LEB %d, free %d, dirty %d, flags %d\012"
+fs/ubifs/recovery.c:152 [ubifs]get_master_node =_ "UBIFS DBG rcvry (pid %d): found a master node at %d:%d\012"
+fs/ubifs/recovery.c:163 [ubifs]get_master_node =_ "UBIFS DBG rcvry (pid %d): found corruption at %d:%d\012"
+fs/ubifs/recovery.c:198 [ubifs]write_rcvrd_mst_node =_ "UBIFS DBG rcvry (pid %d): recovery\012"
+fs/ubifs/recovery.c:1135 [ubifs]grab_empty_leb =_ "UBIFS DBG rcvry (pid %d): found empty LEB %d, run commit\012"
+fs/ubifs/recovery.c:925 [ubifs]recover_head =_ "UBIFS DBG rcvry (pid %d): cleaning head at %d:%d\012"
+fs/ubifs/recovery.c:234 [ubifs]ubifs_recover_master_node =_ "UBIFS DBG rcvry (pid %d): recovery\012"
+fs/ubifs/recovery.c:252 [ubifs]ubifs_recover_master_node =_ "UBIFS DBG rcvry (pid %d): recovery recovery\012"
+fs/ubifs/recovery.c:630 [ubifs]ubifs_recover_leb =_ "UBIFS DBG rcvry (pid %d): %d:%d, jhead %d, grouped %d\012"
+fs/ubifs/recovery.c:639 [ubifs]ubifs_recover_leb =_ "UBIFS DBG scan (pid %d): look at LEB %d:%d (%d bytes left)\012"
+fs/ubifs/recovery.c:670 [ubifs]ubifs_recover_leb =_ "UBIFS DBG rcvry (pid %d): found corruption (%d) at %d:%d\012"
+fs/ubifs/recovery.c:473 [ubifs]no_more_nodes =_ "UBIFS DBG rcvry (pid %d): unexpected bad common header at %d:%d\012"
+fs/ubifs/recovery.c:481 [ubifs]no_more_nodes =_ "UBIFS DBG rcvry (pid %d): unexpected data at %d:%d\012"
+fs/ubifs/recovery.c:575 [ubifs]drop_last_group =_ "UBIFS DBG rcvry (pid %d): dropping grouped node at %d:%d\012"
+fs/ubifs/recovery.c:600 [ubifs]drop_last_node =_ "UBIFS DBG rcvry (pid %d): dropping last node at %d:%d\012"
+fs/ubifs/recovery.c:434 [ubifs]clean_buf =_ "UBIFS DBG rcvry (pid %d): cleaning corruption at %d:%d\012"
+fs/ubifs/recovery.c:510 [ubifs]fix_unclean_leb =_ "UBIFS DBG rcvry (pid %d): need to fix LEB %d start %d endpt %d\012"
+fs/ubifs/recovery.c:522 [ubifs]fix_unclean_leb =_ "UBIFS DBG rcvry (pid %d): fixing LEB %d start %d endpt %d\012"
+fs/ubifs/recovery.c:862 [ubifs]ubifs_recover_log_leb =_ "UBIFS DBG rcvry (pid %d): LEB %d\012"
+fs/ubifs/recovery.c:805 [ubifs]get_cs_sqnum =_ "UBIFS DBG rcvry (pid %d): at %d:%d\012"
+fs/ubifs/recovery.c:831 [ubifs]get_cs_sqnum =_ "UBIFS DBG rcvry (pid %d): commit start sqnum %llu\012"
+fs/ubifs/recovery.c:960 [ubifs]ubifs_recover_inl_heads =_ "UBIFS DBG rcvry (pid %d): checking index head at %d:%d\012"
+fs/ubifs/recovery.c:965 [ubifs]ubifs_recover_inl_heads =_ "UBIFS DBG rcvry (pid %d): checking LPT head at %d:%d\012"
+fs/ubifs/recovery.c:1077 [ubifs]ubifs_clean_lebs =_ "UBIFS DBG rcvry (pid %d): recovery\012"
+fs/ubifs/recovery.c:988 [ubifs]clean_an_unclean_leb =_ "UBIFS DBG rcvry (pid %d): LEB %d len %d\012"
+fs/ubifs/recovery.c:1059 [ubifs]clean_an_unclean_leb =_ "UBIFS DBG rcvry (pid %d): cleaned LEB %d\012"
+fs/ubifs/recovery.c:1164 [ubifs]ubifs_rcvry_gc_commit =_ "UBIFS DBG rcvry (pid %d): GC head LEB %d, offs %d\012"
+fs/ubifs/recovery.c:1175 [ubifs]ubifs_rcvry_gc_commit =_ "UBIFS DBG rcvry (pid %d): could not find a dirty LEB\012"
+fs/ubifs/recovery.c:1186 [ubifs]ubifs_rcvry_gc_commit =_ "UBIFS DBG rcvry (pid %d): committing\012"
+fs/ubifs/recovery.c:1191 [ubifs]ubifs_rcvry_gc_commit =_ "UBIFS DBG rcvry (pid %d): GC'ing LEB %d\012"
+fs/ubifs/recovery.c:1216 [ubifs]ubifs_rcvry_gc_commit =_ "UBIFS DBG rcvry (pid %d): allocated LEB %d for GC\012"
+fs/ubifs/recovery.c:1550 [ubifs]ubifs_recover_size =_ "UBIFS DBG rcvry (pid %d): removing ino %lu\012"
+fs/ubifs/recovery.c:1444 [ubifs]fix_size_in_place =_ "UBIFS DBG rcvry (pid %d): inode %lu at %d:%d size %lld -> %lld\012"
+fs/ubifs/recovery.c:1486 [ubifs]inode_fix_size =_ "UBIFS DBG rcvry (pid %d): ino %lu size %lld -> %lld\012"
+fs/ubifs/ioctl.c:146 [ubifs]ubifs_ioctl =_ "UBIFS DBG gen (pid %d): get flags: %#x, i_flags %#x\012"
+fs/ubifs/ioctl.c:172 [ubifs]ubifs_ioctl =_ "UBIFS DBG gen (pid %d): set flags: %#x, i_flags %#x\012"
+fs/ubifs/lpt_commit.c:720 [ubifs]lpt_tgc_start =_ "UBIFS DBG lp (pid %d): LEB %d\012"
+fs/ubifs/lpt_commit.c:133 [ubifs]upd_ltab =_ "UBIFS DBG lp (pid %d): LEB %d free %d dirty %d to %d +%d\012"
+fs/ubifs/lpt_commit.c:1110 [ubifs]lpt_gc_lnum =_ "UBIFS DBG lp (pid %d): LEB %d\012"
+fs/ubifs/lpt_commit.c:744 [ubifs]lpt_tgc_end =_ "UBIFS DBG lp (pid %d): LEB %d\012"
+fs/ubifs/lpt_commit.c:1693 [ubifs]dbg_check_ltab =_ "UBIFS DBG lp (pid %d): succeeded\012"
+fs/ubifs/lpt_commit.c:1605 [ubifs]dbg_check_ltab_lnum =_ "UBIFS DBG lp (pid %d): LEB %d\012"
+fs/ubifs/lpt_commit.c:1185 [ubifs]ubifs_lpt_start_commit =_ "UBIFS DBG lp (pid %d): \012"
+fs/ubifs/lpt_commit.c:1215 [ubifs]ubifs_lpt_start_commit =_ "UBIFS DBG cmt (pid %d): no cnodes to commit\012"
+fs/ubifs/lpt_commit.c:116 [ubifs]get_cnodes_to_commit =_ "UBIFS DBG cmt (pid %d): committing %d cnodes\012"
+fs/ubifs/lpt_commit.c:117 [ubifs]get_cnodes_to_commit =_ "UBIFS DBG lp (pid %d): committing %d cnodes\012"
+fs/ubifs/lpt_commit.c:1287 [ubifs]ubifs_lpt_end_commit =_ "UBIFS DBG lp (pid %d): \012"
+fs/ubifs/lpt_commit.c:526 [ubifs]write_cnodes =_ "UBIFS DBG lp (pid %d): LPT root is at %d:%d\012"
+fs/ubifs/lpt_commit.c:527 [ubifs]write_cnodes =_ "UBIFS DBG lp (pid %d): LPT head is at %d:%d\012"
+fs/ubifs/lpt_commit.c:528 [ubifs]write_cnodes =_ "UBIFS DBG lp (pid %d): LPT ltab is at %d:%d\012"
+fs/ubifs/lpt_commit.c:530 [ubifs]write_cnodes =_ "UBIFS DBG lp (pid %d): LPT lsave is at %d:%d\012"
+fs/ubifs/tnc_misc.c:295 [ubifs]read_znode =_ "UBIFS DBG tnc (pid %d): LEB %d:%d, level %d, %d branch\012"
+fs/ubifs/tnc_misc.c:481 [ubifs]ubifs_tnc_read_node =_ "UBIFS DBG tnc (pid %d): key %s\012"
+fs/ubifs/tnc_misc.c:490 [ubifs]ubifs_tnc_read_node =_ "UBIFS DBG tnc (pid %d): looked for key %s\012"
+fs/ubifs/tnc_misc.c:491 [ubifs]ubifs_tnc_read_node =_ "UBIFS DBG tnc (pid %d): but found node's key %s\012"
+fs/ubifs/xattr.c:687 [ubifs]xattr_set =_ "UBIFS DBG gen (pid %d): xattr '%s', host ino %lu ('%pd'), size %zd\012"
+fs/ubifs/xattr.c:675 [ubifs]xattr_get =_ "UBIFS DBG gen (pid %d): xattr '%s', ino %lu ('%pd'), buf size %zd\012"
+fs/ubifs/xattr.c:407 [ubifs]ubifs_listxattr =_ "UBIFS DBG gen (pid %d): ino %lu ('%pd'), buffer size %zd\012"
+fs/autofs/inode.c:55 [autofs4]autofs_kill_sb =_ "shutting down\012"
+fs/autofs/inode.c:234 [autofs4]autofs_fill_super =_ "starting up, sbi = %p\012"
+fs/autofs/inode.c:322 [autofs4]autofs_fill_super =_ "pipe fd = %d, pgrp = %u\012"
+fs/autofs/root.c:249 [autofs4]autofs_mount_wait =_ "waiting for mount name=%pd\012"
+fs/autofs/root.c:251 [autofs4]autofs_mount_wait =_ "mount wait done status=%d\012"
+fs/autofs/root.c:877 [autofs4]autofs_root_ioctl_unlocked =_ "cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u\012"
+fs/autofs/root.c:848 [autofs4]autofs_ask_umount =_ "may umount %d\012"
+fs/autofs/root.c:538 [autofs4]autofs_dir_symlink =_ "%s <- %pd\012"
+fs/autofs/root.c:109 [autofs4]autofs_dentry_release =_ "releasing %p\012"
+fs/autofs/root.c:79 [autofs4]autofs_dir_open =_ "file=%p dentry=%p %pd\012"
+fs/autofs/root.c:746 [autofs4]autofs_dir_mkdir =_ "dentry %p, creating %pd\012"
+fs/autofs/root.c:481 [autofs4]autofs_lookup =_ "name = %pd\012"
+fs/autofs/root.c:492 [autofs4]autofs_lookup =_ "pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d\012"
+fs/autofs/root.c:404 [autofs4]autofs_d_manage =_ "dentry=%p %pd\012"
+fs/autofs/root.c:316 [autofs4]autofs_d_automount =_ "dentry=%p %pd\012"
+fs/autofs/root.c:689 [autofs4]autofs_dir_rmdir =_ "dentry %p, removing %pd\012"
+fs/autofs/waitq.c:25 [autofs4]autofs_catatonic_mode =_ "entering catatonic mode\012"
+fs/autofs/waitq.c:93 [autofs4]autofs_notify_daemon =_ "wait id = 0x%08lx, name = %.*s, type=%d\012"
+fs/autofs/waitq.c:460 [autofs4]autofs_wait =_ "new wait id = 0x%08lx, name = %.*s, nfy=%d\012"
+fs/autofs/waitq.c:470 [autofs4]autofs_wait =_ "existing wait id = 0x%08lx, name = %.*s, nfy=%d\012"
+fs/autofs/expire.c:36 [autofs4]autofs_mount_busy =_ "dentry %p %pd\012"
+fs/autofs/expire.c:68 [autofs4]autofs_mount_busy =_ "returning = %d\012"
+fs/autofs/expire.c:155 [autofs4]autofs_direct_busy =_ "top %p %pd\012"
+fs/autofs/expire.c:340 [autofs4]should_expire =_ "checking mountpoint %p %pd\012"
+fs/autofs/expire.c:359 [autofs4]should_expire =_ "checking symlink %p %pd\012"
+fs/autofs/expire.c:190 [autofs4]autofs_tree_busy =_ "top %p %pd\012"
+fs/autofs/expire.c:198 [autofs4]autofs_tree_busy =_ "dentry %p %pd\012"
+fs/autofs/expire.c:248 [autofs4]autofs_check_leaves =_ "parent %p %pd\012"
+fs/autofs/expire.c:252 [autofs4]autofs_check_leaves =_ "dentry %p %pd\012"
+fs/autofs/expire.c:482 [autofs4]autofs_expire_indirect =_ "returning %p %pd\012"
+fs/autofs/expire.c:518 [autofs4]autofs_expire_wait =_ "waiting for expire %p name=%pd\012"
+fs/autofs/expire.c:523 [autofs4]autofs_expire_wait =_ "expire done status=%d\012"
+fs/fuse/inode.c:1513 [fuse]fuse_exit =_ "exit\012"
+fs/debugfs/inode.c:318 [debugfs]start_creating =_ "creating file '%s'\012"
+block/partitions/efi.c:353 [efi]is_gpt_valid =_ "GUID Partition Table Header signature is wrong:%lld != %lld\012"
+block/partitions/efi.c:362 [efi]is_gpt_valid =_ "GUID Partition Table Header size is too large: %u > %u\012"
+block/partitions/efi.c:370 [efi]is_gpt_valid =_ "GUID Partition Table Header size is too small: %u < %zu\012"
+block/partitions/efi.c:381 [efi]is_gpt_valid =_ "GUID Partition Table Header CRC is wrong: %x != %x\012"
+block/partitions/efi.c:391 [efi]is_gpt_valid =_ "GPT my_lba incorrect: %lld != %lld\012"
+block/partitions/efi.c:402 [efi]is_gpt_valid =_ "GPT: first_usable_lba incorrect: %lld > %lld\012"
+block/partitions/efi.c:408 [efi]is_gpt_valid =_ "GPT: last_usable_lba incorrect: %lld > %lld\012"
+block/partitions/efi.c:414 [efi]is_gpt_valid =_ "GPT: last_usable_lba incorrect: %lld > %lld\012"
+block/partitions/efi.c:419 [efi]is_gpt_valid =_ "GUID Partition Entry Size check failed.\012"
+block/partitions/efi.c:428 [efi]is_gpt_valid =_ "GUID Partition Table is too large: %llu > %lu bytes\012"
+block/partitions/efi.c:439 [efi]is_gpt_valid =_ "GUID Partition Entry Array CRC check failed.\012"
+block/partitions/efi.c:221 [efi]is_pmbr_valid =_ "GPT: mbr size in lba (%u) different than whole disk (%u).\012"
+block/partitions/efi.c:612 [efi]find_valid_gpt =_ "Device has a %s MBR\012"
+block/partitions/efi.c:691 [efi]efi_partition =_ "GUID Partition Table is valid!  Yea!\012"
+block/bsg.c:224 [bsg]bsg_put_device =_ "%s: tearing down\012"
+block/bsg.c:259 [bsg]bsg_add_device =_ "%s: bound to <%s>, max queue %d\012"
+lib/percpu-refcount.c:144 [percpu_refcount]percpu_ref_switch_to_atomic_rcu =_ "global %ld percpu %ld"
+drivers/gpio/gpiolib.c:3165 [gpiolib]gpiod_set_debounce =_ "gpio-%d (%s): %s: missing set() or set_config() operations\012"
+drivers/gpio/gpiolib.c:3210 [gpiolib]gpiod_set_transitory =_ "Persistence not supported for GPIO %d\012"
+drivers/gpio/gpiolib.c:1200 [gpiolib]gpiochip_setup_dev =_ "(%s): added GPIO chardev (%d:%d)\012"
+drivers/gpio/gpiolib.c:1210 [gpiolib]gpiochip_setup_dev =_ "%s: registered GPIOs %d to %d on device: %s (%s)\012"
+drivers/gpio/gpiolib.c:2779 [gpiolib]gpiod_request =_ "gpio-%d (%s): %s: status %d\012"
+drivers/gpio/gpiolib.c:625 [gpiolib]linehandle_create =_ "registered chardev handle for line %d\012"
+drivers/gpio/gpiolib.c:660 [gpiolib]linehandle_create =_ "registered chardev handle for %d lines\012"
+drivers/gpio/gpiolib.c:4499 [gpiolib]gpiod_configure_flags =_ "no flags found for %s\012"
+drivers/gpio/gpiolib.c:4538 [gpiolib]gpiod_get_index =_ "GPIO lookup for consumer %s\012"
+drivers/gpio/gpiolib.c:4543 [gpiolib]gpiod_get_index =_ "using device tree for GPIO lookup\012"
+drivers/gpio/gpiolib.c:4556 [gpiolib]gpiod_get_index =_ "using lookup tables for GPIO lookup\012"
+drivers/gpio/gpiolib.c:4561 [gpiolib]gpiod_get_index =_ "No GPIO consumer %s found\012"
+drivers/gpio/gpiolib.c:4590 [gpiolib]gpiod_get_index =_ "setup of GPIO %s failed\012"
+drivers/gpio/gpiolib.c:4868 [gpiolib]gpiod_get_array =_ "GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\012"
+drivers/gpio/gpiolib.c:198 [gpiolib]gpiochip_find_base =_ "%s: found new base at %d\012"
+drivers/gpio/gpiolib-of.c:261 [gpiolib_of]of_get_named_gpiod_flags =_ "%s: can't parse '%s' property of node '%pOF[%d]'\012"
+drivers/gpio/gpiolib-of.c:280 [gpiolib_of]of_get_named_gpiod_flags =_ "%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\012"
+drivers/gpio/gpiolib-sysfs.c:531 [gpiolib_sysfs]unexport_store =_ "%s: status %d\012"
+drivers/gpio/gpiolib-sysfs.c:579 [gpiolib_sysfs]gpiod_export =_ "%s: called too early!\012"
+drivers/gpio/gpiolib-sysfs.c:584 [gpiolib_sysfs]gpiod_export =_ "%s: invalid gpio descriptor\012"
+drivers/gpio/gpiolib-sysfs.c:606 [gpiolib_sysfs]gpiod_export =_ "gpio-%d (%s): %s: unavailable (requested=%d, exported=%d)\012"
+drivers/gpio/gpiolib-sysfs.c:646 [gpiolib_sysfs]gpiod_export =_ "gpio-%d (%s): %s: status %d\012"
+drivers/gpio/gpiolib-sysfs.c:495 [gpiolib_sysfs]export_store =_ "%s: status %d\012"
+drivers/video/backlight/backlight.c:358 [backlight]backlight_device_register =_ "backlight_device_register: name=%s\012"
+drivers/video/backlight/backlight.c:190 [backlight]backlight_device_set_brightness =_ "set brightness to %lu\012"
+drivers/video/backlight/backlight.c:153 [backlight]bl_power_store =_ "set power to %lu\012"
+drivers/video/of_display_timing.c:175 [of_display_timing]of_get_display_timings =_ "%pOF: using %pOFn as default timing\012"
+drivers/video/of_display_timing.c:235 [of_display_timing]of_get_display_timings =_ "%pOF: got %d timings. Using timing #%d as default\012"
+drivers/clk/clk.c:1844 [clk]__clk_speculate_rates =_ "%s: clk notifier callback for clock %s aborted with error %d\012"
+drivers/clk/clk.c:1939 [clk]clk_calc_new_rates =_ "%s: %s not gated but wants to reparent\012"
+drivers/clk/clk.c:1948 [clk]clk_calc_new_rates =_ "%s: clk %s can not be parent of clk %s\012"
+drivers/clk/clk.c:4320 [clk]of_clk_add_provider =_ "Added clock from %pOF\012"
+drivers/clk/clk.c:4357 [clk]of_clk_add_hw_provider =_ "Added clk_hw provider from %pOF\012"
+drivers/clk/clk.c:2163 [clk]clk_core_set_rate_nolock =_ "%s: failed to set %s rate\012"
+drivers/clk/clk.c:2487 [clk]clk_core_set_parent_nolock =_ "%s: clk %s can not be parent of clk %s\012"
+drivers/clk/clk.c:3305 [clk]__clk_core_init =_ "%s: clk %s already initialized\012"
+drivers/clk/clk-gpio.c:269 [clk_gpio]gpio_clk_driver_probe =_ "%pOFn: %s: GPIOs not yet available, retry later\012"
+drivers/clk/sifive/fu540-prci.c:604 [fu540_prci]sifive_fu540_prci_probe =_ "SiFive FU540 PRCI probed\012"
+drivers/dma/dmaengine.c:593 [dmaengine]dma_get_slave_channel =_ "%s: failed to get %s: (%d)\012"
+drivers/dma/dmaengine.c:506 [dmaengine]private_candidate =_ "%s: wrong capabilities\012"
+drivers/dma/dmaengine.c:522 [dmaengine]private_candidate =_ "%s: %s busy\012"
+drivers/dma/dmaengine.c:527 [dmaengine]private_candidate =_ "%s: %s filter said false\012"
+drivers/dma/dmaengine.c:556 [dmaengine]find_candidate =_ "%s: %s module removed\012"
+drivers/dma/dmaengine.c:561 [dmaengine]find_candidate =_ "%s: failed to get %s: (%d)\012"
+drivers/dma/dmaengine.c:661 [dmaengine]__dma_request_channel =_ "%s: %s (%s)\012"
+drivers/dma/dmaengine.c:818 [dmaengine]dmaengine_get =_ "%s: failed to get %s: (%d)\012"
+drivers/dma/virt-dma.c:33 [virt_dma]vchan_tx_submit =_ "vchan %p: txd %p[%x]: submitted\012"
+drivers/dma/virt-dma.c:60 [virt_dma]vchan_tx_desc_free =_ "vchan %p: txd %p[%x]: freeing\012"
+drivers/dma/virt-dma.c:120 [virt_dma]vchan_dma_desc_free_list =_ "txd %p: freeing\012"
+drivers/dma/of-dma.c:39 [of_dma]of_dma_find_controller =_ "%s: can't find DMA controller %pOF\012"
+drivers/dma/dmatest.c:922 [dmatest]dmatest_cleanup_channel =_ "thread %s exited with status %d\012"
+drivers/dma/dmatest.c:1123 [dmatest]stop_threaded_test =_ "dropped channel %s\012"
+drivers/dma/dmatest.c:456 [dmatest]dbg_result =_ "%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\012"
+drivers/dma/dmatest.c:841 [dmatest]dmatest_func =_ "%s: verifying source buffer...\012"
+drivers/dma/dmatest.c:851 [dmatest]dmatest_func =_ "%s: verifying dest buffer...\012"
+drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c:435 [dw_axi_dmac_platform]dma_chan_prep_dma_memcpy =_ "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx"
+drivers/tty/tty_io.c:3080 [tty_io]tty_device_create_release =_ "releasing...\012"
+drivers/tty/tty_io.c:1537 [tty_io]tty_release_checks =_ "%s %s: bad idx %d\012"
+drivers/tty/tty_io.c:1547 [tty_io]tty_release_checks =_ "%s %s: bad driver table[%d] = %p\012"
+drivers/tty/tty_io.c:1555 [tty_io]tty_release_checks =_ "%s %s: bad other table[%d] = %p\012"
+drivers/tty/tty_io.c:1559 [tty_io]tty_release_checks =_ "%s %s: bad link = %p\012"
+drivers/tty/tty_jobctrl.c:110 [tty_jobctrl]__proc_set_tty =_ "%s %s: current tty %s not NULL!!\012"
+drivers/tty/vt/vt_ioctl.c:1436 [vt_ioctl]vt_move_to_console =_ "Suspend: Can't switch VCs."
+drivers/tty/serial/serial_core.c:658 [serial_core]uart_flush_buffer =_ "uart_flush_buffer(%d) called\012"
+drivers/tty/serial/serial_core.c:1662 [serial_core]uart_hangup =_ "uart_hangup(%d)\012"
+drivers/tty/serial/serial_core.c:1633 [serial_core]uart_wait_until_sent =_ "uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\012"
+drivers/tty/serial/serial_core.c:1554 [serial_core]uart_close =_ "uart_close(%d) called\012"
+drivers/tty/serial/8250/8250_core.c:114 [8250]serial8250_interrupt =_ "%s(%d): start\012"
+drivers/tty/serial/8250/8250_core.c:140 [8250]serial8250_interrupt =_ "%s(%d): end\012"
+drivers/tty/serial/8250/8250_core.c:322 [8250]univ8250_setup_irq =_ "%s - using backup timer\012"
+drivers/tty/serial/8250/8250_port.c:1687 [8250_base]serial8250_read_char =_ "%s: handling break\012"
+drivers/tty/serial/8250/8250_port.c:2287 [8250_base]serial8250_do_startup =_ "%s - enabling bad tx status workarounds\012"
+drivers/tty/serial/8250/8250_dma.c:231 [8250_base]serial8250_request_dma =_ "got both dma channels\012"
+drivers/tty/serial/8250/8250_dma.c:264 [8250_base]serial8250_release_dma =_ "dma channels released\012"
+drivers/tty/serial/8250/8250_dwlib.c:94 [8250_base]dw8250_setup_port =_ "Designware UART version %c.%c%c\012"
+drivers/gpu/drm/drm_modes.c:723 [drm]of_get_drm_display_mode =_ "%pOF: got %dx%d display mode\012"
+drivers/gpu/drm/drm_print.c:146 [drm]__drm_printfn_debug =p "%s %pV"
+drivers/base/component.c:240 [component]try_to_bring_up_master =_ "trying to bring up master\012"
+drivers/base/component.c:196 [component]find_components =_ "Looking for component %zu\012"
+drivers/base/component.c:207 [component]find_components =_ "found component %s, duplicate %u\012"
+drivers/base/component.c:243 [component]try_to_bring_up_master =_ "master has incomplete components\012"
+drivers/base/component.c:249 [component]try_to_bring_up_master =_ "master is not for this component (%s)\012"
+drivers/base/component.c:678 [component]__component_add =_ "adding component (ops %ps)\012"
+drivers/base/component.c:593 [component]component_bind =_ "binding %s (ops %ps)\012"
+drivers/base/core.c:455 [core]__device_link_del =_ "Dropping the link to %s\012"
+drivers/base/core.c:1360 [core]devm_attr_groups_remove =_ "%s: removing groups %p\012"
+drivers/base/core.c:1351 [core]devm_attr_group_remove =_ "%s: removing group %p\012"
+drivers/base/core.c:2812 [core]device_create_release =_ "device: '%s': %s\012"
+drivers/base/core.c:3041 [core]device_rename =_ "renaming to %s\012"
+drivers/base/core.c:2407 [core]device_unregister =_ "device: '%s': %s\012"
+drivers/base/core.c:2136 [core]device_add =_ "device: '%s': %s\012"
+drivers/base/core.c:1573 [core]devices_kset_move_last =_ "devices_kset: Moving %s to end of list\012"
+drivers/base/core.c:421 [core]device_link_add =_ "Linked as a consumer to %s\012"
+drivers/base/core.c:3111 [core]device_move =_ "device: '%s': %s: moving to '%s'\012"
+drivers/base/core.c:1559 [core]devices_kset_move_after =_ "devices_kset: Moving %s after %s\012"
+drivers/base/core.c:1543 [core]devices_kset_move_before =_ "devices_kset: Moving %s before %s\012"
+drivers/base/core.c:1203 [core]dev_uevent =_ "device: '%s': %s: bus uevent() returned %d\012"
+drivers/base/core.c:1212 [core]dev_uevent =_ "device: '%s': %s: class uevent() returned %d\012"
+drivers/base/core.c:1221 [core]dev_uevent =_ "device: '%s': %s: dev_type uevent() returned %d\012"
+drivers/base/bus.c:89 [bus]driver_release =_ "driver: '%s': %s\012"
+drivers/base/bus.c:888 [bus]bus_unregister =_ "bus: '%s': unregistering\012"
+drivers/base/bus.c:859 [bus]bus_register =_ "bus: '%s': registered\012"
+drivers/base/bus.c:450 [bus]bus_add_device =_ "bus: '%s': add device %s\012"
+drivers/base/bus.c:531 [bus]bus_remove_device =_ "bus: '%s': remove device %s\012"
+drivers/base/bus.c:603 [bus]bus_add_driver =_ "bus: '%s': add driver %s\012"
+drivers/base/bus.c:677 [bus]bus_remove_driver =_ "bus: '%s': remove driver %s\012"
+drivers/base/dd.c:113 [dd]deferred_probe_work_func =_ "Retrying from deferred list\012"
+drivers/base/dd.c:858 [dd]__device_attach_async_helper =_ "async probe completed\012"
+drivers/base/dd.c:127 [dd]driver_deferred_probe_add =_ "Added to deferred list\012"
+drivers/base/dd.c:137 [dd]driver_deferred_probe_del =_ "Removed from deferred list\012"
+drivers/base/dd.c:376 [dd]driver_bound =_ "driver: '%s': %s: bound to device '%s'\012"
+drivers/base/dd.c:907 [dd]__device_attach =_ "scheduling asynchronous probe\012"
+drivers/base/dd.c:505 [dd]really_probe =_ "Driver %s force probe deferral\012"
+drivers/base/dd.c:518 [dd]really_probe =_ "bus: '%s': %s: probing driver %s with device %s\012"
+drivers/base/dd.c:594 [dd]really_probe =_ "bus: '%s': %s: bound device %s to driver %s\012"
+drivers/base/dd.c:621 [dd]really_probe =_ "Driver %s requests probe deferral\012"
+drivers/base/dd.c:627 [dd]really_probe =_ "%s: probe of %s rejects match %d\012"
+drivers/base/dd.c:672 [dd]driver_probe_done =_ "%s: probe_count = %d\012"
+drivers/base/dd.c:714 [dd]driver_probe_device =_ "bus: '%s': %s: matched device %s with driver %s\012"
+drivers/base/dd.c:1024 [dd]__driver_attach_async_helper =_ "driver %s async attach completed: %d\012"
+drivers/base/dd.c:816 [dd]__device_attach_driver =_ "Device match requests probe deferral\012"
+drivers/base/dd.c:819 [dd]__device_attach_driver =_ "Bus failed to match device: %d"
+drivers/base/dd.c:1049 [dd]__driver_attach =_ "Device match requests probe deferral\012"
+drivers/base/dd.c:1052 [dd]__driver_attach =_ "Bus failed to match device: %d"
+drivers/base/dd.c:1064 [dd]__driver_attach =_ "probing driver %s asynchronously\012"
+drivers/base/class.c:53 [class]class_release =_ "class '%s': release.\012"
+drivers/base/class.c:59 [class]class_release =_ "class '%s' does not have a release() function, be careful\012"
+drivers/base/class.c:207 [class]class_create_release =_ "%s called for %s\012"
+drivers/base/class.c:200 [class]class_unregister =_ "device class '%s': unregistering\012"
+drivers/base/class.c:157 [class]__class_register =_ "device class '%s': registering\012"
+drivers/base/platform.c:534 [platform]platform_device_add =_ "Registering platform device '%s'. Parent at %s\012"
+drivers/base/platform.c:937 [platform]platform_unregister_drivers =_ "unregistering platform driver %ps\012"
+drivers/base/platform.c:902 [platform]__platform_register_drivers =_ "registering platform driver %ps\012"
+drivers/base/platform.c:916 [platform]__platform_register_drivers =_ "unregistering platform driver %ps\012"
+drivers/base/power/clock_ops.c:599 [clock_ops]pm_clk_notify =_ "%s() %ld\012"
+drivers/base/firmware_loader/main.c:556 [firmware_class]fw_set_page_data =_ "%s: fw-%s fw_priv=%p data=%p size=%u\012"
+drivers/base/firmware_loader/main.c:250 [firmware_class]__free_fw_priv =_ "%s: fw-%s fw_priv=%p data=%p size=%u\012"
+drivers/base/firmware_loader/main.c:701 [firmware_class]_request_firmware_prepare =_ "using built-in %s\012"
+drivers/base/firmware_loader/main.c:224 [firmware_class]alloc_lookup_fw_priv =_ "batched request - sharing the same struct fw_priv and lookup for multiple requests\012"
+drivers/base/firmware_loader/main.c:193 [firmware_class]__allocate_fw_priv =_ "%s: fw-%s fw_priv=%p\012"
+drivers/base/firmware_loader/main.c:504 [firmware_class]fw_get_filesystem_firmware =_ "loading %s failed for no such file or directory.\012"
+drivers/base/firmware_loader/main.c:520 [firmware_class]fw_get_filesystem_firmware =_ "direct-loading %s\012"
+drivers/base/regmap/regmap.c:1534 [regmap]_regmap_raw_write_impl =_ "Writing window %d/%zu\012"
+drivers/base/regmap/regcache.c:716 [regcache]regcache_sync_block_raw_flush =_ "Writing %zu bytes for %d registers from 0x%x-0x%x\012"
+drivers/base/regmap/regcache.c:219 [regcache]regcache_exit =_ "Destroying %s cache\012"
+drivers/base/regmap/regcache.c:191 [regcache]regcache_init =_ "Initializing %s cache\012"
+drivers/base/regmap/regcache.c:322 [regcache]regcache_default_sync =_ "Synced register %#x, value %#x\012"
+drivers/base/regmap/regcache.c:352 [regcache]regcache_sync =_ "Syncing %s cache\012"
+drivers/base/regmap/regcache.c:423 [regcache]regcache_sync_region =_ "Syncing %s cache from %d-%d\012"
+drivers/base/regmap/regcache.c:698 [regcache]regcache_sync_block_single =_ "Synced register %#x, value %#x\012"
+drivers/base/regmap/regmap-debugfs.c:549 [regmap_debugfs]regmap_debugfs_init =_ "regmap locking disabled - not creating debugfs entries\012"
+drivers/base/arch_topology.c:116 [arch_topology]topology_normalize_cpu_scale =_ "cpu_capacity: capacity_scale=%u\012"
+drivers/base/arch_topology.c:119 [arch_topology]topology_normalize_cpu_scale =_ "cpu_capacity: cpu=%d raw_capacity=%u\012"
+drivers/base/arch_topology.c:124 [arch_topology]topology_normalize_cpu_scale =_ "cpu_capacity: CPU%d cpu_capacity=%lu\012"
+drivers/base/arch_topology.c:152 [arch_topology]topology_parse_cpu_capacity =_ "cpu_capacity: %pOF cpu_capacity=%u (raw)\012"
+drivers/misc/eeprom/at24.c:376 [at24]at24_regmap_write =_ "write %zu@%d --> %d (%ld)\012"
+drivers/misc/eeprom/at24.c:316 [at24]at24_regmap_read =_ "read %zu@%d --> %d (%ld)\012"
+drivers/mfd/syscon.c:260 [syscon]syscon_probe =_ "regmap %pR registered\012"
+drivers/dma-buf/dma-buf.c:1260 [dma_buf]dma_buf_init_debugfs =_ "dma_buf: debugfs: failed to create node bufinfo\012"
+drivers/mtd/mtdcore.c:392 [mtd]mtd_debugfs_populate =_ "won't show data in debugfs\012"
+drivers/mtd/mtdcore.c:689 [mtd]add_mtd_device =_ "mtd: Giving out device %d to %s\012"
+drivers/mtd/mtdcore.c:777 [mtd]mtd_set_dev_defaults =_ "mtd device won't show a device symlink in sysfs\012"
+drivers/mtd/mtdsuper.c:31 [mtd]mtd_test_super =_ "MTDSB: Match on device %d (\042%s\042)\012"
+drivers/mtd/mtdsuper.c:36 [mtd]mtd_test_super =_ "MTDSB: No match, device %d (\042%s\042), device %d (\042%s\042)\012"
+drivers/mtd/mtdsuper.c:72 [mtd]mtd_get_sb =_ "MTDSB: Device %d (\042%s\042) is already mounted\012"
+drivers/mtd/mtdsuper.c:77 [mtd]mtd_get_sb =_ "MTDSB: New superblock for device %d (\042%s\042)\012"
+drivers/mtd/mtdsuper.c:131 [mtd]get_tree_mtd =_ "MTDSB: dev_name \042%s\042\012"
+drivers/mtd/mtdsuper.c:146 [mtd]get_tree_mtd =_ "MTDSB: mtd:%%s, name \042%s\042\012"
+drivers/mtd/mtdsuper.c:162 [mtd]get_tree_mtd =_ "MTDSB: mtd%%d, mtdnr %d\012"
+drivers/mtd/mtdsuper.c:178 [mtd]get_tree_mtd =_ "MTDSB: lookup_bdev() returned 0\012"
+drivers/mtd/mtdpart.c:838 [mtd]mtd_part_do_parse =_ "%s: parser %s: %i\012"
+drivers/mtd/mtdpart.c:981 [mtd]parse_mtd_partitions =_ "%s: parsing partitions %s\012"
+drivers/mtd/mtdpart.c:986 [mtd]parse_mtd_partitions =_ "%s: got parser %s\012"
+drivers/mtd/mtdchar.c:104 [mtd]mtdchar_close =_ "MTD_close\012"
+drivers/mtd/mtdchar.c:55 [mtd]mtdchar_open =_ "MTD_open\012"
+drivers/mtd/mtdchar.c:237 [mtd]mtdchar_write =_ "MTD_write\012"
+drivers/mtd/mtdchar.c:147 [mtd]mtdchar_read =_ "MTD_read\012"
+drivers/mtd/mtdchar.c:642 [mtd]mtdchar_ioctl =_ "MTD_ioctl\012"
+drivers/mtd/parsers/ofpart.c:50 [ofpart]parse_fixed_partitions =_ "%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\012"
+drivers/mtd/parsers/ofpart.c:88 [ofpart]parse_fixed_partitions =_ "%s: ofpart partition %pOF (%pOF) missing reg property.\012"
+drivers/mtd/parsers/ofpart.c:101 [ofpart]parse_fixed_partitions =_ "%s: ofpart partition %pOF (%pOF) error parsing reg property.\012"
+drivers/mtd/mtdblock.c:88 [mtdblock]write_cached_data =_ "mtdblock: writing cached data for \042%s\042 at 0x%lx, size 0x%x\012"
+drivers/mtd/mtdblock.c:275 [mtdblock]mtdblock_release =_ "mtdblock_release\012"
+drivers/mtd/mtdblock.c:291 [mtdblock]mtdblock_release =_ "ok\012"
+drivers/mtd/mtdblock.c:250 [mtdblock]mtdblock_open =_ "mtdblock_open\012"
+drivers/mtd/mtdblock.c:266 [mtdblock]mtdblock_open =_ "ok\012"
+drivers/mtd/mtdblock.c:186 [mtdblock]do_cached_read =_ "mtdblock: read on \042%s\042 at 0x%lx, size 0x%x\012"
+drivers/mtd/mtdblock.c:116 [mtdblock]do_cached_write =_ "mtdblock: write on \042%s\042 at 0x%lx, size 0x%x\012"
+drivers/mtd/spi-nor/spi-nor.c:2550 [spi_nor]spi_nor_read =_ "from 0x%08x, len %zd\012"
+drivers/mtd/spi-nor/spi-nor.c:2674 [spi_nor]spi_nor_write =_ "to 0x%08x, len %zd\012"
+drivers/mtd/spi-nor/spi-nor.c:2590 [spi_nor]sst_write =_ "to 0x%08x, len %zd\012"
+drivers/mtd/spi-nor/spi-nor.c:1247 [spi_nor]spi_nor_erase =_ "at 0x%llx, len %lld\012"
+drivers/mtd/spi-nor/spi-nor.c:862 [spi_nor]erase_chip =_ " %lldKiB\012"
+drivers/mtd/spi-nor/spi-nor.c:4344 [spi_nor]spi_nor_default_setup =_ "SPI n-n-n protocols are not supported.\012"
+drivers/mtd/spi-nor/spi-nor.c:4965 [spi_nor]spi_nor_scan =_ "mtd .name = %s, .size = 0x%llx (%lldMiB), .erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\012"
+drivers/mtd/spi-nor/spi-nor.c:4976 [spi_nor]spi_nor_scan =_ "mtd.eraseregions[%d] = { .offset = 0x%llx, .erasesize = 0x%.8x (%uKiB), .numblocks = %d }\012"
+drivers/mtd/ubi/vtbl.c:293 [ubi]create_vtbl =_ "UBI DBG gen (pid %d): create volume table (copy #%d)\012"
+drivers/mtd/ubi/vtbl.c:396 [ubi]process_lvol =_ "UBI DBG gen (pid %d): check layout volume\012"
+drivers/mtd/ubi/vmt.c:170 [ubi]ubi_create_volume =_ "UBI DBG gen (pid %d): search for vacant volume ID\012"
+drivers/mtd/ubi/vmt.c:187 [ubi]ubi_create_volume =_ "UBI DBG gen (pid %d): create device %d, volume %d, %llu bytes, type %d, name %s\012"
+drivers/mtd/ubi/vmt.c:346 [ubi]ubi_remove_volume =_ "UBI DBG gen (pid %d): remove device %d, volume %d\012"
+drivers/mtd/ubi/vmt.c:424 [ubi]ubi_resize_volume =_ "UBI DBG gen (pid %d): re-size device %d, volume %d to from %d to %d PEBs\012"
+drivers/mtd/ubi/vmt.c:580 [ubi]ubi_add_volume =_ "UBI DBG gen (pid %d): add volume %d\012"
+drivers/mtd/ubi/vmt.c:621 [ubi]ubi_free_volume =_ "UBI DBG gen (pid %d): free volume %d\012"
+drivers/mtd/ubi/upd.c:46 [ubi]set_update_marker =_ "UBI DBG gen (pid %d): set update marker for volume %d\012"
+drivers/mtd/ubi/upd.c:50 [ubi]set_update_marker =_ "UBI DBG gen (pid %d): already set\012"
+drivers/mtd/ubi/upd.c:80 [ubi]clear_update_marker =_ "UBI DBG gen (pid %d): clear update marker for volume %d\012"
+drivers/mtd/ubi/upd.c:230 [ubi]write_leb =_ "UBI DBG gen (pid %d): all %d bytes contain 0xFF - skip\012"
+drivers/mtd/ubi/upd.c:119 [ubi]ubi_start_update =_ "UBI DBG gen (pid %d): start update of volume %d, %llu bytes\012"
+drivers/mtd/ubi/upd.c:174 [ubi]ubi_start_leb_change =_ "UBI DBG gen (pid %d): start changing LEB %d:%d, %u bytes\012"
+drivers/mtd/ubi/upd.c:271 [ubi]ubi_more_update_data =_ "UBI DBG gen (pid %d): write %d of %lld bytes, %lld already passed\012"
+drivers/mtd/ubi/upd.c:386 [ubi]ubi_more_leb_change_data =_ "UBI DBG gen (pid %d): write %d of %lld bytes, %lld already passed\012"
+drivers/mtd/ubi/build.c:576 [ubi]io_init =_ "UBI DBG gen (pid %d): sizeof(struct ubi_ainf_peb) %zu\012"
+drivers/mtd/ubi/build.c:577 [ubi]io_init =_ "UBI DBG gen (pid %d): sizeof(struct ubi_wl_entry) %zu\012"
+drivers/mtd/ubi/build.c:650 [ubi]io_init =_ "UBI DBG gen (pid %d): min_io_size      %d\012"
+drivers/mtd/ubi/build.c:651 [ubi]io_init =_ "UBI DBG gen (pid %d): max_write_size   %d\012"
+drivers/mtd/ubi/build.c:652 [ubi]io_init =_ "UBI DBG gen (pid %d): hdrs_min_io_size %d\012"
+drivers/mtd/ubi/build.c:653 [ubi]io_init =_ "UBI DBG gen (pid %d): ec_hdr_alsize    %d\012"
+drivers/mtd/ubi/build.c:654 [ubi]io_init =_ "UBI DBG gen (pid %d): vid_hdr_alsize   %d\012"
+drivers/mtd/ubi/build.c:671 [ubi]io_init =_ "UBI DBG gen (pid %d): vid_hdr_offset   %d\012"
+drivers/mtd/ubi/build.c:672 [ubi]io_init =_ "UBI DBG gen (pid %d): vid_hdr_aloffset %d\012"
+drivers/mtd/ubi/build.c:673 [ubi]io_init =_ "UBI DBG gen (pid %d): vid_hdr_shift    %d\012"
+drivers/mtd/ubi/build.c:674 [ubi]io_init =_ "UBI DBG gen (pid %d): leb_start        %d\012"
+drivers/mtd/ubi/build.c:700 [ubi]io_init =_ "UBI DBG gen (pid %d): max_erroneous    %d\012"
+drivers/mtd/ubi/build.c:461 [ubi]uif_init =_ "UBI DBG gen (pid %d): %s major is %u\012"
+drivers/mtd/ubi/cdev.c:123 [ubi]vol_cdev_release =_ "UBI DBG gen (pid %d): release device %d, volume %d, mode %d\012"
+drivers/mtd/ubi/cdev.c:134 [ubi]vol_cdev_release =_ "UBI DBG gen (pid %d): only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel\012"
+drivers/mtd/ubi/cdev.c:107 [ubi]vol_cdev_open =_ "UBI DBG gen (pid %d): open device %d, volume %d, mode %d\012"
+drivers/mtd/ubi/cdev.c:182 [ubi]vol_cdev_read =_ "UBI DBG gen (pid %d): read %zd bytes from offset %lld of volume %d\012"
+drivers/mtd/ubi/cdev.c:196 [ubi]vol_cdev_read =_ "UBI DBG gen (pid %d): read from corrupted volume %d\012"
+drivers/mtd/ubi/cdev.c:852 [ubi]ubi_cdev_ioctl =_ "UBI DBG gen (pid %d): create volume\012"
+drivers/mtd/ubi/cdev.c:881 [ubi]ubi_cdev_ioctl =_ "UBI DBG gen (pid %d): remove volume\012"
+drivers/mtd/ubi/cdev.c:913 [ubi]ubi_cdev_ioctl =_ "UBI DBG gen (pid %d): re-size volume\012"
+drivers/mtd/ubi/cdev.c:945 [ubi]ubi_cdev_ioctl =_ "UBI DBG gen (pid %d): re-name volumes\012"
+drivers/mtd/ubi/cdev.c:757 [ubi]rename_volumes =_ "UBI DBG gen (pid %d): will rename volume %d from \042%s\042 to \042%s\042\012"
+drivers/mtd/ubi/cdev.c:815 [ubi]rename_volumes =_ "UBI DBG gen (pid %d): will remove volume %d, name \042%s\042\012"
+drivers/mtd/ubi/cdev.c:1019 [ubi]ctrl_cdev_ioctl =_ "UBI DBG gen (pid %d): attach MTD device\012"
+drivers/mtd/ubi/cdev.c:1060 [ubi]ctrl_cdev_ioctl =_ "UBI DBG gen (pid %d): detach MTD device\012"
+drivers/mtd/ubi/cdev.c:262 [ubi]vol_cdev_direct_write =_ "UBI DBG gen (pid %d): requested: write %zd bytes to offset %lld of volume %u\012"
+drivers/mtd/ubi/cdev.c:484 [ubi]vol_cdev_ioctl =_ "UBI DBG gen (pid %d): erase LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:520 [ubi]ubi_leb_write =_ "UBI DBG gen (pid %d): write %d bytes to LEB %d:%d:%d\012"
+drivers/mtd/ubi/kapi.c:565 [ubi]ubi_leb_change =_ "UBI DBG gen (pid %d): atomically write %d bytes to LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:665 [ubi]ubi_leb_unmap =_ "UBI DBG gen (pid %d): unmap LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:605 [ubi]ubi_leb_erase =_ "UBI DBG gen (pid %d): erase LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:701 [ubi]ubi_leb_map =_ "UBI DBG gen (pid %d): map LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:739 [ubi]ubi_is_mapped =_ "UBI DBG gen (pid %d): test LEB %d:%d\012"
+drivers/mtd/ubi/kapi.c:429 [ubi]ubi_leb_read =_ "UBI DBG gen (pid %d): read %d bytes from LEB %d:%d:%d\012"
+drivers/mtd/ubi/kapi.c:469 [ubi]ubi_leb_read_sg =_ "UBI DBG gen (pid %d): read %d bytes from LEB %d:%d:%d\012"
+drivers/mtd/ubi/kapi.c:331 [ubi]ubi_close_volume =_ "UBI DBG gen (pid %d): close device %d, volume %d, mode %d\012"
+drivers/mtd/ubi/kapi.c:121 [ubi]ubi_open_volume =_ "UBI DBG gen (pid %d): open device %d, volume %d, mode %d\012"
+drivers/mtd/ubi/kapi.c:239 [ubi]ubi_open_volume_nm =_ "UBI DBG gen (pid %d): open device %d, volume %s, mode %d\012"
+drivers/mtd/ubi/kapi.c:295 [ubi]ubi_open_volume_path =_ "UBI DBG gen (pid %d): open volume %s, mode %d\012"
+drivers/mtd/ubi/eba.c:961 [ubi]try_write_vid_and_data =_ "UBI DBG eba (pid %d): write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d\012"
+drivers/mtd/ubi/eba.c:468 [ubi]ubi_eba_unmap_leb =_ "UBI DBG eba (pid %d): erase LEB %d:%d, PEB %d\012"
+drivers/mtd/ubi/eba.c:622 [ubi]ubi_eba_read_leb =_ "UBI DBG eba (pid %d): read %d bytes from offset %d of LEB %d:%d (unmapped)\012"
+drivers/mtd/ubi/eba.c:630 [ubi]ubi_eba_read_leb =_ "UBI DBG eba (pid %d): read %d bytes from offset %d of LEB %d:%d, PEB %d\012"
+drivers/mtd/ubi/eba.c:1031 [ubi]ubi_eba_write_leb =_ "UBI DBG eba (pid %d): write %d bytes at offset %d of LEB %d:%d, PEB %d\012"
+drivers/mtd/ubi/eba.c:1234 [ubi]ubi_eba_atomic_leb_change =_ "UBI DBG eba (pid %d): change LEB %d:%d\012"
+drivers/mtd/ubi/eba.c:1315 [ubi]ubi_eba_copy_leb =_ "UBI DBG wl (pid %d): copy LEB %d:%d, PEB %d to PEB %d\012"
+drivers/mtd/ubi/eba.c:1336 [ubi]ubi_eba_copy_leb =_ "UBI DBG wl (pid %d): volume %d is being removed, cancel\012"
+drivers/mtd/ubi/eba.c:1357 [ubi]ubi_eba_copy_leb =_ "UBI DBG wl (pid %d): contention on LEB %d:%d, cancel\012"
+drivers/mtd/ubi/eba.c:1368 [ubi]ubi_eba_copy_leb =_ "UBI DBG wl (pid %d): LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel\012"
+drivers/mtd/ubi/eba.c:1380 [ubi]ubi_eba_copy_leb =_ "UBI DBG wl (pid %d): read %d bytes of data\012"
+drivers/mtd/ubi/eba.c:1618 [ubi]ubi_eba_init =_ "UBI DBG eba (pid %d): initialize EBA sub-system\012"
+drivers/mtd/ubi/eba.c:1690 [ubi]ubi_eba_init =_ "UBI DBG eba (pid %d): EBA sub-system is initialized\012"
+drivers/mtd/ubi/io.c:579 [ubi]ubi_io_is_bad =_ "UBI DBG io (pid %d): PEB %d is bad\012"
+drivers/mtd/ubi/io.c:120 [ubi]ubi_io_read =_ "UBI DBG io (pid %d): read %d bytes from PEB %d:%d\012"
+drivers/mtd/ubi/io.c:197 [ubi]ubi_io_read =_ "UBI DBG gen (pid %d): bit-flip (emulated)\012"
+drivers/mtd/ubi/io.c:693 [ubi]ubi_io_read_ec_hdr =_ "UBI DBG io (pid %d): read EC header from PEB %d\012"
+drivers/mtd/ubi/io.c:728 [ubi]ubi_io_read_ec_hdr =_ "UBI DBG bld (pid %d): no EC header found at PEB %d, only 0xFF bytes\012"
+drivers/mtd/ubi/io.c:745 [ubi]ubi_io_read_ec_hdr =_ "UBI DBG bld (pid %d): bad magic number at PEB %d: %08x instead of %08x\012"
+drivers/mtd/ubi/io.c:759 [ubi]ubi_io_read_ec_hdr =_ "UBI DBG bld (pid %d): bad EC header CRC at PEB %d, calculated %#08x, read %#08x\012"
+drivers/mtd/ubi/io.c:972 [ubi]ubi_io_read_vid_hdr =_ "UBI DBG io (pid %d): read VID header from PEB %d\012"
+drivers/mtd/ubi/io.c:990 [ubi]ubi_io_read_vid_hdr =_ "UBI DBG bld (pid %d): no VID header found at PEB %d, only 0xFF bytes\012"
+drivers/mtd/ubi/io.c:1003 [ubi]ubi_io_read_vid_hdr =_ "UBI DBG bld (pid %d): bad magic number at PEB %d: %08x instead of %08x\012"
+drivers/mtd/ubi/io.c:1017 [ubi]ubi_io_read_vid_hdr =_ "UBI DBG bld (pid %d): bad CRC at PEB %d, calculated %#08x, read %#08x\012"
+drivers/mtd/ubi/io.c:229 [ubi]ubi_io_write =_ "UBI DBG io (pid %d): write %d bytes to PEB %d:%d\012"
+drivers/mtd/ubi/io.c:802 [ubi]ubi_io_write_ec_hdr =_ "UBI DBG io (pid %d): write EC header to PEB %d\012"
+drivers/mtd/ubi/io.c:1056 [ubi]ubi_io_write_vid_hdr =_ "UBI DBG io (pid %d): write VID header to PEB %d\012"
+drivers/mtd/ubi/io.c:312 [ubi]do_sync_erase =_ "UBI DBG io (pid %d): erase PEB %d\012"
+drivers/mtd/ubi/wl.c:306 [ubi]prot_queue_add =_ "UBI DBG wl (pid %d): added PEB %d EC %d to the protection queue\012"
+drivers/mtd/ubi/wl.c:433 [ubi]prot_queue_del =_ "UBI DBG wl (pid %d): deleted PEB %d from the protection queue\012"
+drivers/mtd/ubi/wl.c:598 [ubi]schedule_erase =_ "UBI DBG wl (pid %d): schedule erasure of PEB %d, EC %d, torture %d\012"
+drivers/mtd/ubi/wl.c:1041 [ubi]ensure_wear_leveling =_ "UBI DBG wl (pid %d): schedule wear-leveling\012"
+drivers/mtd/ubi/wl.c:1043 [ubi]ensure_wear_leveling =_ "UBI DBG wl (pid %d): schedule scrubbing\012"
+drivers/mtd/ubi/wl.c:453 [ubi]sync_erase =_ "UBI DBG wl (pid %d): erase PEB %d, old EC %llu\012"
+drivers/mtd/ubi/wl.c:479 [ubi]sync_erase =_ "UBI DBG wl (pid %d): erased PEB %d, new EC %llu\012"
+drivers/mtd/ubi/wl.c:1091 [ubi]__erase_worker =_ "UBI DBG wl (pid %d): erase PEB %d EC %d LEB %d:%d\012"
+drivers/mtd/ubi/wl.c:520 [ubi]serve_prot_queue =_ "UBI DBG wl (pid %d): PEB %d EC %d protection over, move to used tree\012"
+drivers/mtd/ubi/wl.c:1204 [ubi]erase_worker =_ "UBI DBG wl (pid %d): cancel erasure of PEB %d EC %d\012"
+drivers/mtd/ubi/wl.c:632 [ubi]do_sync_erase =_ "UBI DBG wl (pid %d): sync erase of PEB %i\012"
+drivers/mtd/ubi/wl.c:696 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): cancel WL, a list is empty: free %d, used %d\012"
+drivers/mtd/ubi/wl.c:738 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): no WL needed: min used EC %d, max free EC %d\012"
+drivers/mtd/ubi/wl.c:748 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): move PEB %d EC %d to PEB %d EC %d\012"
+drivers/mtd/ubi/wl.c:759 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): scrub PEB %d to PEB %d\012"
+drivers/mtd/ubi/wl.c:791 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): PEB %d has no VID header\012"
+drivers/mtd/ubi/wl.c:801 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): PEB %d has no VID header but has bit-flips\012"
+drivers/mtd/ubi/wl.c:811 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): PEB %d has ECC errors, maybe from an interrupted erasure\012"
+drivers/mtd/ubi/wl.c:912 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): done\012"
+drivers/mtd/ubi/wl.c:906 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): PEB %d (LEB %d:%d) was put meanwhile, erase\012"
+drivers/mtd/ubi/wl.c:925 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)\012"
+drivers/mtd/ubi/wl.c:928 [ubi]wear_leveling_worker =_ "UBI DBG wl (pid %d): cancel moving PEB %d to PEB %d (%d)\012"
+drivers/mtd/ubi/wl.c:1234 [ubi]ubi_wl_put_peb =_ "UBI DBG wl (pid %d): PEB %d\012"
+drivers/mtd/ubi/wl.c:1249 [ubi]ubi_wl_put_peb =_ "UBI DBG wl (pid %d): PEB %d is being moved, wait\012"
+drivers/mtd/ubi/wl.c:1266 [ubi]ubi_wl_put_peb =_ "UBI DBG wl (pid %d): PEB %d is the target of data moving\012"
+drivers/mtd/ubi/wl.c:1343 [ubi]ubi_wl_scrub_peb =_ "UBI DBG wl (pid %d): the PEB %d is not in proper tree, retry\012"
+drivers/mtd/ubi/wl.c:1395 [ubi]ubi_wl_flush =_ "UBI DBG wl (pid %d): flush pending work for LEB %d:%d (%d pending works)\012"
+drivers/mtd/ubi/wl.c:1665 [ubi]ubi_thread =_ "UBI DBG wl (pid %d): background thread \042%s\042 is killed\012"
+drivers/mtd/ubi/wl.c:1809 [ubi]ubi_wl_init =_ "UBI DBG wl (pid %d): add PEB %d EC %d to the used tree\012"
+drivers/mtd/ubi/wl.c:1813 [ubi]ubi_wl_init =_ "UBI DBG wl (pid %d): add PEB %d EC %d to the scrub tree\012"
+drivers/mtd/ubi/wl.c:1861 [ubi]ubi_wl_init =_ "UBI DBG wl (pid %d): found %i PEBs\012"
+drivers/mtd/ubi/wl.c:1919 [ubi]ubi_wl_close =_ "UBI DBG wl (pid %d): close the WL sub-system\012"
+drivers/mtd/ubi/wl.c:2050 [ubi]produce_free_peb =_ "UBI DBG wl (pid %d): do one work synchronously\012"
+drivers/mtd/ubi/wl.c:408 [ubi]wl_get_wle =_ "UBI DBG wl (pid %d): PEB %d EC %d\012"
+drivers/mtd/ubi/attach.c:235 [ubi]add_to_list =_ "UBI DBG bld (pid %d): add to free: PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:237 [ubi]add_to_list =_ "UBI DBG bld (pid %d): add to erase: PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:239 [ubi]add_to_list =_ "UBI DBG bld (pid %d): add to alien: PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:147 [ubi]find_or_add_av =_ "UBI DBG bld (pid %d): added volume %d\012"
+drivers/mtd/ubi/attach.c:475 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): second PEB %d is newer, copy_flag is unset\012"
+drivers/mtd/ubi/attach.c:482 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): first PEB %d is newer, copy_flag is unset\012"
+drivers/mtd/ubi/attach.c:521 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): PEB %d CRC error: calculated %#08x, must be %#08x\012"
+drivers/mtd/ubi/attach.c:526 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): PEB %d CRC is OK\012"
+drivers/mtd/ubi/attach.c:534 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): second PEB %d is newer, copy_flag is set\012"
+drivers/mtd/ubi/attach.c:536 [ubi]ubi_compare_lebs =_ "UBI DBG bld (pid %d): first PEB %d is newer, copy_flag is set\012"
+drivers/mtd/ubi/attach.c:577 [ubi]ubi_add_to_av =_ "UBI DBG bld (pid %d): PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d\012"
+drivers/mtd/ubi/attach.c:610 [ubi]ubi_add_to_av =_ "UBI DBG bld (pid %d): this LEB already exists: PEB %d, sqnum %llu, EC %d\012"
+drivers/mtd/ubi/attach.c:751 [ubi]ubi_remove_av =_ "UBI DBG bld (pid %d): remove attaching information about volume %d\012"
+drivers/mtd/ubi/attach.c:827 [ubi]ubi_early_get_peb =_ "UBI DBG bld (pid %d): return free PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:847 [ubi]ubi_early_get_peb =_ "UBI DBG bld (pid %d): return PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:1395 [ubi]scan_all =_ "UBI DBG gen (pid %d): process PEB %d\012"
+drivers/mtd/ubi/attach.c:950 [ubi]scan_peb =_ "UBI DBG bld (pid %d): scan PEB %d\012"
+drivers/mtd/ubi/attach.c:272 [ubi]add_corrupted =_ "UBI DBG bld (pid %d): add to corrupted: PEB %d, EC %d\012"
+drivers/mtd/ubi/attach.c:310 [ubi]add_fastmap =_ "UBI DBG bld (pid %d): add to fastmap list: PEB %d, vol_id %d, sqnum: %llu\012"
+drivers/mtd/ubi/attach.c:1599 [ubi]ubi_attach =_ "UBI DBG gen (pid %d): max. sequence number:       %llu\012"
+drivers/spi/spi.c:3107 [spi]spi_setup =_ "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\012"
+drivers/spi/spi.c:607 [spi]spi_add_device =_ "registered child %s\012"
+drivers/spi/spi.c:2483 [spi]spi_register_controller =_ "registered %s %s\012"
+drivers/spi/spidev.c:763 [spidev]spidev_probe =_ "no minor number available!\012"
+drivers/spi/spidev.c:415 [spidev]spidev_ioctl =_ "spi mode %x\012"
+drivers/spi/spidev.c:432 [spidev]spidev_ioctl =_ "%csb first\012"
+drivers/spi/spidev.c:445 [spidev]spidev_ioctl =_ "%d bits per word\012"
+drivers/spi/spidev.c:458 [spidev]spidev_ioctl =_ "%d Hz (max)\012"
+drivers/spi/spidev.c:568 [spidev]spidev_open =_ "spidev: nothing for minor %d\012"
+drivers/spi/spidev.c:575 [spidev]spidev_open =_ "open/ENOMEM\012"
+drivers/spi/spidev.c:584 [spidev]spidev_open =_ "open/ENOMEM\012"
+drivers/spi/spi-dw-quad.c:423 [spi_dw_quad]qspi_hw_init =_ "Detected FIFO size: %u bytes\012"
+drivers/spi/spi-dw.c:459 [spi_dw]spi_hw_init =_ "Detected FIFO size: %u bytes\012"
+drivers/net/phy/phylink.c:343 [phylink]phylink_mac_config =_ "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\012"
+drivers/net/phy/phylink.c:343 [phylink]phylink_mac_config =_ "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\012"
+drivers/net/phy/phylink.c:770 [phylink]phylink_bringup_phy =_ "phy: setting supported %*pb advertising %*pb\012"
+drivers/net/phy/phylink.c:770 [phylink]phylink_bringup_phy =_ "phy: setting supported %*pb advertising %*pb\012"
+drivers/net/phy/phylink.c:719 [phylink]phylink_phy_change =_ "phy link %s %s/%s/%s\012"
+drivers/net/phy/phylink.c:719 [phylink]phylink_phy_change =_ "phy link %s %s/%s/%s\012"
+drivers/net/phy/phylink.c:948 [phylink]phylink_mac_change =_ "mac link %s\012"
+drivers/net/phy/phylink.c:948 [phylink]phylink_mac_change =_ "mac link %s\012"
+drivers/net/phy/phy.c:941 [libphy]phy_state_machine =_ "PHY state change %s -> %s\012"
+drivers/net/phy/phy_device.c:2334 [libphy]phy_driver_register =_ "%s: Registered new driver\012"
+drivers/net/phy/mdio_device.c:192 [libphy]mdio_driver_register =_ "mdio_driver_register: %s\012"
+drivers/net/phy/mdio_device.c:80 [libphy]mdio_device_register =_ "mdio_device_register\012"
+drivers/net/phy/mscc.c:1379 [mscc]vsc8574_config_pre_init =_ "FW CRC is not the expected one, patching FW\012"
+drivers/net/phy/mscc.c:1546 [mscc]vsc8584_config_pre_init =_ "FW CRC is not the expected one, patching FW\012"
+drivers/net/phy/realtek.c:202 [realtek]rtl8211f_config_init =_ "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\012"
+drivers/net/phy/realtek.c:206 [realtek]rtl8211f_config_init =_ "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\012"
+drivers/net/ethernet/cadence/macb_main.c:1992 [macb]macb_alloc_rx_buffers =_ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\012"
+drivers/net/ethernet/cadence/macb_main.c:3344 [macb]macb_configure_caps =_ "Cadence caps 0x%08x\012"
+drivers/net/ethernet/cadence/macb_main.c:2413 [macb]macb_open =_ "open\012"
+drivers/net/ethernet/cadence/macb_main.c:1879 [macb]macb_init_rx_buffer_size =_ "RX buffer must be multiple of %d bytes, expanding\012"
+drivers/net/ethernet/cadence/macb_main.c:1886 [macb]macb_init_rx_buffer_size =_ "mtu [%u] rx_buffer_size [%zu]\012"
+drivers/net/ethernet/cadence/macb_main.c:2012 [macb]macb_alloc_consistent =_ "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\012"
+drivers/net/ethernet/cadence/macb_main.c:2026 [macb]macb_alloc_consistent =_ "Allocated RX ring of %d bytes at %08lx (mapped %p)\012"
+drivers/net/ethernet/cadence/macb_main.c:2219 [macb]macb_configure_dma =_ "Cadence configure DMA with 0x%08x\012"
+drivers/net/ethernet/cadence/macb_main.c:3004 [macb]gem_add_flow_filter =_ "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\012"
+drivers/net/ethernet/cadence/macb_main.c:3058 [macb]gem_del_flow_filter =_ "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\012"
+drivers/net/ethernet/cadence/macb_main.c:1844 [macb]macb_start_xmit =_ "tx_head = %u, tx_tail = %u\012"
+drivers/net/ethernet/cadence/macb_main.c:1974 [macb]gem_alloc_rx_buffers =_ "Allocated %d RX struct sk_buff entries at %p\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1758 [stmmac]stmmac_stop_tx_dma =_ "DMA TX processes stopped in channel %d\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1745 [stmmac]stmmac_stop_rx_dma =_ "DMA RX processes stopped in channel %d\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1732 [stmmac]stmmac_start_tx_dma =_ "DMA TX processes started in channel %d\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:542 [stmmac]stmmac_hwtstamp_set =_ "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:287 [stmmac]print_pkt =_ "len = %d byte, buf addr: 0x%p\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:288 [stmmac]print_pkt =_ ""
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1719 [stmmac]stmmac_start_rx_dma =_ "DMA RX processes started in channel %d\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:991 [stmmac]stmmac_check_pcs_mode =_ "PCS RGMII support enabled\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:994 [stmmac]stmmac_check_pcs_mode =_ "PCS SGMII support enabled\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1303 [stmmac]init_dma_rx_desc_rings =_ "SKB addresses:\012skb\011\011skb data\011dma data\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1310 [stmmac]init_dma_rx_desc_rings =_ "(%s) dma_rx_phy=0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1378 [stmmac]init_dma_tx_desc_rings =_ "(%s) dma_tx_phy=0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:458 [stmmac]stmmac_get_tx_hwtstamp =_ "get valid TX hw timestamp %llu\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:1957 [stmmac]stmmac_tx_clean =_ "%s: restart transmit\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3467 [stmmac]stmmac_rx =_ "%s: descriptor ring:\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:489 [stmmac]stmmac_get_rx_hwtstamp =_ "get valid RX hw timestamp %llu\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:494 [stmmac]stmmac_get_rx_hwtstamp =_ "cannot get RX hw timestamp\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3048 [stmmac]stmmac_tso_xmit =_ "%s: stop transmitted packets\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3263 [stmmac]stmmac_xmit =_ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3272 [stmmac]stmmac_xmit =_ ">>> frame to be transmitted: "
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:3278 [stmmac]stmmac_xmit =_ "%s: stop transmitted packets\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:404 [stmmac]stmmac_eee_init =_ "disable EEE\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:420 [stmmac]stmmac_eee_init =_ "Energy-Efficient Ethernet initialized\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:261 [stmmac]dwmac1000_pmt =_ "GMAC: WOL Magic frame\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:265 [stmmac]dwmac1000_pmt =_ "GMAC: WOL on global unicast\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:237 [stmmac]dwmac1000_flow_ctrl =_ "GMAC Flow-Control:\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:239 [stmmac]dwmac1000_flow_ctrl =_ "\011Receive Flow-Control ON\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:243 [stmmac]dwmac1000_flow_ctrl =_ "\011Transmit Flow-Control ON\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:248 [stmmac]dwmac1000_flow_ctrl =_ "\011duplex mode: PAUSE %d\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:161 [stmmac]dwmac1000_set_filter =_ "%s: # mcasts %d, # unicast %d\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c:142 [stmmac]dwmac1000_set_mchash =_ "STMMAC: err in setting multicast filter\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:184 [stmmac]dwmac1000_dma_operation_mode_tx =_ "GMAC: enable TX store and forward mode\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:192 [stmmac]dwmac1000_dma_operation_mode_tx =_ "GMAC: disabling TX SF (threshold %d)\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:156 [stmmac]dwmac1000_dma_operation_mode_rx =_ "GMAC: enable RX store and forward mode\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:159 [stmmac]dwmac1000_dma_operation_mode_rx =_ "GMAC: disable RX SF mode (threshold %d)\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c:141 [stmmac]dwmac1000_configure_fc =_ "GMAC: disabling flow control, rxfifo too small(%d)\012"
+drivers/net/ethernet/stmicro/stmmac/mmc_core.c:190 [stmmac]dwmac_mmc_ctrl =_ "stmmac: MMC ctrl register (offset 0x%x): 0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c:230 [stmmac]stmmac_ptp_unregister =_ "Removed PTP HW clock successfully on %s\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c:282 [stmmac]dwmac4_dma_tx_chan_op_mode =_ "GMAC: enable TX store and forward mode\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c:286 [stmmac]dwmac4_dma_tx_chan_op_mode =_ "GMAC: disabling TX SF (threshold %d)\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c:202 [stmmac]dwmac4_dma_rx_chan_op_mode =_ "GMAC: enable RX store and forward mode\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c:205 [stmmac]dwmac4_dma_rx_chan_op_mode =_ "GMAC: disable RX SF mode (threshold %d)\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:303 [stmmac]dwmac4_pmt =_ "GMAC: WOL Magic frame\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:307 [stmmac]dwmac4_pmt =_ "GMAC: WOL on global unicast\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:483 [stmmac]dwmac4_flow_ctrl =_ "GMAC Flow-Control:\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:485 [stmmac]dwmac4_flow_ctrl =_ "\011Receive Flow-Control ON\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:491 [stmmac]dwmac4_flow_ctrl =_ "\011Transmit Flow-Control ON\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:494 [stmmac]dwmac4_flow_ctrl =_ "\011duplex mode: PAUSE %d\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:236 [stmmac]dwmac4_config_cbs =_ "Queue %d configured as AVB. Parameters:\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:237 [stmmac]dwmac4_config_cbs =_ "\011send_slope: 0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:238 [stmmac]dwmac4_config_cbs =_ "\011idle_slope: 0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:239 [stmmac]dwmac4_config_cbs =_ "\011high_credit: 0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c:240 [stmmac]dwmac4_config_cbs =_ "\011low_credit: 0x%08x\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c:344 [stmmac_platform]stmmac_dt_phy =_ "Found MDIO subnode\012"
+drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c:594 [stmmac_platform]stmmac_probe_config_dt =_ "PTP rate %d\012"
+drivers/usb/core/hub.c:463 [usbcore]set_port_led =_ "indicator %s status %d\012"
+drivers/usb/core/hub.c:724 [usbcore]hub_irq =_ "transfer --> %d\012"
+drivers/usb/core/hub.c:958 [usbcore]hub_port_logical_disconnect =_ "logical disconnect\012"
+drivers/usb/core/hub.c:909 [usbcore]hub_power_on =_ "enabling power on all ports\012"
+drivers/usb/core/hub.c:912 [usbcore]hub_power_on =_ "trying to enable port power on non-switchable hub\012"
+drivers/usb/core/hub.c:1123 [usbcore]hub_activate =_ "status %04x change %04x\012"
+drivers/usb/core/hub.c:2799 [usbcore]hub_port_wait_reset =_ "not %sreset yet, waiting %dms\012"
+drivers/usb/core/hub.c:2902 [usbcore]hub_port_reset =_ "port_wait_reset: err = %d\012"
+drivers/usb/core/hub.c:2940 [usbcore]hub_port_reset =_ "hot reset failed, warm reset\012"
+drivers/usb/core/hub.c:2947 [usbcore]hub_port_reset =_ "not enabled, trying %sreset again...\012"
+drivers/usb/core/hub.c:4575 [usbcore]hub_port_init =_ "device reset changed speed!\012"
+drivers/usb/core/hub.c:4725 [usbcore]hub_port_init =_ "device reset changed speed!\012"
+drivers/usb/core/hub.c:4802 [usbcore]hub_port_init =_ "Failed set isoch delay, error %d\012"
+drivers/usb/core/hub.c:4840 [usbcore]hub_port_init =_ "ep0 maxpacket = %d\012"
+drivers/usb/core/hub.c:5874 [usbcore]usb_reset_device =_ "device reset not allowed in state %d\012"
+drivers/usb/core/hub.c:5880 [usbcore]usb_reset_device =_ "%s for root hub!\012"
+drivers/usb/core/hub.c:5711 [usbcore]usb_reset_and_verify_device =_ "device reset not allowed in state %d\012"
+drivers/usb/core/hub.c:5632 [usbcore]descriptors_changed =_ "config index %d, error %d\012"
+drivers/usb/core/hub.c:5641 [usbcore]descriptors_changed =_ "config index %d changed (#%d)\012"
+drivers/usb/core/hub.c:5652 [usbcore]descriptors_changed =_ "serial string error %d\012"
+drivers/usb/core/hub.c:5655 [usbcore]descriptors_changed =_ "serial string changed\012"
+drivers/usb/core/hub.c:2214 [usbcore]usb_disconnect =_ "unregistering device\012"
+drivers/usb/core/hub.c:2526 [usbcore]usb_new_device =_ "udev %d, busnum %d, minor = %d\012"
+drivers/usb/core/hub.c:4434 [usbcore]hub_port_debounce =_ "debounce total %dms stable %dms status 0x%x\012"
+drivers/usb/core/hub.c:5404 [usbcore]hub_event =_ "state %d ports %d chg %04x evt %04x\012"
+drivers/usb/core/hub.c:5431 [usbcore]hub_event =_ "resetting for error %d\012"
+drivers/usb/core/hub.c:5435 [usbcore]hub_event =_ "error resetting hub: %d\012"
+drivers/usb/core/hub.c:5301 [usbcore]port_event =_ "enable change, status %08x\012"
+drivers/usb/core/hub.c:5322 [usbcore]port_event =_ "over-current change #%u\012"
+drivers/usb/core/hub.c:5333 [usbcore]port_event =_ "reset change\012"
+drivers/usb/core/hub.c:5338 [usbcore]port_event =_ "warm reset change\012"
+drivers/usb/core/hub.c:5343 [usbcore]port_event =_ "link state change\012"
+drivers/usb/core/hub.c:5365 [usbcore]port_event =_ "do warm reset\012"
+drivers/usb/core/hub.c:5196 [usbcore]hub_port_connect_change =_ "status %04x, change %04x, %s\012"
+drivers/usb/core/hub.c:5074 [usbcore]hub_port_connect =_ "get status %d ?\012"
+drivers/usb/core/hub.c:5140 [usbcore]hub_port_connect =_ "%dmA power budget left\012"
+drivers/usb/core/hub.c:5475 [usbcore]hub_event =_ "power change\012"
+drivers/usb/core/hub.c:5487 [usbcore]hub_event =_ "over-current change\012"
+drivers/usb/core/hub.c:6031 [usbcore]usb_hub_adjust_deviceremovable =_ "DeviceRemovable is changed to 1 according to platform information.\012"
+drivers/usb/core/hub.c:6047 [usbcore]usb_hub_adjust_deviceremovable =_ "DeviceRemovable is changed to 1 according to platform information.\012"
+drivers/usb/core/hub.c:1462 [usbcore]hub_configure =_ "compound device; port removable status: %s\012"
+drivers/usb/core/hub.c:1464 [usbcore]hub_configure =_ "standalone hub\012"
+drivers/usb/core/hub.c:1468 [usbcore]hub_configure =_ "ganged power switching\012"
+drivers/usb/core/hub.c:1471 [usbcore]hub_configure =_ "individual port power switching\012"
+drivers/usb/core/hub.c:1475 [usbcore]hub_configure =_ "no power switching (usb 1.0)\012"
+drivers/usb/core/hub.c:1481 [usbcore]hub_configure =_ "global over-current protection\012"
+drivers/usb/core/hub.c:1484 [usbcore]hub_configure =_ "individual port over-current protection\012"
+drivers/usb/core/hub.c:1488 [usbcore]hub_configure =_ "no over-current protection\012"
+drivers/usb/core/hub.c:1499 [usbcore]hub_configure =_ "Single TT\012"
+drivers/usb/core/hub.c:1505 [usbcore]hub_configure =_ "TT per port\012"
+drivers/usb/core/hub.c:1517 [usbcore]hub_configure =_ "Unrecognized hub protocol %d\012"
+drivers/usb/core/hub.c:1528 [usbcore]hub_configure =_ "TT requires at most %d FS bit times (%d ns)\012"
+drivers/usb/core/hub.c:1535 [usbcore]hub_configure =_ "TT requires at most %d FS bit times (%d ns)\012"
+drivers/usb/core/hub.c:1541 [usbcore]hub_configure =_ "TT requires at most %d FS bit times (%d ns)\012"
+drivers/usb/core/hub.c:1547 [usbcore]hub_configure =_ "TT requires at most %d FS bit times (%d ns)\012"
+drivers/usb/core/hub.c:1554 [usbcore]hub_configure =_ "Port indicators are supported\012"
+drivers/usb/core/hub.c:1558 [usbcore]hub_configure =_ "power on to power good time: %dms\012"
+drivers/usb/core/hub.c:1585 [usbcore]hub_configure =_ "hub controller current requirement: %dmA\012"
+drivers/usb/core/hub.c:1601 [usbcore]hub_configure =_ "%umA bus power budget for each child\012"
+drivers/usb/core/hub.c:1613 [usbcore]hub_configure =_ "local power source is %s\012"
+drivers/usb/core/hub.c:1617 [usbcore]hub_configure =_ "%sover-current condition exists\012"
+drivers/usb/core/hcd.c:1114 [usbcore]usb_calc_bus_time =_ "%s: bogus device speed!\012"
+drivers/usb/core/hcd.c:2852 [usbcore]usb_remove_hcd =_ "roothub graceful disconnect\012"
+drivers/usb/core/hcd.c:2695 [usbcore]usb_add_hcd =_ "pool alloc failed\012"
+drivers/usb/core/hcd.c:2773 [usbcore]usb_add_hcd =_ "supports USB remote wakeup\012"
+drivers/usb/core/hcd.c:992 [usbcore]register_root_hub =_ "can't read %s device descriptor %d\012"
+drivers/usb/core/hcd.c:1003 [usbcore]register_root_hub =_ "can't read %s bos descriptor %d\012"
+drivers/usb/core/hcd.c:2426 [usbcore]__usb_create_hcd =_ "hcd address0 mutex alloc failed\012"
+drivers/usb/core/hcd.c:2435 [usbcore]__usb_create_hcd =_ "hcd bandwidth mutex alloc failed\012"
+drivers/usb/core/hcd.c:813 [usbcore]rh_queue_status =_ "not queuing rh status urb\012"
+drivers/usb/core/hcd.c:641 [usbcore]rh_call_control =_ "root hub device address %d\012"
+drivers/usb/core/hcd.c:656 [usbcore]rh_call_control =_ "no endpoint features yet\012"
+drivers/usb/core/hcd.c:702 [usbcore]rh_call_control =_ "CTRL: TypeReq=0x%x val=0x%x idx=0x%x len=%d ==> %d\012"
+drivers/usb/core/hcd.c:1625 [usbcore]usb_hcd_unlink_urb =_ "hcd_unlink_urb %pK fail %d\012"
+drivers/usb/core/hcd.c:1780 [usbcore]usb_hcd_flush_endpoint =_ "shutdown urb %pK ep%d%s-%s\012"
+drivers/usb/core/urb.c:415 [usbcore]usb_submit_urb =_ "bogus endpoint ep%d%s in %s (bad maxpacket %d)\012"
+drivers/usb/core/message.c:72 [usbcore]usb_start_wait_urb =_ "%s timed out on ep%d%s len=%u/%u\012"
+drivers/usb/core/message.c:2267 [usbcore]cdc_parse_cdc_header =_ "Ignoring descriptor: type %02x, length %ud\012"
+drivers/usb/core/message.c:802 [usbcore]usb_get_langid =_ "default language 0x%04x\012"
+drivers/usb/core/message.c:855 [usbcore]usb_string =_ "wrong descriptor type %02x for string %d (\042%s\042)\012"
+drivers/usb/core/message.c:555 [usbcore]usb_sg_wait =_ "%s, submit --> %d\012"
+drivers/usb/core/message.c:1242 [usbcore]usb_disable_device =_ "unregistering interface %s\012"
+drivers/usb/core/message.c:1265 [usbcore]usb_disable_device =_ "%s nuking %s URBs\012"
+drivers/usb/core/message.c:1380 [usbcore]usb_set_interface =_ "selecting invalid interface %d\012"
+drivers/usb/core/message.c:1437 [usbcore]usb_set_interface =_ "manual set_interface for iface %d, alt %d\012"
+drivers/usb/core/message.c:2028 [usbcore]usb_set_configuration =_ "adding %s (config #%d, interface %d)\012"
+drivers/usb/core/driver.c:255 [usbcore]usb_probe_device =_ "%s\012"
+drivers/usb/core/driver.c:845 [usbcore]usb_uevent =_ "usb %s: already deleted?\012"
+drivers/usb/core/driver.c:849 [usbcore]usb_uevent =_ "usb %s: bus removed?\012"
+drivers/usb/core/driver.c:292 [usbcore]usb_probe_interface =_ "%s\012"
+drivers/usb/core/driver.c:314 [usbcore]usb_probe_interface =_ "%s - got id\012"
+drivers/usb/core/driver.c:1018 [usbcore]usb_forced_unbind_intf =_ "forced unbind\012"
+drivers/usb/core/config.c:793 [usbcore]usb_parse_configuration =_ "skipped %d descriptor%s after %s\012"
+drivers/usb/core/config.c:558 [usbcore]usb_parse_interface =_ "skipped %d descriptor%s after %s\012"
+drivers/usb/core/config.c:479 [usbcore]usb_parse_endpoint =_ "skipped %d descriptor%s after %s\012"
+drivers/usb/core/file.c:185 [usbcore]usb_register_dev =_ "looking for a minor, starting at %d\012"
+drivers/usb/core/file.c:236 [usbcore]usb_deregister_dev =_ "removing %d minor\012"
+drivers/usb/core/devio.c:2368 [usbcore]proc_disconnect_claim =_ "disconnect by usbfs\012"
+drivers/usb/core/devio.c:2236 [usbcore]proc_ioctl =_ "disconnect by usbfs\012"
+drivers/usb/core/generic.c:187 [usbcore]usb_choose_configuration =_ "configuration #%d chosen from %d choice%s\012"
+drivers/usb/core/quirks.c:624 [usbcore]usb_detect_quirks =_ "USB quirks for this device: %x\012"
+drivers/usb/core/quirks.c:645 [usbcore]usb_detect_interface_quirks =_ "USB interface quirks for this device: %x\012"
+drivers/usb/core/port.c:344 [usbcore]link_peers =_ "usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\012"
+drivers/usb/core/port.c:396 [usbcore]link_peers_report =_ "peered to %s\012"
+drivers/usb/core/port.c:399 [usbcore]link_peers_report =_ "failed to peer to %s (%d)\012"
+drivers/usb/dwc3/core.c:1496 [dwc3]dwc3_probe =_ "%s dma_get_mask : %#llx\012"
+drivers/usb/dwc3/gadget.c:782 [dwc3]dwc3_gadget_ep_enable =_ "dwc3: invalid parameters\012"
+drivers/usb/dwc3/gadget.c:787 [dwc3]dwc3_gadget_ep_enable =_ "dwc3: missing wMaxPacketSize\012"
+drivers/usb/dwc3/gadget.c:814 [dwc3]dwc3_gadget_ep_disable =_ "dwc3: invalid parameters\012"
+drivers/usb/host/ohci-q.c:792 [ohci_hcd]td_done =_ "urb %p iso td %p (%d) len %d cc %d\012"
+drivers/usb/host/ohci-q.c:826 [ohci_hcd]td_done =_ "urb %p td %p (%d) cc %d, len=%d/%d\012"
+drivers/usb/host/ohci-q.c:293 [ohci_hcd]periodic_unlink =_ "unlink %sed %p branch %d [%dus.], interval %d\012"
+drivers/usb/host/ohci-q.c:251 [ohci_hcd]ed_schedule =_ "ERR %d, interval %d msecs, load %d\012"
+drivers/usb/host/ohci-q.c:149 [ohci_hcd]periodic_link =_ "link %sed %p branch %d [%dus.], interval %d\012"
+drivers/usb/host/ohci-dbg.c:61 [ohci_hcd]ohci_dump_intr_mask =_ "%s 0x%08x%s%s%s%s%s%s%s%s%s\012"
+drivers/usb/host/ohci-mem.c:121 [ohci_hcd]td_free =_ "no hash for td %p\012"
+drivers/usb/host/ohci-hcd.c:1279 [ohci_hcd]ohci_hcd_mod_init =_ "%s: block sizes: ed %zd td %zd\012"
+drivers/usb/host/ohci-dbg.c:72 [ohci_hcd]maybe_print_eds =_ "%s %08x\012"
+drivers/usb/host/ohci-dbg.c:111 [ohci_hcd]ohci_dump_status =_ "OHCI %d.%d, %s legacy support registers, rh state %s\012"
+drivers/usb/host/ohci-dbg.c:126 [ohci_hcd]ohci_dump_status =_ "control 0x%03x%s%s%s HCFS=%s%s%s%s%s CBSR=%d\012"
+drivers/usb/host/ohci-dbg.c:136 [ohci_hcd]ohci_dump_status =_ "cmdstatus 0x%05x SOC=%d%s%s%s%s\012"
+drivers/usb/host/ohci-dbg.c:213 [ohci_hcd]ohci_dump_roothub =_ "roothub.a %08x POTPGT=%d%s%s%s%s%s NDP=%d(%d)\012"
+drivers/usb/host/ohci-dbg.c:220 [ohci_hcd]ohci_dump_roothub =_ "roothub.b %08x PPCM=%04x DR=%04x\012"
+drivers/usb/host/ohci-dbg.c:231 [ohci_hcd]ohci_dump_roothub =_ "roothub.status %08x%s%s%s%s%s%s\012"
+drivers/usb/host/ohci-dbg.c:236 [ohci_hcd]ohci_dump_roothub =_ "roothub.portstatus [%d] 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\012"
+drivers/usb/host/ohci-dbg.c:242 [ohci_hcd]ohci_dump =_ "OHCI controller state\012"
+drivers/usb/host/ohci-dbg.c:248 [ohci_hcd]ohci_dump =_ "hcca frame #%04x\012"
+drivers/usb/host/ohci-hcd.c:790 [ohci_hcd]io_watchdog_func =_ "takeback pending TD for dev %d ep 0x%x\012"
+drivers/usb/host/ohci-hcd.c:479 [ohci_hcd]ohci_init =_ "USB HC TakeOver from BIOS/SMM\012"
+drivers/usb/host/ohci-dbg.c:776 [ohci_hcd]create_debug_files =_ "created debug files\012"
+drivers/usb/host/ohci-hub.c:753 [ohci_hcd]ohci_hub_control =_ "%s roothub.portstatus [%d] = 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\012"
+drivers/usb/host/ohci-hub.c:651 [ohci_hcd]root_port_reset =_ "port[%d] reset timeout, stat %08x\012"
+drivers/usb/host/ohci-hcd.c:891 [ohci_hcd]ohci_irq =_ "device removed!\012"
+drivers/usb/host/ohci-hcd.c:925 [ohci_hcd]ohci_irq =_ "rhsc\012"
+drivers/usb/host/ohci-hcd.c:947 [ohci_hcd]ohci_irq =_ "resume detect\012"
+drivers/usb/host/ohci-q.c:894 [ohci_hcd]ed_halted =_ "urb %p path %s ep%d%s %08x cc %d --> status %d\012"
+drivers/usb/host/ohci-hcd.c:560 [ohci_hcd]ohci_run =_ "fminterval delta %d\012"
+drivers/usb/host/ohci-hcd.c:646 [ohci_hcd]ohci_run =_ "enabling initreset quirk\012"
+drivers/usb/host/ohci-hcd.c:287 [ohci_hcd]ohci_urb_enqueue =_ "iso underrun %p (%u+%u < %u)\012"
+drivers/usb/host/xhci.c:1344 [xhci_hcd]xhci_check_args =_ "xHCI %s called with invalid args\012"
+drivers/usb/host/xhci.c:1348 [xhci_hcd]xhci_check_args =_ "xHCI %s called for root hub\012"
+drivers/usb/host/xhci.c:1356 [xhci_hcd]xhci_check_args =_ "xHCI %s called with unaddressed device\012"
+drivers/usb/host/xhci.c:1363 [xhci_hcd]xhci_check_args =_ "xHCI %s called with udev and virt_dev does not match\012"
+drivers/usb/host/xhci.c:2970 [xhci_hcd]xhci_reset_bandwidth =_ "%s called for udev %p\012"
+drivers/usb/host/xhci.c:3206 [xhci_hcd]xhci_endpoint_reset =_ "%s: Failed to queue stop ep command, %d "
+drivers/usb/host/xhci.c:3229 [xhci_hcd]xhci_endpoint_reset =_ "%s: Failed to queue config ep command, %d "
+drivers/usb/host/xhci.c:3109 [xhci_hcd]xhci_endpoint_disable =_ "endpoint disable with ep_state 0x%x\012"
+drivers/usb/host/xhci.c:1750 [xhci_hcd]xhci_drop_endpoint =_ "%s called for udev %p\012"
+drivers/usb/host/xhci.c:1754 [xhci_hcd]xhci_drop_endpoint =_ "xHCI %s - can't drop slot or ep 0 %#x\012"
+drivers/usb/host/xhci.c:1799 [xhci_hcd]xhci_drop_endpoint =_ "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\012"
+drivers/usb/host/xhci.c:1846 [xhci_hcd]xhci_add_endpoint =_ "xHCI %s - can't add slot or ep 0 %#x\012"
+drivers/usb/host/xhci.c:1887 [xhci_hcd]xhci_add_endpoint =_ "%s - could not initialize ep %#x\012"
+drivers/usb/host/xhci.c:1923 [xhci_hcd]xhci_add_endpoint =_ "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\012"
+drivers/usb/host/xhci.c:5224 [xhci_hcd]xhci_gen_setup =_ "Resetting HCD\012"
+drivers/usb/host/xhci.c:5229 [xhci_hcd]xhci_gen_setup =_ "Reset complete\012"
+drivers/usb/host/xhci.c:5245 [xhci_hcd]xhci_gen_setup =_ "Enabling 64-bit DMA addresses.\012"
+drivers/usb/host/xhci.c:5255 [xhci_hcd]xhci_gen_setup =_ "Enabling 32-bit DMA addresses.\012"
+drivers/usb/host/xhci.c:5259 [xhci_hcd]xhci_gen_setup =_ "Calling HCD init\012"
+drivers/usb/host/xhci.c:5264 [xhci_hcd]xhci_gen_setup =_ "Called HCD init\012"
+drivers/usb/host/xhci.c:5045 [xhci_hcd]xhci_update_hub_device =_ "Could not allocate xHCI TT structure.\012"
+drivers/usb/host/xhci.c:5068 [xhci_hcd]xhci_update_hub_device =_ "xHCI version %x needs hub TT think time and number of ports\012"
+drivers/usb/host/xhci.c:5086 [xhci_hcd]xhci_update_hub_device =_ "xHCI version %x doesn't need hub TT think time or number of ports\012"
+drivers/usb/host/xhci.c:5093 [xhci_hcd]xhci_update_hub_device =_ "Set up %s for hub device.\012"
+drivers/usb/host/xhci.c:2880 [xhci_hcd]xhci_check_bandwidth =_ "%s called for udev %p\012"
+drivers/usb/host/xhci.c:3423 [xhci_hcd]xhci_alloc_streams =_ "Driver wants %u stream IDs (including stream 0).\012"
+drivers/usb/host/xhci.c:3428 [xhci_hcd]xhci_alloc_streams =_ "xHCI controller does not support streams.\012"
+drivers/usb/host/xhci.c:3330 [xhci_hcd]xhci_calculate_streams_and_bitmask =_ "Ep 0x%x only supports %u stream IDs.\012"
+drivers/usb/host/xhci.c:3300 [xhci_hcd]xhci_calculate_streams_entries =_ "xHCI HW only supports %u stream ctx entries.\012"
+drivers/usb/host/xhci.c:3479 [xhci_hcd]xhci_alloc_streams =_ "Need %u stream ctx entries for %u stream IDs.\012"
+drivers/usb/host/xhci.c:3530 [xhci_hcd]xhci_alloc_streams =_ "Slot %u ep ctx %u now has streams.\012"
+drivers/usb/host/xhci.c:1477 [xhci_hcd]xhci_urb_enqueue =_ "urb submitted during PCI suspend\012"
+drivers/usb/host/xhci.c:1481 [xhci_hcd]xhci_urb_enqueue =_ "Can't queue urb, port error, link inactive\012"
+drivers/usb/host/xhci.c:1524 [xhci_hcd]xhci_urb_enqueue =_ "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\012"
+drivers/usb/host/xhci.c:3974 [xhci_hcd]xhci_alloc_dev =_ "FIXME: allocate a command ring segment\012"
+drivers/usb/host/xhci.c:3714 [xhci_hcd]xhci_discover_or_reset_device =_ "The device to be reset with slot ID %u does not exist. Re-allocate the device\012"
+drivers/usb/host/xhci.c:3732 [xhci_hcd]xhci_discover_or_reset_device =_ "The device to be reset with slot ID %u does not match the udev. Re-allocate the device\012"
+drivers/usb/host/xhci.c:3748 [xhci_hcd]xhci_discover_or_reset_device =_ "Resetting device with slot ID %u\012"
+drivers/usb/host/xhci.c:3757 [xhci_hcd]xhci_discover_or_reset_device =_ "Couldn't allocate command structure.\012"
+drivers/usb/host/xhci.c:3766 [xhci_hcd]xhci_discover_or_reset_device =_ "FIXME: allocate a command ring segment\012"
+drivers/usb/host/xhci.c:3791 [xhci_hcd]xhci_discover_or_reset_device =_ "Can't reset device (slot ID %u) in %s state\012"
+drivers/usb/host/xhci.c:3792 [xhci_hcd]xhci_discover_or_reset_device =_ "Not freeing device rings.\012"
+drivers/usb/host/xhci.c:3797 [xhci_hcd]xhci_discover_or_reset_device =_ "Successful reset device command.\012"
+drivers/usb/host/xhci.c:4094 [xhci_hcd]xhci_setup_device =_ "Slot already in default state\012"
+drivers/usb/host/xhci-mem.c:2093 [xhci_hcd]xhci_check_trb_in_td_math =_ "TRB math tests passed.\012"
+drivers/usb/host/xhci-mem.c:815 [xhci_hcd]xhci_free_tt_info =_ "Bad real port.\012"
+drivers/usb/host/xhci-mem.c:1262 [xhci_hcd]xhci_microframes_to_exponent =_ "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\012"
+drivers/usb/host/xhci-mem.c:994 [xhci_hcd]xhci_alloc_virt_device =_ "Slot %d output ctx = 0x%llx (dma)\012"
+drivers/usb/host/xhci-mem.c:1002 [xhci_hcd]xhci_alloc_virt_device =_ "Slot %d input ctx = 0x%llx (dma)\012"
+drivers/usb/host/xhci-mem.c:1023 [xhci_hcd]xhci_alloc_virt_device =_ "Set slot id %d dcbaa entry %p to 0x%llx\012"
+drivers/usb/host/xhci-mem.c:1136 [xhci_hcd]xhci_setup_addressable_virt_dev =_ "FIXME xHCI doesn't support wireless speeds\012"
+drivers/usb/host/xhci-mem.c:1154 [xhci_hcd]xhci_setup_addressable_virt_dev =_ "Set root hub portnum to %d\012"
+drivers/usb/host/xhci-mem.c:1155 [xhci_hcd]xhci_setup_addressable_virt_dev =_ "Set fake root hub portnum to %d\012"
+drivers/usb/host/xhci-mem.c:1194 [xhci_hcd]xhci_setup_addressable_virt_dev =_ "udev->tt = %p\012"
+drivers/usb/host/xhci-mem.c:1195 [xhci_hcd]xhci_setup_addressable_virt_dev =_ "udev->ttport = 0x%x\012"
+drivers/usb/host/xhci-mem.c:626 [xhci_hcd]xhci_alloc_stream_info =_ "Allocating %u streams and %u stream context array entries.\012"
+drivers/usb/host/xhci-mem.c:628 [xhci_hcd]xhci_alloc_stream_info =_ "Command ring has no reserved TRBs available\012"
+drivers/usb/host/xhci-mem.c:686 [xhci_hcd]xhci_alloc_stream_info =_ "Setting stream %d ring ptr to 0x%08llx\012"
+drivers/usb/host/xhci-mem.c:949 [xhci_hcd]xhci_free_virt_devices_depth_first =_ "Bad vdev->real_port.\012"
+drivers/usb/host/xhci-mem.c:2196 [xhci_hcd]xhci_add_in_port =_ "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\012"
+drivers/usb/host/xhci-ring.c:2952 [xhci_hcd]prepare_ring =_ "WARN halted endpoint, queueing URB anyway.\012"
+drivers/usb/host/xhci-ring.c:3028 [xhci_hcd]prepare_transfer =_ "Can't prepare ring for bad stream ID %u\012"
+drivers/usb/host/xhci-ring.c:4017 [xhci_hcd]queue_command =_ "xHCI dying or halted, can't queue_command\012"
+drivers/usb/host/xhci-ring.c:3153 [xhci_hcd]check_interval =_ "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\012"
+drivers/usb/host/xhci-ring.c:282 [xhci_hcd]xhci_ring_cmd_db =_ "// Ding dong!\012"
+drivers/usb/host/xhci-ring.c:318 [xhci_hcd]xhci_handle_stopped_cmd_ring =_ "Turn aborted command %p to no-op\012"
+drivers/usb/host/xhci-ring.c:1950 [xhci_hcd]xhci_td_cleanup =_ "Giveback URB %p, len = %d, expected = %d, status = %d\012"
+drivers/usb/host/xhci-ring.c:979 [xhci_hcd]xhci_stop_endpoint_command_watchdog =_ "Stop EP timer raced with cmd completion, exit"
+drivers/usb/host/xhci-ring.c:1368 [xhci_hcd]xhci_handle_command_timeout =_ "Command timeout\012"
+drivers/usb/host/xhci-ring.c:345 [xhci_hcd]xhci_abort_cmd_ring =_ "Abort command ring\012"
+drivers/usb/host/xhci-ring.c:378 [xhci_hcd]xhci_abort_cmd_ring =_ "No stop event for abort, ring start fail?\012"
+drivers/usb/host/xhci-ring.c:1375 [xhci_hcd]xhci_handle_command_timeout =_ "host removed, ring start fail?\012"
+drivers/usb/host/xhci-ring.c:1382 [xhci_hcd]xhci_handle_command_timeout =_ "Command timeout on stopped ring\012"
+drivers/usb/host/xhci-ring.c:1908 [xhci_hcd]xhci_is_vendor_info_code =_ "Vendor defined info completion code %u\012"
+drivers/usb/host/xhci-ring.c:1909 [xhci_hcd]xhci_is_vendor_info_code =_ "Treating code as success.\012"
+drivers/usb/host/xhci-ring.c:3243 [xhci_hcd]xhci_align_td =_ "Unaligned %d bytes, buff len %d\012"
+drivers/usb/host/xhci-ring.c:3248 [xhci_hcd]xhci_align_td =_ "split align, new buff len %d\012"
+drivers/usb/host/xhci-ring.c:3286 [xhci_hcd]xhci_align_td =_ "Bounce align, new buff len %d\012"
+drivers/usb/host/xhci-ring.c:3751 [xhci_hcd]xhci_queue_isoc_tx =_ "Isoc URB with zero packets?\012"
+drivers/usb/host/xhci-ring.c:3690 [xhci_hcd]xhci_get_isoc_frame_id =_ "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\012"
+drivers/usb/host/xhci-ring.c:1041 [xhci_hcd]update_ring_for_set_deq_completion =_ "Unable to find new dequeue pointer\012"
+drivers/usb/host/xhci-ring.c:1297 [xhci_hcd]xhci_handle_cmd_reset_dev =_ "Completed reset device command.\012"
+drivers/usb/host/xhci-ring.c:2866 [xhci_hcd]xhci_irq =_ "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\012"
+drivers/usb/host/xhci-ring.c:2764 [xhci_hcd]xhci_handle_event =_ "xHCI host dying, returning from event handler.\012"
+drivers/usb/host/xhci-ring.c:1620 [xhci_hcd]handle_port_status =_ "ignore port event for removed USB3 hcd\012"
+drivers/usb/host/xhci-ring.c:1631 [xhci_hcd]handle_port_status =_ "Port change event, %d-%d, id %d, portsc: 0x%x\012"
+drivers/usb/host/xhci-ring.c:1636 [xhci_hcd]handle_port_status =_ "resume root hub\012"
+drivers/usb/host/xhci-ring.c:1648 [xhci_hcd]handle_port_status =_ "port resume event for port %d\012"
+drivers/usb/host/xhci-ring.c:1657 [xhci_hcd]handle_port_status =_ "remote wake SS port %d\012"
+drivers/usb/host/xhci-ring.c:1672 [xhci_hcd]handle_port_status =_ "resume HS port %d\012"
+drivers/usb/host/xhci-ring.c:1693 [xhci_hcd]handle_port_status =_ "resume SS port %d finished\012"
+drivers/usb/host/xhci-ring.c:1752 [xhci_hcd]handle_port_status =_ "%s: starting port polling.\012"
+drivers/usb/host/xhci-ring.c:2418 [xhci_hcd]handle_tx_event =_ "Stopped on Transfer TRB for slot %u ep %u\012"
+drivers/usb/host/xhci-ring.c:2423 [xhci_hcd]handle_tx_event =_ "Stopped on No-op or Link TRB for slot %u ep %u\012"
+drivers/usb/host/xhci-ring.c:2428 [xhci_hcd]handle_tx_event =_ "Stopped with short packet transfer detected for slot %u ep %u\012"
+drivers/usb/host/xhci-ring.c:2433 [xhci_hcd]handle_tx_event =_ "Stalled endpoint for slot %u ep %u\012"
+drivers/usb/host/xhci-ring.c:2440 [xhci_hcd]handle_tx_event =_ "Transfer error for slot %u ep %u on endpoint\012"
+drivers/usb/host/xhci-ring.c:2445 [xhci_hcd]handle_tx_event =_ "Babble error for slot %u ep %u on endpoint\012"
+drivers/usb/host/xhci-ring.c:2478 [xhci_hcd]handle_tx_event =_ "underrun event on endpoint\012"
+drivers/usb/host/xhci-ring.c:2483 [xhci_hcd]handle_tx_event =_ "Underrun Event for slot %d ep %d still with TDs queued?\012"
+drivers/usb/host/xhci-ring.c:2486 [xhci_hcd]handle_tx_event =_ "overrun event on endpoint\012"
+drivers/usb/host/xhci-ring.c:2491 [xhci_hcd]handle_tx_event =_ "Overrun Event for slot %d ep %d still with TDs queued?\012"
+drivers/usb/host/xhci-ring.c:2503 [xhci_hcd]handle_tx_event =_ "Miss service interval error for slot %u ep %u, set skip flag\012"
+drivers/usb/host/xhci-ring.c:2509 [xhci_hcd]handle_tx_event =_ "No Ping response error for slot %u ep %u, Skip one Isoc TD\012"
+drivers/usb/host/xhci-ring.c:2553 [xhci_hcd]handle_tx_event =_ "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\012"
+drivers/usb/host/xhci-ring.c:2571 [xhci_hcd]handle_tx_event =_ "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\012"
+drivers/usb/host/xhci-ring.c:2632 [xhci_hcd]handle_tx_event =_ "Found td. Clear skip flag for slot %u ep %u.\012"
+drivers/usb/host/xhci-ring.c:2097 [xhci_hcd]process_ctrl_td =_ "TRB error %u, halted endpoint index = %u\012"
+drivers/usb/host/xhci-ring.c:2120 [xhci_hcd]process_ctrl_td =_ "Waiting for status stage event\012"
+drivers/usb/host/xhci-ring.c:2278 [xhci_hcd]process_bulk_intr_td =_ "ep %#x - asked for %d bytes, %d bytes untransferred\012"
+drivers/usb/host/xhci-ring.c:2285 [xhci_hcd]process_bulk_intr_td =_ "ep %#x - asked for %d bytes, %d bytes untransferred\012"
+drivers/usb/host/xhci-ring.c:1547 [xhci_hcd]handle_device_notification =_ "Device Wake Notification event for slot ID %u\012"
+drivers/usb/host/xhci-ring.c:1528 [xhci_hcd]handle_vendor_event =_ "Vendor specific event TRB type = %u\012"
+drivers/usb/host/xhci-hub.c:580 [xhci_hcd]xhci_set_port_power =_ "set port power %d-%d %s, portsc: 0x%x\012"
+drivers/usb/host/xhci-hub.c:692 [xhci_hcd]xhci_set_link_state =_ "Set port %d-%d link state, portsc: 0x%x, write 0x%x"
+drivers/usb/host/xhci-hub.c:1124 [xhci_hcd]xhci_hub_control =_ "Wrong hub descriptor type for USB 3.0 roothub.\012"
+drivers/usb/host/xhci-hub.c:862 [xhci_hcd]xhci_handle_usb2_port_link_resume =_ "resume USB2 port %d-%d\012"
+drivers/usb/host/xhci-hub.c:882 [xhci_hcd]xhci_handle_usb2_port_link_resume =_ "slot_id is zero\012"
+drivers/usb/host/xhci-hub.c:1157 [xhci_hcd]xhci_hub_control =_ "Get port status %d-%d read: 0x%x, return 0x%x"
+drivers/usb/host/xhci-hub.c:1242 [xhci_hcd]xhci_hub_control =_ "Disable port %d\012"
+drivers/usb/host/xhci-hub.c:1258 [xhci_hcd]xhci_hub_control =_ "Enable port %d\012"
+drivers/usb/host/xhci-hub.c:1281 [xhci_hcd]xhci_hub_control =_ "CTC flag is 0, port already supports entering compliance mode\012"
+drivers/usb/host/xhci-hub.c:1291 [xhci_hcd]xhci_hub_control =_ "Enable compliance mode transition for port %d\012"
+drivers/usb/host/xhci-hub.c:1342 [xhci_hcd]xhci_hub_control =_ "missing U0 port change event for port %d\012"
+drivers/usb/host/xhci-hub.c:1387 [xhci_hcd]xhci_hub_control =_ "set port reset, actual port %d status  = 0x%x\012"
+drivers/usb/host/xhci-hub.c:1395 [xhci_hcd]xhci_hub_control =_ "set port remote wake mask, actual port %d status  = 0x%x\012"
+drivers/usb/host/xhci-hub.c:624 [xhci_hcd]xhci_enter_test_mode =_ "Disable all slots\012"
+drivers/usb/host/xhci-hub.c:637 [xhci_hcd]xhci_enter_test_mode =_ "Disable all port (PP = 0)\012"
+drivers/usb/host/xhci-hub.c:645 [xhci_hcd]xhci_enter_test_mode =_ "Stop controller\012"
+drivers/usb/host/xhci-hub.c:654 [xhci_hcd]xhci_enter_test_mode =_ "Enter Test Mode: %d, Port_id=%d\012"
+drivers/usb/host/xhci-hub.c:1448 [xhci_hcd]xhci_hub_control =_ "clear USB_PORT_FEAT_SUSPEND\012"
+drivers/usb/host/xhci-hub.c:1449 [xhci_hcd]xhci_hub_control =_ "PORTSC %04x\012"
+drivers/usb/host/xhci-hub.c:1473 [xhci_hcd]xhci_hub_control =_ "slot_id is zero\012"
+drivers/usb/host/xhci-hub.c:551 [xhci_hcd]xhci_clear_port_change_bit =_ "clear port%d %s change, portsc: 0x%x\012"
+drivers/usb/host/xhci-hub.c:486 [xhci_hcd]xhci_disable_port =_ "Ignoring request to disable SuperSpeed port.\012"
+drivers/usb/host/xhci-hub.c:492 [xhci_hcd]xhci_disable_port =_ "Broken Port Enabled/Disabled, ignoring port disable request.\012"
+drivers/usb/host/xhci-hub.c:500 [xhci_hcd]xhci_disable_port =_ "disable port %d-%d, portsc: 0x%x\012"
+drivers/usb/host/xhci-hub.c:1576 [xhci_hcd]xhci_hub_status_data =_ "%s: stopping port polling.\012"
+drivers/usb/host/xhci-dbg.c:31 [xhci_hcd]xhci_dbg_trace =_ "%pV\012"
+drivers/usb/gadget/udc/core.c:1135 [udc_core]usb_udc_release =_ "releasing '%s'\012"
+drivers/usb/gadget/udc/core.c:1295 [udc_core]usb_gadget_remove_driver =_ "unregistering UDC driver [%s]\012"
+drivers/usb/gadget/udc/core.c:1350 [udc_core]udc_bind_to_driver =_ "registering UDC driver [%s]\012"
+drivers/input/serio/libps2.c:433 [libps2]ps2_handle_ack =_ "unexpected %#02x\012"
+drivers/input/serio/libps2.c:43 [libps2]ps2_do_sendbyte =_ "failed to write %#02x: %d\012"
+drivers/input/serio/libps2.c:73 [libps2]ps2_do_sendbyte =_ "%02x - %d (%x), attempt %d\012"
+drivers/input/serio/libps2.c:93 [libps2]ps2_sendbyte =_ "%02x - %x\012"
+drivers/input/serio/libps2.c:317 [libps2]__ps2_command =_ "%02x [%*ph] - %x/%08lx [%*ph]\012"
+drivers/input/serio/libps2.c:366 [libps2]ps2_sliced_command =_ "%02x - %d\012"
+drivers/input/input.c:1855 [input_core]devm_input_device_release =_ "%s: dropping reference to %s\012"
+drivers/input/input.c:2239 [input_core]input_register_device =_ "%s: registering %s with devres.\012"
+drivers/input/input.c:2112 [input_core]devm_input_device_unregister =_ "%s: unregistering device %s\012"
+drivers/input/ff-core.c:105 [input_core]input_ff_upload =_ "invalid or not supported effect type in upload\012"
+drivers/input/ff-core.c:113 [input_core]input_ff_upload =_ "invalid or not supported wave form in upload\012"
+drivers/input/ff-core.c:241 [input_core]input_ff_flush =_ "flushing now\012"
+drivers/input/keyboard/atkbd.c:378 [atkbd]atkbd_interrupt =_ "Received %02x flags %02x\012"
+drivers/input/keyboard/atkbd.c:446 [atkbd]atkbd_interrupt =_ "Keyboard on %s reports too many keys pressed.\012"
+drivers/input/keyboard/atkbd.c:1225 [atkbd]atkbd_reconnect =_ "reconnect request, but serio is disconnected, ignoring...\012"
+drivers/rtc/class.c:350 [rtc_core]__rtc_register_device =_ "no ops set\012"
+drivers/rtc/class.c:370 [rtc_core]__rtc_register_device =_ "char device (%d:%d)\012"
+drivers/rtc/interface.c:97 [rtc_core]__rtc_read_time =_ "read_time: fail to read: %d\012"
+drivers/rtc/interface.c:105 [rtc_core]__rtc_read_time =_ "read_time: rtc_time isn't valid\012"
+drivers/rtc/interface.c:339 [rtc_core]__rtc_read_alarm =_ "alarm rollover: %s\012"
+drivers/rtc/interface.c:350 [rtc_core]__rtc_read_alarm =_ "alarm rollover: %s\012"
+drivers/rtc/interface.c:365 [rtc_core]__rtc_read_alarm =_ "alarm rollover: %s\012"
+drivers/rtc/dev.c:450 [rtc_core]rtc_dev_prepare =_ "too many RTC devices\012"
+drivers/i2c/i2c-boardinfo.c:67 [i2c_boardinfo]i2c_register_board_info =_ "i2c-core: can't register boardinfo!\012"
+drivers/i2c/i2c-core-base.c:249 [i2c_core]i2c_recover_bus =_ "Trying i2c bus recovery\012"
+drivers/i2c/i2c-core-base.c:425 [i2c_core]i2c_device_remove =_ "remove\012"
+drivers/i2c/i2c-core-base.c:330 [i2c_core]i2c_device_probe =_ "Using Host Notify IRQ\012"
+drivers/i2c/i2c-core-base.c:379 [i2c_core]i2c_device_probe =_ "probe\012"
+drivers/i2c/i2c-core-base.c:1504 [i2c_core]i2c_do_del_adapter =_ "Removing %s at 0x%x\012"
+drivers/i2c/i2c-core-base.c:1550 [i2c_core]i2c_del_adapter =_ "attempting to delete unregistered adapter [%s]\012"
+drivers/i2c/i2c-core-base.c:1587 [i2c_core]i2c_del_adapter =_ "adapter [%s] unregistered\012"
+drivers/i2c/i2c-core-base.c:1567 [i2c_core]i2c_del_adapter =_ "Removing %s at 0x%x\012"
+drivers/i2c/i2c-core-base.c:1715 [i2c_core]i2c_register_driver =_ "driver [%s] registered\012"
+drivers/i2c/i2c-core-base.c:1741 [i2c_core]i2c_del_driver =_ "driver [%s] unregistered\012"
+drivers/i2c/i2c-core-base.c:2028 [i2c_core]i2c_transfer =_ "I2C level transfers not supported\012"
+drivers/i2c/i2c-core-base.c:2369 [i2c_core]i2c_get_dma_safe_msg_buf =_ "DMA buffer for addr=0x%02x with length 0 is bogus\012"
+drivers/i2c/i2c-core-base.c:2377 [i2c_core]i2c_get_dma_safe_msg_buf =_ "using bounce buffer for addr=0x%02x, len=%d\012"
+drivers/i2c/i2c-core-base.c:789 [i2c_core]i2c_new_client_device =_ "client [%s] registered with bus id %s\012"
+drivers/i2c/i2c-core-base.c:1360 [i2c_core]i2c_register_adapter =_ "adapter [%s] registered\012"
+drivers/i2c/i2c-core-base.c:2303 [i2c_core]i2c_new_probed_device =_ "Address 0x%02x already in use, not probing\012"
+drivers/i2c/i2c-core-base.c:2313 [i2c_core]i2c_new_probed_device =_ "Probing failed, no device found\012"
+drivers/i2c/i2c-core-base.c:1009 [i2c_core]i2c_new_ancillary_device =_ "Address for %s : 0x%x\012"
+drivers/i2c/i2c-core-base.c:2245 [i2c_core]i2c_detect =_ "This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. If you need it, check 'Documentation/i2c/instantiating-devices.rst' for alternatives.\012"
+drivers/i2c/i2c-core-base.c:2262 [i2c_core]i2c_detect =_ "found normal entry for adapter %d, addr 0x%02x\012"
+drivers/i2c/i2c-core-base.c:2218 [i2c_core]i2c_detect_address =_ "Creating %s at 0x%02x\012"
+drivers/i2c/i2c-core-smbus.c:80 [i2c_core]i2c_smbus_check_pec =_ "Bad PEC 0x%02x vs. 0x%02x\012"
+drivers/i2c/i2c-core-of.c:94 [i2c_core]of_i2c_register_devices =_ "of_i2c: walking child nodes\012"
+drivers/i2c/i2c-core-of.c:71 [i2c_core]of_i2c_register_device =_ "of_i2c: register %pOF\012"
+drivers/i2c/i2c-dev.c:327 [i2c_dev]i2cdev_ioctl_smbus =_ "size out of range (%x) in ioctl I2C_SMBUS.\012"
+drivers/i2c/i2c-dev.c:336 [i2c_dev]i2cdev_ioctl_smbus =_ "read_write out of range (%x) in ioctl I2C_SMBUS.\012"
+drivers/i2c/i2c-dev.c:352 [i2c_dev]i2cdev_ioctl_smbus =_ "data is NULL pointer in ioctl I2C_SMBUS.\012"
+drivers/i2c/i2c-dev.c:171 [i2c_dev]i2cdev_write =_ "i2c-dev: i2c-%d writing %zu bytes.\012"
+drivers/i2c/i2c-dev.c:396 [i2c_dev]i2cdev_ioctl =_ "ioctl, cmd=0x%02x, arg=0x%02lx\012"
+drivers/i2c/i2c-dev.c:687 [i2c_dev]i2cdev_detach_adapter =_ "i2c-dev: adapter [%s] unregistered\012"
+drivers/i2c/i2c-dev.c:661 [i2c_dev]i2cdev_attach_adapter =_ "i2c-dev: adapter [%s] registered as minor %d\012"
+drivers/i2c/i2c-dev.c:147 [i2c_dev]i2cdev_read =_ "i2c-dev: i2c-%d reading %zu bytes.\012"
+drivers/i2c/busses/i2c-designware-common.c:203 [i2c_designware_core]i2c_dw_set_sda_hold =_ "SDA Hold Time TX:RX = %d:%d\012"
+drivers/i2c/busses/i2c-designware-common.c:332 [i2c_designware_core]i2c_dw_handle_tx_abort =_ "%s: %s\012"
+drivers/i2c/busses/i2c-designware-master.c:630 [i2c_designware_core]i2c_dw_isr =_ "enabled=%#x stat=%#x\012"
+drivers/i2c/busses/i2c-designware-master.c:74 [i2c_designware_core]i2c_dw_set_timings_master =_ "Standard Mode HCNT:LCNT = %d:%d\012"
+drivers/i2c/busses/i2c-designware-master.c:111 [i2c_designware_core]i2c_dw_set_timings_master =_ "Fast Mode%s HCNT:LCNT = %d:%d\012"
+drivers/i2c/busses/i2c-designware-master.c:125 [i2c_designware_core]i2c_dw_set_timings_master =_ "High Speed Mode HCNT:LCNT = %d:%d\012"
+drivers/i2c/busses/i2c-designware-master.c:143 [i2c_designware_core]i2c_dw_set_timings_master =_ "Bus speed: %s%s\012"
+drivers/i2c/busses/i2c-designware-master.c:429 [i2c_designware_core]i2c_dw_xfer =_ "%s: msgs: %d\012"
+drivers/media/mc/mc-device.c:716 [mc]media_device_init =_ "Media device initialized\012"
+drivers/media/mc/mc-device.c:575 [mc]media_device_release =_ "Media device released\012"
+drivers/media/mc/mc-device.c:842 [mc]media_device_unregister =_ "Media device unregistered\012"
+drivers/media/mc/mc-device.c:765 [mc]__media_device_register =_ "Media device registered\012"
+drivers/media/mc/mc-devnode.c:193 [mc]media_release =_ "%s: Media Release\012"
+drivers/media/mc/mc-devnode.c:63 [mc]media_devnode_release =_ "%s: Media Devnode Deallocated\012"
+drivers/media/mc/mc-entity.c:102 [mc]dev_dbg_obj =_ "%s id %u: entity '%s'\012"
+drivers/media/mc/mc-entity.c:114 [mc]dev_dbg_obj =_ "%s id %u: %s link id %u ==> id %u\012"
+drivers/media/mc/mc-entity.c:126 [mc]dev_dbg_obj =_ "%s id %u: %s%spad '%s':%d\012"
+drivers/media/mc/mc-entity.c:138 [mc]dev_dbg_obj =_ "%s id %u: intf_devnode %s - major: %d, minor: %d\012"
+drivers/media/mc/mc-entity.c:304 [mc]media_graph_walk_start =_ "begin graph walk at '%s'\012"
+drivers/media/mc/mc-entity.c:322 [mc]media_graph_walk_iter =_ "walk: skipping disabled link '%s':%u -> '%s':%u\012"
+drivers/media/mc/mc-entity.c:334 [mc]media_graph_walk_iter =_ "walk: skipping entity '%s' (already seen)\012"
+drivers/media/mc/mc-entity.c:342 [mc]media_graph_walk_iter =_ "walk: pushing '%s' on stack\012"
+drivers/media/mc/mc-entity.c:362 [mc]media_graph_walk_next =_ "walk: returning entity '%s'\012"
+drivers/media/mc/mc-entity.c:480 [mc]__media_pipeline_start =_ "link validation failed for '%s':%u -> '%s':%u, error %d\012"
+drivers/media/mc/mc-entity.c:494 [mc]__media_pipeline_start =_ "'%s':%u must be connected by an enabled link\012"
+drivers/media/mc/mc-request.c:66 [mc]media_request_release =_ "request: release %s\012"
+drivers/media/mc/mc-request.c:126 [mc]media_request_ioctl_queue =_ "request: queue %s\012"
+drivers/media/mc/mc-request.c:146 [mc]media_request_ioctl_queue =_ "request: unable to queue %s, request in state %s\012"
+drivers/media/mc/mc-request.c:181 [mc]media_request_ioctl_queue =_ "request: can't queue %s (%d)\012"
+drivers/media/mc/mc-request.c:198 [mc]media_request_ioctl_reinit =_ "request: %s not in idle or complete state, cannot reinit\012"
+drivers/media/mc/mc-request.c:205 [mc]media_request_ioctl_reinit =_ "request: %s is being accessed, cannot reinit\012"
+drivers/media/mc/mc-request.c:283 [mc]media_request_get_by_fd =_ "cannot find request_fd %d\012"
+drivers/media/mc/mc-request.c:333 [mc]media_request_alloc =_ "request: allocated %s\012"
+drivers/media/mc/mc-dev-allocator.c:130 [mc]media_device_delete =_ "%s: module %s put owner module reference\012"
+drivers/media/mc/mc-dev-allocator.c:47 [mc]media_device_instance_release =_ "%s: releasing Media Device\012"
+drivers/media/mc/mc-dev-allocator.c:80 [mc]__media_device_get =_ "%s: module %s got owner reference\012"
+drivers/media/mc/mc-dev-allocator.c:93 [mc]__media_device_get =_ "%s: Allocated media device for owner %s\012"
+drivers/media/v4l2-core/v4l2-async.c:409 [videodev]v4l2_async_notifier_asd_valid =_ "subdev descriptor already listed in this or other notifiers\012"
+drivers/media/dvb-core/dvb_frontend.c:2907 [dvb_core]dvb_frontend_suspend =_ "%s: adap=%d fe=%d\012"
+drivers/media/dvb-core/dvb_frontend.c:2927 [dvb_core]dvb_frontend_resume =_ "%s: adap=%d fe=%d\012"
+drivers/media/dvb-core/dvb_frontend.c:356 [dvb_core]dvb_frontend_swzigzag_update_delay =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:455 [dvb_core]dvb_frontend_swzigzag_autotune =_ "%s: drift:%i inversion:%i auto_step:%i auto_sub_step:%i started_auto_step:%i\012"
+drivers/media/dvb-core/dvb_frontend.c:1606 [dvb_core]emulate_delivery_system =_ "%s: Using defaults for SYS_ISDBT\012"
+drivers/media/dvb-core/dvb_frontend.c:1625 [dvb_core]emulate_delivery_system =_ "%s: change delivery system on cache to %d\012"
+drivers/media/dvb-core/dvb_frontend.c:2858 [dvb_core]dvb_frontend_release =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:912 [dvb_core]dvb_frontend_get_frequency_limits =_ "frequency interval: tuner: %u...%u, frontend: %u...%u"
+drivers/media/dvb-core/dvb_frontend.c:805 [dvb_core]dvb_frontend_stop =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:2732 [dvb_core]dvb_frontend_open =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:854 [dvb_core]dvb_frontend_start =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:1005 [dvb_core]dvb_frontend_clear_cache =_ "%s: Clearing cache for delivery system %d\012"
+drivers/media/dvb-core/dvb_frontend.c:2714 [dvb_core]dvb_frontend_poll =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:3017 [dvb_core]dvb_unregister_frontend =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:329 [dvb_core]dvb_frontend_init =_ "%s: initialising adapter %i frontend %i (%s)...\012"
+drivers/media/dvb-core/dvb_frontend.c:1256 [dvb_core]dtv_property_legacy_params_sync =_ "%s: Preparing QPSK req\012"
+drivers/media/dvb-core/dvb_frontend.c:1261 [dvb_core]dtv_property_legacy_params_sync =_ "%s: Preparing QAM req\012"
+drivers/media/dvb-core/dvb_frontend.c:1267 [dvb_core]dtv_property_legacy_params_sync =_ "%s: Preparing OFDM req\012"
+drivers/media/dvb-core/dvb_frontend.c:1299 [dvb_core]dtv_property_legacy_params_sync =_ "%s: Preparing VSB req\012"
+drivers/media/dvb-core/dvb_frontend.c:245 [dvb_core]dvb_frontend_add_event =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:655 [dvb_core]dvb_frontend_thread =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:704 [dvb_core]dvb_frontend_thread =_ "%s: Frontend ALGO = DVBFE_ALGO_HW\012"
+drivers/media/dvb-core/dvb_frontend.c:707 [dvb_core]dvb_frontend_thread =_ "%s: Retune requested, FESTATE_RETUNE\012"
+drivers/media/dvb-core/dvb_frontend.c:718 [dvb_core]dvb_frontend_thread =_ "%s: state changed, adding current state\012"
+drivers/media/dvb-core/dvb_frontend.c:724 [dvb_core]dvb_frontend_thread =_ "%s: Frontend ALGO = DVBFE_ALGO_SW\012"
+drivers/media/dvb-core/dvb_frontend.c:728 [dvb_core]dvb_frontend_thread =_ "%s: Frontend ALGO = DVBFE_ALGO_CUSTOM, state=%d\012"
+drivers/media/dvb-core/dvb_frontend.c:730 [dvb_core]dvb_frontend_thread =_ "%s: Retune requested, FESTAT_RETUNE\012"
+drivers/media/dvb-core/dvb_frontend.c:766 [dvb_core]dvb_frontend_thread =_ "%s: UNDEFINED ALGO !\012"
+drivers/media/dvb-core/dvb_frontend.c:2328 [dvb_core]dvb_get_property =_ "%s: properties.num = %d\012"
+drivers/media/dvb-core/dvb_frontend.c:2330 [dvb_core]dvb_get_property =_ "%s: properties.props = %p\012"
+drivers/media/dvb-core/dvb_frontend.c:1555 [dvb_core]dtv_property_process_get =_ "%s: FE property %d doesn't exist\012"
+drivers/media/dvb-core/dvb_frontend.c:1563 [dvb_core]dtv_property_process_get =_ "%s: GET cmd 0x%08x (%s) = 0x%08x\012"
+drivers/media/dvb-core/dvb_frontend.c:1570 [dvb_core]dtv_property_process_get =_ "%s: GET cmd 0x%08x (%s) len %d: %*ph\012"
+drivers/media/dvb-core/dvb_frontend.c:287 [dvb_core]dvb_frontend_get_event =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:2398 [dvb_core]dvb_frontend_handle_ioctl =_ "%s:\012"
+drivers/media/dvb-core/dvb_frontend.c:2406 [dvb_core]dvb_frontend_handle_ioctl =_ "%s: properties.num = %d\012"
+drivers/media/dvb-core/dvb_frontend.c:2408 [dvb_core]dvb_frontend_handle_ioctl =_ "%s: properties.props = %p\012"
+drivers/media/dvb-core/dvb_frontend.c:1821 [dvb_core]dtv_property_process_set =_ "%s: SET cmd 0x%08x (%s) to 0x%08x\012"
+drivers/media/dvb-core/dvb_frontend.c:1837 [dvb_core]dtv_property_process_set =_ "%s: Setting the frontend from property cache\012"
+drivers/media/dvb-core/dvb_frontend.c:1675 [dvb_core]dvbv5_set_delivery_system =_ "%s: Changing delivery system to %d\012"
+drivers/media/dvb-core/dvb_frontend.c:1691 [dvb_core]dvbv5_set_delivery_system =_ "%s: Delivery system %d not supported.\012"
+drivers/media/dvb-core/dvb_frontend.c:1712 [dvb_core]dvbv5_set_delivery_system =_ "%s: Delivery system %d not supported on emulation mode.\012"
+drivers/media/dvb-core/dvb_frontend.c:1718 [dvb_core]dvbv5_set_delivery_system =_ "%s: Using delivery system %d emulated as if it were %d\012"
+drivers/media/dvb-core/dvb_frontend.c:2483 [dvb_core]dvb_frontend_handle_ioctl =_ "%s: current delivery system on cache: %d, V3 type: %d\012"
+drivers/media/dvb-core/dvb_frontend.c:1768 [dvb_core]dvbv3_set_delivery_system =_ "%s: Using delivery system to %d\012"
+drivers/media/dvb-core/dvb_frontend.c:1787 [dvb_core]dvbv3_set_delivery_system =_ "%s: Couldn't find a delivery system that works with FE_SET_FRONTEND\012"
+drivers/media/dvb-core/dvb_frontend.c:1175 [dvb_core]dtv_property_cache_sync =_ "%s: Preparing QPSK req\012"
+drivers/media/dvb-core/dvb_frontend.c:1180 [dvb_core]dtv_property_cache_sync =_ "%s: Preparing QAM req\012"
+drivers/media/dvb-core/dvb_frontend.c:1186 [dvb_core]dtv_property_cache_sync =_ "%s: Preparing OFDM req\012"
+drivers/media/dvb-core/dvb_frontend.c:1219 [dvb_core]dtv_property_cache_sync =_ "%s: Preparing ATSC req\012"
+drivers/media/dvb-core/dvb_frontend.c:1995 [dvb_core]dvb_frontend_do_ioctl =_ "%s: (%d)\012"
+drivers/media/dvb-core/dvb_frontend.c:2965 [dvb_core]dvb_register_frontend =_ "%s:\012"
+drivers/media/dvb-core/dvb_net.c:1158 [dvb_core]dvb_net_feed_stop =_ "stop secfeed\012"
+drivers/media/dvb-core/dvb_net.c:1163 [dvb_core]dvb_net_feed_stop =_ "release secfilter\012"
+drivers/media/dvb-core/dvb_net.c:1172 [dvb_core]dvb_net_feed_stop =_ "release multi_filter[%d]\012"
+drivers/media/dvb-core/dvb_net.c:1186 [dvb_core]dvb_net_feed_stop =_ "stop tsfeed\012"
+drivers/media/dvb-core/dvb_net.c:1044 [dvb_core]dvb_net_filter_sec_set =_ "filter mac=%pM mask=%pM\012"
+drivers/media/dvb-core/dvb_net.c:1056 [dvb_core]dvb_net_feed_start =_ "rx_mode %i\012"
+drivers/media/dvb-core/dvb_net.c:1066 [dvb_core]dvb_net_feed_start =_ "alloc secfeed\012"
+drivers/media/dvb-core/dvb_net.c:1085 [dvb_core]dvb_net_feed_start =_ "set secfilter\012"
+drivers/media/dvb-core/dvb_net.c:1092 [dvb_core]dvb_net_feed_start =_ "set multi_secfilter[%d]\012"
+drivers/media/dvb-core/dvb_net.c:1099 [dvb_core]dvb_net_feed_start =_ "set multi_secfilter[0]\012"
+drivers/media/dvb-core/dvb_net.c:1105 [dvb_core]dvb_net_feed_start =_ "set secfilter\012"
+drivers/media/dvb-core/dvb_net.c:1110 [dvb_core]dvb_net_feed_start =_ "start filtering\012"
+drivers/media/dvb-core/dvb_net.c:1116 [dvb_core]dvb_net_feed_start =_ "alloc tsfeed\012"
+drivers/media/dvb-core/dvb_net.c:1139 [dvb_core]dvb_net_feed_start =_ "start filtering\012"
+drivers/media/dvb-core/dvb_net.c:1226 [dvb_core]wq_set_multicast_list =_ "promiscuous mode\012"
+drivers/media/dvb-core/dvb_net.c:1229 [dvb_core]wq_set_multicast_list =_ "allmulti mode\012"
+drivers/media/dvb-core/dvb_net.c:1235 [dvb_core]wq_set_multicast_list =_ "set_mc_list, %d entries\012"
+drivers/media/dvb-core/dvb_net.c:693 [dvb_core]dvb_net_ule_check_crc =_ "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\012"
+drivers/media/dvb-core/dvb_net.c:277 [dvb_core]handle_ule_extensions =_ "ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\012"
+drivers/media/common/videobuf2/videobuf2-memops.c:113 [videobuf2_memops]vb2_common_vm_close =_ "%s: %p, refcount: %d, vma: %08lx-%08lx\012"
+drivers/media/common/videobuf2/videobuf2-memops.c:95 [videobuf2_memops]vb2_common_vm_open =_ "%s: %p, refcount: %d, vma: %08lx-%08lx\012"
+drivers/media/common/videobuf2/videobuf2-dma-contig.c:205 [videobuf2_dma_contig]vb2_dc_mmap =_ "%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\012"
+drivers/media/common/videobuf2/videobuf2-dma-contig.c:472 [videobuf2_dma_contig]vb2_dc_get_userptr =_ "size is zero\012"
+drivers/media/platform/aspeed-video.c:427 [aspeed_video]aspeed_video_write =_ "write %03x[%08x]\012"
+drivers/media/platform/aspeed-video.c:419 [aspeed_video]aspeed_video_read =_ "read %03x[%08x]\012"
+drivers/media/platform/aspeed-video.c:412 [aspeed_video]aspeed_video_update =_ "update %03x[%08x -> %08x]\012"
+drivers/media/platform/aspeed-video.c:726 [aspeed_video]aspeed_video_calc_compressed_size =_ "Max compressed size: %x\012"
+drivers/media/platform/aspeed-video.c:527 [aspeed_video]aspeed_video_irq_res_change =_ "Resolution changed; resetting\012"
+drivers/media/platform/aspeed-video.c:762 [aspeed_video]aspeed_video_get_resolution =_ "Timed out; first mode detect\012"
+drivers/media/platform/aspeed-video.c:780 [aspeed_video]aspeed_video_get_resolution =_ "Timed out; second mode detect\012"
+drivers/media/platform/aspeed-video.c:814 [aspeed_video]aspeed_video_get_resolution =_ "Invalid resolution detected\012"
+drivers/media/platform/aspeed-video.c:832 [aspeed_video]aspeed_video_get_resolution =_ "Got resolution: %dx%d\012"
+drivers/media/platform/aspeed-video.c:1457 [aspeed_video]aspeed_video_stop_streaming =_ "Timed out when stopping streaming\012"
+drivers/media/platform/aspeed-video.c:438 [aspeed_video]aspeed_video_start_frame =_ "No signal; don't start frame\012"
+drivers/media/platform/aspeed-video.c:444 [aspeed_video]aspeed_video_start_frame =_ "Engine busy; don't start frame\012"
+drivers/media/platform/aspeed-video.c:453 [aspeed_video]aspeed_video_start_frame =_ "No buffers; don't start frame\012"
+drivers/power/supply/power_supply_core.c:118 [power_supply]power_supply_changed =_ "%s\012"
+drivers/power/supply/power_supply_core.c:335 [power_supply]power_supply_am_i_supplied =_ "%s count %u err %d\012"
+drivers/power/supply/power_supply_core.c:827 [power_supply]power_supply_dev_release =_ "%s\012"
+drivers/power/supply/power_supply_core.c:82 [power_supply]power_supply_changed_work =_ "%s\012"
+drivers/power/supply/power_supply_core.c:254 [power_supply]power_supply_check_supplies =_ "Failed to find supply!\012"
+drivers/power/supply/power_supply_core.c:192 [power_supply]power_supply_populate_supplied_from =_ "%s %d\012"
+drivers/power/supply/power_supply_sysfs.c:131 [power_supply]power_supply_show_property =_ "driver has no data for `%s' property\012"
+drivers/power/supply/power_supply_sysfs.c:390 [power_supply]power_supply_uevent =_ "No power supply yet\012"
+drivers/hwmon/hwmon.c:778 [hwmon]hwmon_device_unregister =_ "hwmon_device_unregister() failed: bad class ID!\012"
+drivers/mmc/core/core.c:270 [mmc_core]mmc_mrq_pr_debug =_ "<%s: starting CMD%u arg %08x flags %08x>\012"
+drivers/mmc/core/core.c:276 [mmc_core]mmc_mrq_pr_debug =_ "%s: starting %sCMD%u arg %08x flags %08x\012"
+drivers/mmc/core/core.c:279 [mmc_core]mmc_mrq_pr_debug =_ "%s: starting CQE transfer for tag %d blkaddr %u\012"
+drivers/mmc/core/core.c:288 [mmc_core]mmc_mrq_pr_debug =_ "%s:     blksz %d blocks %d flags %08x tsac %d ms nsac %d\012"
+drivers/mmc/core/core.c:294 [mmc_core]mmc_mrq_pr_debug =_ "%s:     CMD%u arg %08x flags %08x\012"
+drivers/mmc/core/core.c:511 [mmc_core]mmc_cqe_request_done =_ "%s: CQE req done (direct CMD%u): %d\012"
+drivers/mmc/core/core.c:514 [mmc_core]mmc_cqe_request_done =_ "%s: CQE transfer done tag %d\012"
+drivers/mmc/core/core.c:520 [mmc_core]mmc_cqe_request_done =_ "%s:     %d bytes transferred: %d\012"
+drivers/mmc/core/core.c:481 [mmc_core]mmc_cqe_start_req =_ "%s: failed to start CQE direct CMD%u, error %d\012"
+drivers/mmc/core/core.c:484 [mmc_core]mmc_cqe_start_req =_ "%s: failed to start CQE transfer for tag %d, error %d\012"
+drivers/mmc/core/core.c:184 [mmc_core]mmc_request_done =_ "%s: req done <CMD%u>: %d: %08x %08x %08x %08x\012"
+drivers/mmc/core/core.c:190 [mmc_core]mmc_request_done =_ "%s: req done (CMD%u): %d: %08x %08x %08x %08x\012"
+drivers/mmc/core/core.c:195 [mmc_core]mmc_request_done =_ "%s:     %d bytes transferred: %d\012"
+drivers/mmc/core/core.c:203 [mmc_core]mmc_request_done =_ "%s:     (CMD%u): %d: %08x %08x %08x %08x\012"
+drivers/mmc/core/core.c:2230 [mmc_core]_mmc_detect_card_removed =_ "%s: card removed too slowly\012"
+drivers/mmc/core/core.c:2235 [mmc_core]_mmc_detect_card_removed =_ "%s: card remove detected\012"
+drivers/mmc/core/core.c:2083 [mmc_core]mmc_calc_max_discard =_ "%s: calculated max. discard sectors %u for timeout %u ms\012"
+drivers/mmc/core/core.c:430 [mmc_core]mmc_wait_for_req_done =_ "%s: req failed (CMD%u): %d, retrying...\012"
+drivers/mmc/core/core.c:126 [mmc_core]mmc_command_done =_ "%s: cmd done, tfr ongoing (CMD%u)\012"
+drivers/mmc/core/core.c:908 [mmc_core]mmc_set_ios =_ "%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u timing %u\012"
+drivers/mmc/core/core.c:1173 [mmc_core]mmc_set_initial_signal_voltage =_ "Initial signal voltage of 3.3v\012"
+drivers/mmc/core/core.c:1175 [mmc_core]mmc_set_initial_signal_voltage =_ "Initial signal voltage of 1.8v\012"
+drivers/mmc/core/core.c:1177 [mmc_core]mmc_set_initial_signal_voltage =_ "Initial signal voltage of 1.2v\012"
+drivers/mmc/core/core.c:1261 [mmc_core]mmc_set_uhs_voltage =_ "%s: Signal voltage switch failed, power cycling card\012"
+drivers/mmc/core/core.c:2171 [mmc_core]mmc_rescan_try_freq =_ "%s: %s: trying to init card at %u Hz\012"
+drivers/mmc/core/host.c:186 [mmc_core]mmc_of_parse =_ "\042bus-width\042 property is missing, assuming 1 bit.\012"
+drivers/mmc/core/host.c:361 [mmc_core]mmc_of_parse_voltage =_ "%pOF: voltage-ranges unspecified\012"
+drivers/mmc/core/mmc.c:1595 [mmc_core]mmc_init_card =_ "%s: Perhaps the card was replaced\012"
+drivers/mmc/core/quirks.h:169 [mmc_core]mmc_fixup_device =_ "calling %ps\012"
+drivers/mmc/core/mmc.c:536 [mmc_core]mmc_decode_ext_csd =_ "%s: MAN_BKOPS_EN bit is set\012"
+drivers/mmc/core/mmc.c:542 [mmc_core]mmc_decode_ext_csd =_ "%s: AUTO_BKOPS_EN bit is set\012"
+drivers/mmc/core/mmc.c:649 [mmc_core]mmc_decode_ext_csd =_ "%s: Command Queue supported depth %u\012"
+drivers/mmc/core/mmc_ops.c:870 [mmc_core]mmc_interrupt_hpi =_ "%s: HPI cannot be sent. Card state=%d\012"
+drivers/mmc/core/sd.c:961 [mmc_core]mmc_sd_init_card =_ "%s: Perhaps the card was replaced\012"
+drivers/mmc/core/sdio.c:615 [mmc_core]mmc_sdio_init_card =_ "%s: Perhaps the card was replaced\012"
+drivers/mmc/core/sdio.c:624 [mmc_core]mmc_sdio_init_card =_ "%s: Perhaps the card was replaced\012"
+drivers/mmc/core/sdio.c:739 [mmc_core]mmc_sdio_init_card =_ "%s: Perhaps the card was replaced\012"
+drivers/mmc/core/quirks.h:169 [mmc_core]mmc_fixup_device =_ "calling %ps\012"
+drivers/mmc/core/sdio_io.c:118 [mmc_core]sdio_disable_func =_ "SDIO: Disabling device %s...\012"
+drivers/mmc/core/sdio_io.c:130 [mmc_core]sdio_disable_func =_ "SDIO: Disabled device %s\012"
+drivers/mmc/core/sdio_io.c:135 [mmc_core]sdio_disable_func =_ "SDIO: Failed to disable device %s\012"
+drivers/mmc/core/sdio_io.c:68 [mmc_core]sdio_enable_func =_ "SDIO: Enabling device %s...\012"
+drivers/mmc/core/sdio_io.c:93 [mmc_core]sdio_enable_func =_ "SDIO: Enabled device %s\012"
+drivers/mmc/core/sdio_io.c:98 [mmc_core]sdio_enable_func =_ "SDIO: Failed to enable device %s\012"
+drivers/mmc/core/sdio_irq.c:40 [mmc_core]sdio_get_pending_irqs =_ "%s: error %d reading SDIO_CCCR_INTx\012"
+drivers/mmc/core/sdio_irq.c:159 [mmc_core]sdio_irq_thread =_ "%s: IRQ thread started (poll period = %lu jiffies)\012"
+drivers/mmc/core/sdio_irq.c:220 [mmc_core]sdio_irq_thread =_ "%s: IRQ thread exiting with code %d\012"
+drivers/mmc/core/sdio_irq.c:307 [mmc_core]sdio_claim_irq =_ "SDIO: Enabling IRQ for %s...\012"
+drivers/mmc/core/sdio_irq.c:310 [mmc_core]sdio_claim_irq =_ "SDIO: IRQ for %s already in use.\012"
+drivers/mmc/core/sdio_irq.c:350 [mmc_core]sdio_release_irq =_ "SDIO: Disabling IRQ for %s...\012"
+drivers/mmc/core/regulator.c:243 [mmc_core]mmc_regulator_get_supply =_ "No vmmc regulator found\012"
+drivers/mmc/core/regulator.c:255 [mmc_core]mmc_regulator_get_supply =_ "No vqmmc regulator found\012"
+drivers/mmc/core/quirks.h:169 [mmc_block]mmc_fixup_device =_ "calling %ps\012"
+drivers/mmc/core/block.c:423 [mmc_block]ioctl_do_sanitize =_ "%s: %s - SANITIZE IN PROGRESS...\012"
+drivers/mmc/core/block.c:434 [mmc_block]ioctl_do_sanitize =_ "%s: %s - SANITIZE COMPLETED\012"
+drivers/mmc/core/block.c:1470 [mmc_block]mmc_blk_cqe_recovery =_ "%s: CQE recovery start\012"
+drivers/mmc/core/block.c:1478 [mmc_block]mmc_blk_cqe_recovery =_ "%s: CQE recovery done\012"
+drivers/mmc/host/sdhci.c:3469 [sdhci]sdhci_cqe_disable =_ "%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\012"
+drivers/mmc/host/sdhci.c:3749 [sdhci]sdhci_setup_host =_ "%s: sdhci: Version:   0x%08x | Present:  0x%08x\012"
+drivers/mmc/host/sdhci.c:3752 [sdhci]sdhci_setup_host =_ "%s: sdhci: Caps:      0x%08x | Caps_1:   0x%08x\012"
+drivers/mmc/host/sdhci.c:3769 [sdhci]sdhci_setup_host =_ "%s: sdhci: Controller doesn't have SDMA capability\012"
+drivers/mmc/host/sdhci.c:3775 [sdhci]sdhci_setup_host =_ "%s: sdhci: Disabling DMA as it is marked broken\012"
+drivers/mmc/host/sdhci.c:3785 [sdhci]sdhci_setup_host =_ "%s: sdhci: Disabling ADMA as it is marked broken\012"
+drivers/mmc/host/sdhci.c:3969 [sdhci]sdhci_setup_host =_ "%s: sdhci: Auto-CMD23 available\012"
+drivers/mmc/host/sdhci.c:3971 [sdhci]sdhci_setup_host =_ "%s: sdhci: Auto-CMD23 unavailable\012"
+drivers/mmc/host/sdhci.c:956 [sdhci]sdhci_calc_timeout =_ "%s: sdhci: Too large timeout 0x%x requested for CMD%d!\012"
+drivers/mmc/host/sdhci.c:3445 [sdhci]sdhci_cqe_enable =_ "%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\012"
+drivers/mmc/host/sdhci.c:1085 [sdhci]sdhci_prepare_data =_ "%s: sdhci: Reverting to PIO because of bad alignment\012"
+drivers/mmc/host/sdhci.c:1080 [sdhci]sdhci_prepare_data =_ "%s: sdhci: Reverting to PIO because of transfer size (%d)\012"
+drivers/mmc/host/sdhci.c:2414 [sdhci]__sdhci_execute_tuning =_ "%s: Tuning timeout, falling back to fixed sampling clock\012"
+drivers/mmc/host/sdhci.c:3065 [sdhci]sdhci_irq =_ "%s: sdhci: IRQ status 0x%08x\012"
+drivers/mmc/host/sdhci.c:1485 [sdhci]sdhci_finish_command =_ "%s: sdhci: Cannot wait for busy signal when also doing a data transfer"
+drivers/mmc/host/sdhci.c:480 [sdhci]sdhci_read_block_pio =_ "%s: sdhci: PIO reading\012"
+drivers/mmc/host/sdhci.c:524 [sdhci]sdhci_write_block_pio =_ "%s: sdhci: PIO writing\012"
+drivers/mmc/host/sdhci.c:597 [sdhci]sdhci_transfer_pio =_ "%s: sdhci: PIO transfer complete.\012"
+drivers/mmc/host/sdhci.c:3013 [sdhci]sdhci_data_irq =_ "%s: sdhci: DMA base %pad, transferred 0x%06x bytes, next %pad\012"
+drivers/leds/led-class.c:322 [led_class]led_classdev_register_ext =_ "Registered led device: %s\012"
+drivers/clocksource/dw_apb_timer.c:127 [dw_apb_timer]apbt_shutdown =_ "%s CPU %d state=shutdown\012"
+drivers/clocksource/dw_apb_timer.c:198 [dw_apb_timer]apbt_resume =_ "%s CPU %d state=resume\012"
+drivers/clocksource/dw_apb_timer.c:141 [dw_apb_timer]apbt_set_oneshot =_ "%s CPU %d state=oneshot\012"
+drivers/clocksource/dw_apb_timer.c:174 [dw_apb_timer]apbt_set_periodic =_ "%s CPU %d state=periodic\012"
+drivers/clocksource/dw_apb_timer.c:186 [dw_apb_timer]apbt_set_periodic =_ "Setting clock period %lu for HZ %d\012"
+drivers/clocksource/dw_apb_timer_of.c:151 [dw_apb_timer_of]dw_apb_timer_init =_ "%s: found clockevent timer\012"
+drivers/clocksource/dw_apb_timer_of.c:155 [dw_apb_timer_of]dw_apb_timer_init =_ "%s: found clocksource timer\012"
+drivers/hid/hid-input.c:1947 [hid]hidinput_connect =_ "Some usages could not be mapped, please use HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE if this is legitimate.\012"
+drivers/hid/usbhid/hid-core.c:201 [usbhid]usbhid_restart_out_queue =_ "Kicking head %d tail %d"
+drivers/hid/usbhid/hid-core.c:240 [usbhid]usbhid_restart_ctrl_queue =_ "Kicking head %d tail %d"
+drivers/hid/usbhid/hid-core.c:110 [usbhid]hid_retry_timeout =_ "retrying intr urb\012"
+drivers/hid/usbhid/hid-core.c:124 [usbhid]hid_reset =_ "clear halt\012"
+drivers/hid/usbhid/hid-core.c:131 [usbhid]hid_reset =_ "clear-halt failed: %d\012"
+drivers/hid/usbhid/hid-core.c:137 [usbhid]hid_reset =_ "resetting device\012"
+drivers/hid/usbhid/hid-pidff.c:825 [usbhid]pidff_find_reports =_ "found usage 0x%02x from field->logical\012"
+drivers/hid/usbhid/hid-pidff.c:845 [usbhid]pidff_find_reports =_ "found usage 0x%02x from collection array\012"
+drivers/hid/usbhid/hid-pidff.c:768 [usbhid]pidff_find_fields =_ "maxusage and report_count do not match, skipping\012"
+drivers/hid/usbhid/hid-pidff.c:775 [usbhid]pidff_find_fields =_ "found %d at %d->%d\012"
+drivers/hid/usbhid/hid-pidff.c:787 [usbhid]pidff_find_fields =_ "failed to locate %d\012"
+drivers/hid/usbhid/hid-pidff.c:226 [usbhid]pidff_set_signed =_ "calculated from %d to %d\012"
+drivers/hid/usbhid/hid-pidff.c:211 [usbhid]pidff_set =_ "calculated from %d to %d\012"
+drivers/hid/usbhid/hid-pidff.c:252 [usbhid]pidff_set_envelope_report =_ "attack %u => %d\012"
+drivers/hid/usbhid/hid-pidff.c:539 [usbhid]pidff_erase_effect =_ "starting to erase %d/%d\012"
+drivers/hid/usbhid/hid-pidff.c:458 [usbhid]pidff_request_effect_upload =_ "create_new_effect sent, type: %d\012"
+drivers/hid/usbhid/hid-pidff.c:465 [usbhid]pidff_request_effect_upload =_ "pid_block_load requested\012"
+drivers/hid/usbhid/hid-pidff.c:473 [usbhid]pidff_request_effect_upload =_ "device reported free memory: %d bytes\012"
+drivers/hid/usbhid/hid-pidff.c:480 [usbhid]pidff_request_effect_upload =_ "not enough memory free: %d bytes\012"
+drivers/hid/usbhid/hid-pidff.c:702 [usbhid]pidff_upload_effect =_ "uploaded\012"
+drivers/hid/usbhid/hid-pidff.c:1236 [usbhid]hid_pidff_init =_ "starting pid init\012"
+drivers/hid/usbhid/hid-pidff.c:1239 [usbhid]hid_pidff_init =_ "not a PID device, no output report\012"
+drivers/hid/usbhid/hid-pidff.c:860 [usbhid]pidff_reports_ok =_ "%d missing\012"
+drivers/hid/usbhid/hid-pidff.c:1255 [usbhid]hid_pidff_init =_ "reports not ok, aborting\012"
+drivers/hid/usbhid/hid-pidff.c:922 [usbhid]pidff_find_special_fields =_ "finding special fields\012"
+drivers/hid/usbhid/hid-pidff.c:943 [usbhid]pidff_find_special_fields =_ "search done\012"
+drivers/hid/usbhid/hid-pidff.c:1177 [usbhid]pidff_reset =_ "pid_pool requested again\012"
+drivers/hid/usbhid/hid-pidff.c:1280 [usbhid]hid_pidff_init =_ "max effects is %d\012"
+drivers/hid/usbhid/hid-pidff.c:1287 [usbhid]hid_pidff_init =_ "max simultaneous effects is %d\012"
+drivers/hid/usbhid/hid-pidff.c:1291 [usbhid]hid_pidff_init =_ "device memory size is %d bytes\012"
+drivers/of/base.c:2333 [base]of_map_rid =_ "%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\012"
+drivers/of/base.c:2108 [base]of_alias_get_alias_list =_ "%s: Looking for stem: %s\012"
+drivers/of/base.c:2111 [base]of_alias_get_alias_list =_ "%s: stem: %s, id: %d\012"
+drivers/of/base.c:2115 [base]of_alias_get_alias_list =_ "%s: stem comparison didn't pass %s\012"
+drivers/of/base.c:2120 [base]of_alias_get_alias_list =_ "%s: Allocated ID %d\012"
+drivers/of/base.c:1981 [base]of_alias_add =_ "adding DT alias:%s: stem=%s id=%i node=%pOF\012"
+drivers/of/device.c:126 [device]of_dma_configure =_ "dma_pfn_offset(%#08lx)\012"
+drivers/of/device.c:160 [device]of_dma_configure =_ "device is%sdma coherent\012"
+drivers/of/device.c:167 [device]of_dma_configure =_ "device is%sbehind an iommu\012"
+drivers/of/platform.c:140 [platform]of_device_alloc =_ "not all legacy IRQ resources mapped for %pOFn\012"
+drivers/of/platform.c:361 [platform]of_platform_bus_create =_ "%s() - skipping %pOF, no compatible prop\012"
+drivers/of/platform.c:367 [platform]of_platform_bus_create =_ "%s() - skipping %pOF node\012"
+drivers/of/platform.c:373 [platform]of_platform_bus_create =_ "%s() - skipping %pOF, already populated\012"
+drivers/of/platform.c:314 [platform]of_dev_lookup =_ "%pOF: devname=%s\012"
+drivers/of/platform.c:327 [platform]of_dev_lookup =_ "%pOF: compatible match\012"
+drivers/of/platform.c:397 [platform]of_platform_bus_create =_ "   create child: %pOF\012"
+drivers/of/platform.c:428 [platform]of_platform_bus_probe =_ "%s()\012"
+drivers/of/platform.c:429 [platform]of_platform_bus_probe =_ " starting at: %pOF\012"
+drivers/of/platform.c:480 [platform]of_platform_populate =_ "%s()\012"
+drivers/of/platform.c:481 [platform]of_platform_populate =_ " starting at: %pOF\012"
+drivers/of/property.c:437 [property]of_property_match_string =_ "comparing %s with %s\012"
+drivers/of/property.c:796 [property]of_graph_get_remote_node =_ "no valid endpoint (%d, %d) for node %pOF\012"
+drivers/of/property.c:803 [property]of_graph_get_remote_node =_ "no valid remote node\012"
+drivers/of/property.c:808 [property]of_graph_get_remote_node =_ "not available for remote node\012"
+drivers/of/fdt.c:200 [fdt]populate_properties =_ "fixed up name for %s -> %s\012"
+drivers/of/fdt.c:75 [fdt]of_fdt_limit_memory =_ "Limiting number of entries to %d\012"
+drivers/of/fdt.c:376 [fdt]__unflatten_device_tree =_ " -> unflatten_device_tree()\012"
+drivers/of/fdt.c:379 [fdt]__unflatten_device_tree =_ "No device tree pointer\012"
+drivers/of/fdt.c:383 [fdt]__unflatten_device_tree =_ "Unflattening device tree:\012"
+drivers/of/fdt.c:384 [fdt]__unflatten_device_tree =_ "magic: %08x\012"
+drivers/of/fdt.c:385 [fdt]__unflatten_device_tree =_ "size: %08x\012"
+drivers/of/fdt.c:386 [fdt]__unflatten_device_tree =_ "version: %08x\012"
+drivers/of/fdt.c:399 [fdt]__unflatten_device_tree =_ "  size is %d, allocating...\012"
+drivers/of/fdt.c:410 [fdt]__unflatten_device_tree =_ "  unflattening %p...\012"
+drivers/of/fdt.c:420 [fdt]__unflatten_device_tree =_ "unflattened tree is detached\012"
+drivers/of/fdt.c:423 [fdt]__unflatten_device_tree =_ " <- unflatten_device_tree()\012"
+drivers/of/fdt.c:974 [fdt]early_init_dt_scan_root =_ "dt_root_size_cells = %x\012"
+drivers/of/fdt.c:979 [fdt]early_init_dt_scan_root =_ "dt_root_addr_cells = %x\012"
+drivers/of/fdt.c:1050 [fdt]early_init_dt_scan_chosen =_ "search \042chosen\042, depth: %d, uname: %s\012"
+drivers/of/fdt.c:881 [fdt]early_init_dt_check_for_initrd =_ "Looking for initrd properties... "
+drivers/of/fdt.c:898 [fdt]early_init_dt_check_for_initrd =_ "initrd_start=0x%llx  initrd_end=0x%llx\012"
+drivers/of/fdt.c:1081 [fdt]early_init_dt_scan_chosen =_ "Command line is: %s\012"
+drivers/of/fdt.c:1017 [fdt]early_init_dt_scan_memory =_ "memory scan node %s, reg size %d,\012"
+drivers/of/fdt.c:1028 [fdt]early_init_dt_scan_memory =_ " - %llx ,  %llx\012"
+drivers/of/fdt.c:505 [fdt]__reserved_mem_reserve_reg =_ "Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\012"
+drivers/of/fdt_address.c:80 [fdt_address]fdt_bus_default_map =_ "default map, cp=%llx, s=%llx, da=%llx\012"
+drivers/of/fdt_address.c:173 [fdt_address]fdt_translate_address =_ "** translation for device %s **\012"
+drivers/of/fdt_address.c:198 [fdt_address]fdt_translate_address =_ "bus (na=%d, ns=%d) on %s\012"
+drivers/of/fdt_address.c:209 [fdt_address]fdt_translate_address =_ "reached root node\012"
+drivers/of/fdt_address.c:224 [fdt_address]fdt_translate_address =_ "parent bus (na=%d, ns=%d) on %s\012"
+drivers/of/fdt_address.c:125 [fdt_address]fdt_translate_one =_ "empty ranges, 1:1 translation\012"
+drivers/of/fdt_address.c:129 [fdt_address]fdt_translate_one =_ "walking ranges...\012"
+drivers/of/fdt_address.c:140 [fdt_address]fdt_translate_one =_ "not found !\012"
+drivers/of/fdt_address.c:147 [fdt_address]fdt_translate_one =_ "with offset: %llx\012"
+drivers/of/address.c:77 [address]of_bus_default_map =_ "default map, cp=%llx, s=%llx, da=%llx\012"
+drivers/of/address.c:402 [address]of_bus_isa_map =_ "ISA map, cp=%llx, s=%llx, da=%llx\012"
+drivers/of/address.c:582 [address]__of_translate_address =_ "** translation for device %pOF **\012"
+drivers/of/address.c:597 [address]__of_translate_address =_ "Bad cell count for %pOF\012"
+drivers/of/address.c:603 [address]__of_translate_address =_ "bus is %s (na=%d, ns=%d) on %pOF\012"
+drivers/of/address.c:617 [address]__of_translate_address =_ "reached root node\012"
+drivers/of/address.c:630 [address]__of_translate_address =_ "indirectIO matched(%pOF) 0x%llx\012"
+drivers/of/address.c:644 [address]__of_translate_address =_ "parent bus is %s (na=%d, ns=%d) on %pOF\012"
+drivers/of/address.c:523 [address]of_translate_one =_ "no ranges; cannot translate\012"
+drivers/of/address.c:529 [address]of_translate_one =_ "empty ranges; 1:1 translation\012"
+drivers/of/address.c:533 [address]of_translate_one =_ "walking ranges...\012"
+drivers/of/address.c:544 [address]of_translate_one =_ "not found !\012"
+drivers/of/address.c:551 [address]of_translate_one =_ "with offset: %llx\012"
+drivers/of/address.c:960 [address]of_dma_get_range =_ "no dma-ranges found for node(%pOF)\012"
+drivers/of/address.c:987 [address]of_dma_get_range =_ "dma_addr(%llx) cpu_addr(%llx) size(%llx)\012"
+drivers/of/irq.c:120 [irq]of_irq_parse_raw =_ " -> no parent found !\012"
+drivers/of/irq.c:124 [irq]of_irq_parse_raw =_ "of_irq_parse_raw: ipar=%pOF, size=%d\012"
+drivers/of/irq.c:143 [irq]of_irq_parse_raw =_ " -> addrsize=%d\012"
+drivers/of/irq.c:163 [irq]of_irq_parse_raw =_ " -> got it !\012"
+drivers/of/irq.c:172 [irq]of_irq_parse_raw =_ " -> no reg passed in when needed !\012"
+drivers/of/irq.c:180 [irq]of_irq_parse_raw =_ " -> no map, getting parent\012"
+drivers/of/irq.c:199 [irq]of_irq_parse_raw =_ " -> match=%d (imaplen=%d)\012"
+drivers/of/irq.c:211 [irq]of_irq_parse_raw =_ " -> imap parent not found !\012"
+drivers/of/irq.c:223 [irq]of_irq_parse_raw =_ " -> parent lacks #interrupt-cells!\012"
+drivers/of/irq.c:231 [irq]of_irq_parse_raw =_ " -> newintsize=%d, newaddrsize=%d\012"
+drivers/of/irq.c:243 [irq]of_irq_parse_raw =_ " -> imaplen=%d\012"
+drivers/of/irq.c:261 [irq]of_irq_parse_raw =_ " -> new parent: %pOF\012"
+drivers/of/irq.c:293 [irq]of_irq_parse_one =_ "of_irq_parse_one: dev=%pOF, index=%d\012"
+drivers/of/irq.c:319 [irq]of_irq_parse_one =_ " parent=%pOF, intsize=%d\012"
+drivers/of/irq.c:332 [irq]of_irq_parse_one =_ " intspec=%d\012"
+drivers/of/irq.c:539 [irq]of_irq_init =_ "of_irq_init: init %pOF (%p), parent %p\012"
+drivers/of/of_mdio.c:99 [of_mdio]of_mdiobus_register_phy =_ "registered phy %pOFn at address %i\012"
+drivers/of/of_mdio.c:129 [of_mdio]of_mdiobus_register_device =_ "registered mdio device %pOFn at address %i\012"
+drivers/of/of_reserved_mem.c:139 [of_reserved_mem]__reserved_mem_alloc_size =_ "allocated memory for '%s' node: base %pa, size %ld MiB\012"
+drivers/of/of_reserved_mem.c:150 [of_reserved_mem]__reserved_mem_alloc_size =_ "allocated memory for '%s' node: base %pa, size %ld MiB\012"
+drivers/nvmem/core.c:406 [nvmem_core]nvmem_register =_ "Registering nvmem device %s\012"
+net/core/sock.c:1714 [sock]__sk_destruct =_ "%s: optmem leakage (%d bytes) detected\012"
+net/core/dev.c:7117 [dev]__netdev_adjacent_dev_insert =_ "Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\012"
+net/core/dev.c:7134 [dev]__netdev_adjacent_dev_insert =_ "Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\012"
+net/core/dev.c:7174 [dev]__netdev_adjacent_dev_remove =_ "Remove adjacency: dev %s adj_dev %s ref_nr %d\012"
+net/core/dev.c:7188 [dev]__netdev_adjacent_dev_remove =_ "adjacency: %s to %s ref_nr - %d = %d\012"
+net/core/dev.c:7201 [dev]__netdev_adjacent_dev_remove =_ "adjacency: dev_put for %s, because link removed from %s to %s\012"
+net/core/dev.c:8619 [dev]netdev_fix_features =_ "Dropping TSO features since no SG feature.\012"
+net/core/dev.c:8625 [dev]netdev_fix_features =_ "Dropping TSO features since no CSUM feature.\012"
+net/core/dev.c:8632 [dev]netdev_fix_features =_ "Dropping TSO6 features since no CSUM feature.\012"
+net/core/dev.c:8646 [dev]netdev_fix_features =_ "Dropping NETIF_F_GSO since no SG feature.\012"
+net/core/dev.c:8654 [dev]netdev_fix_features =_ "Dropping partially supported GSO features since no GSO partial.\012"
+net/core/dev.c:8665 [dev]netdev_fix_features =_ "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\012"
+net/core/dev.c:8673 [dev]netdev_fix_features =_ "Dropping LRO feature since RX-FCS is requested.\012"
+net/core/dev.c:8678 [dev]netdev_fix_features =_ "Dropping HW-GRO feature since RX-FCS is requested.\012"
+net/core/dev.c:8577 [dev]netdev_sync_upper_features =_ "Dropping feature %pNF, upper dev %s has it off.\012"
+net/core/dev.c:8711 [dev]__netdev_update_features =_ "Features changed: %pNF -> %pNF\012"
+net/core/dev.c:8596 [dev]netdev_sync_lower_features =_ "Disabling feature %pNF on lower dev %s.\012"
+net/core/dev.c:8483 [dev]rollback_registered_many =_ "unregister_netdevice: device %s/%p never was registered\012"
+net/ipv4/route.c:1240 [route]ip_rt_bug =_ "%s: %pI4 -> %pI4, %s\012"
+net/ipv4/ip_fragment.c:463 [ip_fragment]ip_frag_reasm =_ "queue_glue: no memory for gluing queue %p\012"
+net/ipv4/ip_output.c:235 [ip_output]ip_finish_output2 =_ "%s: No header cache and no neighbour!\012"
+net/ipv4/tcp.c:2102 [tcp]tcp_recvmsg =_ "TCP(%s:%d): Application bug, race in MSG_PEEK\012"
+include/net/inet_connection_sock.h:225 [tcp_input]inet_csk_reset_xmit_timer =_ "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\012"
+net/ipv4/tcp_input.c:6381 [tcp_input]pr_drop_req =_ "drop open request from %pI4/%u\012"
+net/ipv4/tcp_input.c:6385 [tcp_input]pr_drop_req =_ "drop open request from %pI6/%u\012"
+net/ipv4/tcp_input.c:3256 [tcp_input]tcp_clean_rtx_queue =_ "Leak l=%u %d\012"
+net/ipv4/tcp_input.c:3261 [tcp_input]tcp_clean_rtx_queue =_ "Leak s=%u %d\012"
+net/ipv4/tcp_input.c:3266 [tcp_input]tcp_clean_rtx_queue =_ "Leak r=%u %d\012"
+include/net/inet_connection_sock.h:225 [tcp_output]inet_csk_reset_xmit_timer =_ "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\012"
+net/ipv4/tcp_output.c:3024 [tcp_output]tcp_retransmit_skb =_ "retrans_out leaked\012"
+include/net/inet_connection_sock.h:225 [tcp_timer]inet_csk_reset_xmit_timer =_ "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\012"
+net/ipv4/tcp_timer.c:467 [tcp_timer]tcp_retransmit_timer =_ "Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\012"
+net/ipv4/tcp_timer.c:475 [tcp_timer]tcp_retransmit_timer =_ "Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\012"
+include/net/inet_connection_sock.h:225 [tcp_ipv4]inet_csk_reset_xmit_timer =_ "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\012"
+net/ipv4/tcp_cong.c:90 [tcp_cong]tcp_register_congestion_control =_ "%s registered\012"
+net/ipv4/tcp_rate.c:167 [tcp_rate]tcp_rate_gen =_ "tcp rate: %ld %d %u %u %u\012"
+include/net/inet_connection_sock.h:225 [tcp_recovery]inet_csk_reset_xmit_timer =_ "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\012"
+net/ipv4/udp.c:1188 [udp]udp_sendmsg =_ "socket already corked\012"
+net/ipv4/udp.c:1272 [udp]udp_sendpage =_ "cork failed\012"
+include/net/udplite.h:46 [udp]udplite_checksum_init =_ "UDPLite: zeroed checksum field\012"
+include/net/udplite.h:59 [udp]udplite_checksum_init =_ "UDPLite: bad csum coverage %d/%d\012"
+net/ipv4/udp.c:2358 [udp]__udp4_lib_rcv =_ "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\012"
+net/ipv4/udp.c:2369 [udp]__udp4_lib_rcv =_ "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\012"
+net/ipv4/arp.c:376 [arp]arp_solicit =_ "trying to ucast probe in NUD_INVALID\012"
+net/ipv4/icmp.c:827 [icmp]icmp_unreach =_ "%pI4: fragmentation needed and DF set\012"
+net/ipv4/icmp.c:841 [icmp]icmp_unreach =_ "%pI4: Source Route Failed\012"
+net/ipv4/devinet.c:1547 [devinet]inetdev_event =_ "%s: bug\012"
+net/ipv4/fib_trie.c:2273 [fib_trie]fib_trie_get_next =_ "get_next iter={node=%p index=%d depth=%d}\012"
+net/ipv4/fib_trie.c:392 [fib_trie]tnode_new =_ "AT %p s=%zu %zu\012"
+net/ipv4/fib_trie.c:859 [fib_trie]resize =_ "In tnode_resize %p inflate_threshold=%d threshold=%d\012"
+net/ipv4/fib_trie.c:545 [fib_trie]inflate =_ "In inflate\012"
+net/ipv4/fib_trie.c:640 [fib_trie]halve =_ "In halve\012"
+net/ipv4/fib_trie.c:1140 [fib_trie]fib_table_insert =_ "Insert table=%u %08x/%d\012"
+net/ipv4/fib_trie.c:1571 [fib_trie]fib_table_delete =_ "Deleting %08x/%d tos=%d t=%p\012"
+net/ipv4/fib_trie.c:1943 [fib_trie]fib_table_flush =_ "trie_flush found=%d\012"
+net/ipv4/ping.c:145 [ping]ping_hash =_ "ping_hash(sk->port=%u)\012"
+net/ipv4/ping.c:155 [ping]ping_unhash =_ "ping_unhash(isk=%p,isk->num=%u)\012"
+net/ipv4/ping.c:287 [ping]ping_close =_ "ping_close(sk=%p,sk->num=%u)\012"
+net/ipv4/ping.c:288 [ping]ping_close =_ "isk->refcnt = %d\012"
+net/ipv4/ping.c:858 [ping]ping_recvmsg =_ "ping_recvmsg(sk=%p,sk->num=%u)\012"
+net/ipv4/ping.c:935 [ping]ping_recvmsg =_ "ping_recvmsg -> %d\012"
+net/ipv4/ping.c:709 [ping]ping_v4_sendmsg =_ "ping_v4_sendmsg(sk=%p,sk->num=%u)\012"
+net/ipv4/ping.c:943 [ping]ping_queue_rcv_skb =_ "ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\012"
+net/ipv4/ping.c:946 [ping]ping_queue_rcv_skb =_ "ping_queue_rcv_skb -> failed\012"
+net/ipv4/ping.c:66 [ping]ping_hashfn =_ "hash(%u) = %u\012"
+net/ipv4/ping.c:126 [ping]ping_get_port =_ "found port/ident = %d\012"
+net/ipv4/ping.c:129 [ping]ping_get_port =_ "was not hashed\012"
+net/ipv4/ping.c:311 [ping]ping_check_bind_addr =_ "ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\012"
+net/ipv4/ping.c:337 [ping]ping_check_bind_addr =_ "ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\012"
+net/ipv4/ping.c:432 [ping]ping_bind =_ "after bind(): num = %hu, dif = %d\012"
+net/ipv4/ping.c:456 [ping]ping_bind =_ "ping_v4_bind -> %d\012"
+net/ipv4/ping.c:179 [ping]ping_lookup =_ "try to find: num = %d, daddr = %pI4, dif = %d\012"
+net/ipv4/ping.c:183 [ping]ping_lookup =_ "try to find: num = %d, daddr = %pI6c, dif = %d\012"
+net/ipv4/ping.c:192 [ping]ping_lookup =_ "iterate\012"
+net/ipv4/ping.c:200 [ping]ping_lookup =_ "found: %p: num=%d, daddr=%pI4, dif=%d\012"
+net/ipv4/ping.c:212 [ping]ping_lookup =_ "found: %p: num=%d, daddr=%pI6c, dif=%d\012"
+net/ipv4/ping.c:509 [ping]ping_err =_ "ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\012"
+net/ipv4/ping.c:513 [ping]ping_err =_ "no socket, dropping\012"
+net/ipv4/ping.c:516 [ping]ping_err =_ "err on socket %p\012"
+net/ipv4/ping.c:967 [ping]ping_rcv =_ "ping_rcv(skb=%p,id=%04x,seq=%04x)\012"
+net/ipv4/ping.c:976 [ping]ping_rcv =_ "rcv on socket %p\012"
+net/ipv4/ping.c:982 [ping]ping_rcv =_ "no socket, dropping\012"
+net/ipv4/sysctl_net_ipv4.c:293 [sysctl_net_ipv4]sscanf_key =_ "proc TFO key set 0x%x-%x-%x-%x <- 0x%s: %u\012"
+net/ipv4/ipconfig.c:319 [ipconfig]ic_close_devs =_ "IP-Config: Downing %s\012"
+net/ipv4/ipconfig.c:1640 [ipconfig]ic_proto_name =_ "DHCP: Invalid client identifier type\012"
+net/ipv4/ipconfig.c:1701 [ipconfig]ip_auto_config_setup =_ "IP-Config: Parameter #%d: `%s'\012"
+net/ipv4/ipconfig.c:1073 [ipconfig]ic_bootp_recv =_ "DHCP: Got message type %d (%s)\012"
+net/ipv4/ipconfig.c:1087 [ipconfig]ic_bootp_recv =_ "DHCP: Offered address %pI4 by server %pI4\012"
+net/ipv4/ipconfig.c:898 [ipconfig]ic_do_bootp_ext =_ "DHCP/BOOTP: Got extension %d:"
+net/ipv4/ipconfig.c:900 [ipconfig]ic_do_bootp_ext =_ " %02x"
+net/ipv4/ipconfig.c:901 [ipconfig]ic_do_bootp_ext =_ "\012"
+net/ipv4/ipconfig.c:1462 [ipconfig]ip_auto_config =_ "IP-Config: Entered.\012"
+net/ipv4/ipconfig.c:263 [ipconfig]ic_open_devs =_ "IP-Config: %s UP (able=%d, xid=%08x)\012"
+net/ipv4/ipconfig.c:660 [ipconfig]ic_dhcp_init_options =_ "DHCP: Sending message type %d (%s)\012"
+net/ipv6/addrconf.c:1678 [ipv6]__ipv6_dev_get_saddr =_ "ADDRCONF: unspecified / multicast address assigned as unicast address on %s"
+net/ipv6/addrconf.c:1023 [ipv6]ipv6_add_addr_hash =_ "ipv6_add_addr: already assigned\012"
+net/ipv6/addrconf.c:399 [ipv6]ipv6_add_dev =_ "%s: cannot allocate memory for statistics\012"
+net/ipv6/addrconf.c:408 [ipv6]ipv6_add_dev =_ "%s: cannot create /proc/net/dev_snmp6/%s\012"
+net/ipv6/addrconf.c:4523 [ipv6]addrconf_verify_rtnl =_ "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\012"
+net/ipv6/addrconf.c:3907 [ipv6]addrconf_rs_timer =_ "%s: no IPv6 routers present\012"
+net/ipv6/addrconf.c:2671 [ipv6]addrconf_prefix_rcv =_ "addrconf: prefix option too short\012"
+net/ipv6/addrconf.c:2696 [ipv6]addrconf_prefix_rcv =_ "addrconf: device %s not configured\012"
+net/ipv6/addrconf.c:2784 [ipv6]addrconf_prefix_rcv =_ "IPv6 addrconf: prefix with wrong length %d\012"
+net/ipv6/addrconf.c:3548 [ipv6]addrconf_notify =_ "ADDRCONF(NETDEV_UP): %s: link is not ready\012"
+net/ipv6/addrconf.c:3388 [ipv6]addrconf_sit_config =_ "%s: add_dev failed\012"
+net/ipv6/addrconf.c:3165 [ipv6]init_loopback =_ "%s: add_dev failed\012"
+net/ipv6/route.c:3965 [ipv6]rt6_do_redirect =_ "rt6_do_redirect: packet too short\012"
+net/ipv6/route.c:3972 [ipv6]rt6_do_redirect =_ "rt6_do_redirect: destination address is multicast\012"
+net/ipv6/route.c:3981 [ipv6]rt6_do_redirect =_ "rt6_do_redirect: target address is not link-local unicast\012"
+net/ipv6/route.c:3997 [ipv6]rt6_do_redirect =_ "rt6_redirect: invalid ND options\012"
+net/ipv6/route.c:4006 [ipv6]rt6_do_redirect =_ "rt6_redirect: invalid link-layer address length\012"
+net/ipv6/route.c:4013 [ipv6]rt6_do_redirect =_ "rt6_redirect: source isn't a valid nexthop for redirect target\012"
+net/ipv6/ip6_fib.c:2087 [ipv6]fib6_clean_node =_ "%s: del failed: rt=%p@%p err=%d\012"
+net/ipv6/ndisc.c:732 [ipv6]ndisc_solicit =_ "%s: trying to ucast probe in NUD_INVALID: %pI6\012"
+net/ipv6/udp.c:1481 [ipv6]udpv6_sendmsg =_ "udp cork app bug 2\012"
+net/ipv6/udp.c:936 [ipv6]__udp6_lib_rcv =_ "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\012"
+net/ipv6/udp.c:736 [ipv6]udp6_csum_zero_error =_ "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\012"
+net/ipv6/icmp.c:497 [ipv6]icmp6_send =_ "icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\012"
+net/ipv6/icmp.c:506 [ipv6]icmp6_send =_ "icmp6_send: no reply to icmp error [%pI6c > %pI6c]\012"
+net/ipv6/icmp.c:350 [ipv6]icmpv6_route_lookup =_ "icmp6_send: acast source\012"
+net/ipv6/icmp.c:855 [ipv6]icmpv6_rcv =_ "ICMPv6 checksum failed [%pI6c > %pI6c]\012"
+net/ipv6/icmp.c:928 [ipv6]icmpv6_rcv =_ "icmpv6: msg of unknown type [%pI6c > %pI6c]\012"
+net/ipv6/reassembly.c:310 [ipv6]ip6_frag_reasm =_ "ip6_frag_reasm: no memory for reassembly\012"
+net/ipv6/reassembly.c:307 [ipv6]ip6_frag_reasm =_ "ip6_frag_reasm: payload len = %d\012"
+net/ipv6/ping.c:62 [ipv6]ping_v6_sendmsg =_ "ping_v6_sendmsg(sk=%p,sk->num=%u)\012"
+net/ipv6/exthdrs.c:750 [ipv6]ipv6_hop_jumbo =_ "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\012"
+net/ipv6/exthdrs.c:734 [ipv6]ipv6_hop_ra =_ "ipv6_hop_ra: wrong RA length %d\012"
+net/ipv6/datagram.c:1015 [ipv6]ip6_datagram_send_ctl =_ "invalid cmsg type: %d\012"
+net/ipv6/sit.c:848 [sit]ipip6_tunnel_xmit =_ "nexthop == NULL\012"
+net/ipv6/sit.c:877 [sit]ipip6_tunnel_xmit =_ "nexthop == NULL\012"
+include/net/udplite.h:46 [ip6_checksum]udplite_checksum_init =_ "UDPLite: zeroed checksum field\012"
+include/net/udplite.h:59 [ip6_checksum]udplite_checksum_init =_ "UDPLite: bad csum coverage %d/%d\012"
+lib/decompress.c:69 [decompress]decompress_method =_ "Compressed data magic: %#.2x %#.2x\012"
+lib/kobject.c:746 [kobject]dynamic_kobj_release =_ "kobject: (%p): %s\012"
+lib/kobject.c:917 [kobject]kset_release =_ "kobject: '%s' (%p): %s\012"
+lib/kobject.c:161 [kobject]fill_kobj_path =_ "kobject: '%s' (%p): %s: path = '%s'\012"
+lib/kobject.c:670 [kobject]kobject_cleanup =_ "kobject: '%s' (%p): %s, parent %p\012"
+lib/kobject.c:674 [kobject]kobject_cleanup =_ "kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\012"
+lib/kobject.c:679 [kobject]kobject_cleanup =_ "kobject: '%s' (%p): auto cleanup 'remove' event\012"
+lib/kobject.c:686 [kobject]kobject_cleanup =_ "kobject: '%s' (%p): auto cleanup kobject_del\012"
+lib/kobject.c:692 [kobject]kobject_cleanup =_ "kobject: '%s' (%p): calling ktype release\012"
+lib/kobject.c:698 [kobject]kobject_cleanup =_ "kobject: '%s': free name\012"
+lib/kobject.c:253 [kobject]kobject_add_internal =_ "kobject: '%s' (%p): %s: parent: '%s', set: '%s'\012"
+lib/kobject_uevent.c:477 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s\012"
+lib/kobject_uevent.c:487 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s: attempted to send uevent without kset!\012"
+lib/kobject_uevent.c:498 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s: uevent_suppress caused the event to drop!\012"
+lib/kobject_uevent.c:506 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s: filter function caused the event to drop!\012"
+lib/kobject_uevent.c:518 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s: unset subsystem caused the event to drop!\012"
+lib/kobject_uevent.c:560 [kobject_uevent]kobject_uevent_env =_ "kobject: '%s' (%p): %s: uevent() returned %d\012"
+drivers/fce/fce_drv.c:61 [fce]print_fce_cfg =p "print_fce_cfg: basenum:0x%x, offset_base:0x%lx, saddr:0x%lx, dim:%d, width:%d, sigsel:%d, readnum:%d, pre_endian:%d, target_endian:%d, base_endian:%d\012"
+drivers/fce/fce_drv.c:116 [fce]fcedev_release =p "fcedev_release: magic_data=0x%x\012"
+drivers/fce/fce_drv.c:129 [fce]fcedev_release =p "fcedev release! users<%d>\012"
+drivers/fce/fce_drv.c:73 [fce]fcedev_open =p " fdev<0x%lx>\012"
+drivers/fce/fce_drv.c:74 [fce]fcedev_open =p " miscdev<0x%lx>\012"
+drivers/fce/fce_drv.c:75 [fce]fcedev_open =p "fcedev_open: magic_data=0x%x\012"
+drivers/fce/fce_drv.c:105 [fce]fcedev_open =p "fcedev open!, users<%d>\012"
+drivers/fce/fce_drv.c:152 [fce]feature_lib_mmap =p "%s\012"
+drivers/fce/fce_drv.c:161 [fce]feature_lib_mmap =p "%s buffer<%#llx>, phys<%#llx>, len<%ld>, vma_begin<%#llx>, vma_end<%#llx>, vm_pgoff<%#llx>\012"
+drivers/fce/fce_drv.c:503 [fce]fce_thead_irq_thread =p "fce reg top<%d> match_index=0x%x, match_value=0x%x\012"
+drivers/fce/fce_drv.c:678 [fce]fce_thead_probe =p "<%s>\012"
+drivers/fce/fce_drv.c:686 [fce]fce_thead_probe =p "%s: registers %#llx-%#llx\012"
+drivers/fce/fce_drv.c:700 [fce]fce_thead_probe =p "%s: reg_addr %#llx\012"
+drivers/fce/fce_drv.c:714 [fce]fce_thead_probe =p "%s: memory_region %#llx-%#llx\012"
+drivers/fce/fce_drv.c:722 [fce]fce_thead_probe =p "%s: virt addr  %#llx\012"
+drivers/fce/fce_drv.c:725 [fce]fce_thead_probe =p "irq: <%d>\012"
+drivers/fce/fce_drv.c:554 [fce]fce_thead_hw_init =p "BASENUM is: 0x%x\012"
+drivers/fce/fce_drv.c:749 [fce]fce_thead_probe =p " fce hw init\012"
+drivers/fce/fce_drv.c:762 [fce]fce_thead_probe =p " request fce irq \012"
+drivers/fce/fce_drv.c:326 [fce]fce_add_dev =p "<%s>\012"
+drivers/fce/fce_drv.c:333 [fce]fce_add_dev =p " fdev<0x%lx>\012"
+drivers/fce/fce_drv.c:334 [fce]fce_add_dev =p " &fdev->miscdev<0x%lx>\012"
+drivers/fce/fce_drv.c:340 [fce]fce_add_dev =p " fce miscdev registered!\012"
+drivers/fce/fce_drv.c:774 [fce]fce_thead_probe =p " set drvdata\012"
+drivers/fce/fce_drv.c:224 [fce]fcedev_ioctl =p "fcedev_ioctl: cmd=0x%x, magic_data=0x%x\012"
+drivers/fce/fce_drv.c:236 [fce]fcedev_ioctl =p "fcedev_ioctl_query_state: 0x%x state=%d\012"
+drivers/fce/fce_drv.c:241 [fce]fcedev_ioctl =p "fcedev_ioctl_query_isr_count: 0x%x count=%d\012"
+drivers/fce/fce_drv.c:246 [fce]fcedev_ioctl =p "fcedev_ioctl_query_result_count: 0x%x count=%d\012"
+drivers/fce/fce_drv.c:354 [fce]fce_sess_config =p "<%s>\012"
+drivers/fce/fce_drv.c:382 [fce]fce_sess_config =p "<%s> vm_start<0x%llx>, vm_end<0x%llx>, phys<0x%llx>\012"
+drivers/fce/fce_drv.c:383 [fce]fce_sess_config =p "<%s> saddr<0x%llx>, phys<0x%llx>\012"
+drivers/fce/fce_drv.c:253 [fce]fcedev_ioctl =p "fcedev_ioctl_config: 0x%x, retval=%d \012"
+drivers/fce/fce_drv.c:256 [fce]fcedev_ioctl =p "fcedev_ioctl_config: 0x%x, retval=%d \012"
+drivers/fce/fce_drv.c:621 [fce]fce_thead_reset =p "<%s>\012"
+drivers/fce/fce_drv.c:574 [fce]fce_thead_config =p "<%s>\012"
+drivers/fce/fce_drv.c:586 [fce]fce_thead_config =p "readnum=%d\012"
+drivers/fce/fce_drv.c:630 [fce]fce_thead_start =p "<%s>\012"
+drivers/fce/fce_drv.c:269 [fce]fcedev_ioctl =p "fcedev_ioctl_start: 0x%x retval=%d \012"
+drivers/fce/fce_drv.c:273 [fce]fcedev_ioctl =p "fcedev_ioctl_stop: 0x%x retval=%d \012"
+drivers/fce/fce_drv.c:278 [fce]fcedev_ioctl =p "fcedev_ioctl_get_result: 0x%x match_data0=0x%x, match_index0=0x%x\012"
+drivers/fce/fce_drv.c:655 [fce]fce_thead_suspend =p "<%s>\012"
+drivers/fce/fce_drv.c:285 [fce]fcedev_ioctl =p "fcedev_ioctl_suspend: 0x%x retval=%d \012"
+drivers/fce/fce_drv.c:661 [fce]fce_thead_resume =p "<%s>\012"
+drivers/fce/fce_drv.c:291 [fce]fcedev_ioctl =p "fcedev_ioctl_resume: 0x%x retval=%d \012"
+drivers/fce/fce_drv.c:192 [fce]feature_lib_create =p "%s: feature_lib buffer %p\012"
+drivers/fce/fce_drv.c:196 [fce]feature_lib_create =p "%s: requested size %lu PAGE_SIZE %lu actual size %zu\012"
+drivers/fce/fce_drv.c:172 [fce]feature_lib_free =p "%s: feature_lib buffer %p\012"
+drivers/fce/featurelib_heap.c:29 [fce]feature_lib_heap_buffer_create =p "%s:feature lib heap create size %zu\012"
+drivers/fce/featurelib_heap.c:59 [fce]feature_lib_heap_buffer_free =p "%s:feature lib heap free\012"
+drivers/fce/featurelib_heap.c:61 [fce]feature_lib_heap_buffer_free =p "%s:feature lib heap_pool<%#llx>\012"
+drivers/fce/featurelib_heap.c:77 [fce]feature_lib_heap_init =p "%s:feature lib heap init\012"
+drivers/fce/featurelib_heap.c:85 [fce]feature_lib_heap_init =p "%s:feature lib heap_pool<%#llx>\012"
+drivers/fce/featurelib_heap.c:104 [fce]feature_lib_heap_deinit =p "%s:feature lib heap_pool<%#llx>\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:126 [img_mem]img_mem_add_heap =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:197 [img_mem]img_mem_add_heap =p "%s created heap %d type %d (%s)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:299 [img_mem]img_mem_create_proc_ctx =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:319 [img_mem]img_mem_create_proc_ctx =p "%s id:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:253 [img_mem]img_mem_get_heap_info =p "%s:%d heap %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:264 [img_mem]img_mem_get_heap_info =p "%s heap %d not found!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:958 [img_mem]img_mem_unmap_um =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:967 [img_mem]img_mem_unmap_um =p "%s:%d buffer 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:214 [img_mem]_img_mem_del_heap =p "%s heap %d 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:989 [img_mem]_img_mem_map_km =p "%s:%d buffer 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1299 [img_mem]_img_mem_sync_cpu_to_device =p "%s:%d buffer %d size %zu cache synchronization disabled!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1305 [img_mem]_img_mem_sync_cpu_to_device =p "%s:%d buffer %d size %zu kptr %p cache(%d:%d)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1929 [img_mem]_update_page =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2187 [img_mem]_img_mmu_unmap =p "%s:%d unmapping %p buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2196 [img_mem]_img_mmu_unmap =p "%s imgmmu_cat_unmap bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:722 [img_mem]_img_mem_free =p "%s buffer 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1615 [img_mem]_page_free =p "%s:%d buffer %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:395 [img_mem]_img_mem_alloc =p "%s heap %p '%s' ctx %p size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:441 [img_mem]_img_mem_alloc =p "%s heap %d changing cache attributes from %x to %x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:466 [img_mem]_img_mem_alloc =p "%s heap %p ctx %p created buffer %d (%p) actual_size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1530 [img_mem]_page_alloc =p "%s:%d arg %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1594 [img_mem]_page_alloc =p "%s:%d virt addr %#lx type:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1596 [img_mem]_page_alloc =p "%s:%d phys addr %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:814 [img_mem]img_mem_add_fence =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1995 [img_mem]img_mmu_ctx_create =p "%s adding %lx offset bytes to heap %d type %d (%s)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2010 [img_mem]img_mmu_ctx_create =p "%s imgmmu_cat_create bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2080 [img_mem]img_mmu_map =p "%s buffer %d virt_addr %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2094 [img_mem]img_mmu_map =p "%s buffer %d 0x%p size %zu virt_addr %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2128 [img_mem]img_mmu_map =p "%s imgmmu_cat_map_sg bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2147 [img_mem]img_mmu_map =p "%s imgmmu_cat_map_arr bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2283 [img_mem]img_mmu_get_pc =p "%s: addr %#llx pc %#llx bufid %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2285 [img_mem]img_mmu_get_pc =p "%s imgmmu_cat_get_page bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:229 [img_mem]img_mem_del_heap =p "%s:%d heap %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:685 [img_mem]img_mem_export =p "%s ctx %p buffer id %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:711 [img_mem]img_mem_export =p "%s heap %d ctx %p exported buffer %d (%p) size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1007 [img_mem]img_mem_map_km =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1048 [img_mem]img_mem_unmap_km =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1030 [img_mem]_img_mem_unmap_km =p "%s:%d buffer 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1326 [img_mem]img_mem_sync_cpu_to_device =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2051 [img_mem]_img_mmu_ctx_destroy =p "%s imgmmu_cat_destroy bypass!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2212 [img_mem]img_mmu_unmap =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:340 [img_mem]_img_mem_destroy_proc_ctx =p "%s:%d id:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:373 [img_mem]img_mem_destroy_proc_ctx =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:764 [img_mem]img_mem_free =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:484 [img_mem]img_mem_alloc =p "%s heap %d ctx %p size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:507 [img_mem]img_mem_alloc =p "%s heap %d ctx %p created buffer %d (%p) size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:615 [img_mem]img_mem_import =p "%s heap %d ctx %p hnd %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:580 [img_mem]_img_mem_import =p "%s heap %d changing cache attributes from %x to %x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:640 [img_mem]img_mem_import =p "%s heap %d ctx %p created buffer %d (%p) size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:858 [img_mem]img_mem_remove_fence =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:888 [img_mem]img_mem_signal_fence =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1350 [img_mem]_img_mem_sync_device_to_cpu =p "%s:%d buffer %d size %zu cache synchronization disabled!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1356 [img_mem]_img_mem_sync_device_to_cpu =p "%s:%d buffer %d size %zu kptr %p cache(%d:%d)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:1370 [img_mem]img_mem_sync_device_to_cpu =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:922 [img_mem]img_mem_map_um =p "%s:%d buffer %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:931 [img_mem]img_mem_map_um =p "%s:%d buffer 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2495 [img_mem]img_mem_init =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_man.c:2513 [img_mem]img_mem_exit =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:558 [img_mem]carveout_cache_update =p "%s vma start:%lx end:%lx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:369 [img_mem]_mmap_close =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:355 [img_mem]_mmap_open =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:463 [img_mem]carveout_heap_map_um =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:466 [img_mem]carveout_heap_map_um =p "%s:%d vm_start %#lx vm_end %#lx size %ld\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:122 [img_mem]carveout_release_dmabuf =p "%s %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:112 [img_mem]carveout_unmap_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:95 [img_mem]carveout_map_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:589 [img_mem]carveout_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:322 [img_mem]carveout_heap_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:137 [img_mem]carveout_mmap_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:180 [img_mem]carveout_heap_export =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:514 [img_mem]carveout_heap_unmap_km =p "%s:%d buffer %d (0x%p) kptr 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:506 [img_mem]carveout_heap_map_km =p "%s:%d buffer %d (0x%p) kptr 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:270 [img_mem]carveout_heap_alloc =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:312 [img_mem]carveout_heap_alloc =p "%s buffer %d phys %#llx size %zu attrs %x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_carveout.c:629 [img_mem]img_mem_carveout_init =p "%s phys base:%#llx (offs:%llx) size:%zu order:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:207 [img_mem]_mmap_close =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:394 [img_mem]dmabuf_heap_unmap_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:411 [img_mem]dmabuf_heap_unmap_km =p "%s:%d buffer %d kunmap from 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:177 [img_mem]_mmap_open =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:326 [img_mem]dmabuf_heap_map_um =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:329 [img_mem]dmabuf_heap_map_um =p "%s:%d vm_start %#lx vm_end %#lx size %ld\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:357 [img_mem]dmabuf_heap_map_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:383 [img_mem]dmabuf_heap_map_km =p "%s:%d buffer %d vmap to 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:146 [img_mem]dmabuf_heap_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:87 [img_mem]dmabuf_heap_import =p "%s:%d buffer %d (0x%p) buf_fd %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:100 [img_mem]dmabuf_heap_import =p "%s:%d buffer %d dma_buf %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:428 [img_mem]dmabuf_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_dmabuf.c:449 [img_mem]img_mem_dmabuf_init =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:132 [img_mem]unified_release_dmabuf =p "%s %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:123 [img_mem]unified_unmap_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:283 [img_mem]unified_export =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:225 [img_mem]unified_mmap_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:883 [img_mem]unified_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:103 [img_mem]unified_map_dmabuf =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:349 [img_mem]unified_alloc =p "%s:%d buffer %d (0x%p) size:%zu attr:%x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:464 [img_mem]unified_alloc =p "%s:%d buffer %d orig_nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:526 [img_mem]_dma_unmap =p "%s:%d buffer %d orig_nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:816 [img_mem]unified_unmap_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:826 [img_mem]unified_unmap_km =p "%s vunmap 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:608 [img_mem]_mmap_close =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:536 [img_mem]unified_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:547 [img_mem]unified_free =p "%s vunmap 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:509 [img_mem]_dma_map =p "%s:%d buffer %d orig_nents %d nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:762 [img_mem]unified_map_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:808 [img_mem]unified_map_km =p "%s:%d buffer %d vmap to 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:207 [img_mem]unified_end_cpu_access_dmabuf =p "%s:%d orig_nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:871 [img_mem]unified_sync_dev_to_cpu =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:170 [img_mem]unified_begin_cpu_access_dmabuf =p "%s:%d orig_nents %d nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:850 [img_mem]unified_sync_cpu_to_dev =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:577 [img_mem]_mmap_open =p "%s:%d buffer %d (0x%p) vma:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:725 [img_mem]unified_map_um =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:728 [img_mem]unified_map_um =p "%s:%d vm_start %#lx vm_end %#lx size %ld\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_unified.c:905 [img_mem]img_mem_unified_init =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:138 [img_mem]coherent_heap_map_um =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:141 [img_mem]coherent_heap_map_um =p "%s:%d vm_start %#lx vm_end %#lx size %ld\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:123 [img_mem]coherent_heap_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:177 [img_mem]coherent_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:160 [img_mem]coherent_heap_unmap_km =p "%s:%d buffer %d (0x%p) kptr 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:152 [img_mem]coherent_heap_map_km =p "%s:%d buffer %d (0x%p) kptr 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:73 [img_mem]coherent_heap_alloc =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:114 [img_mem]coherent_heap_alloc =p "%s buffer %d kptr %p phys %#llx size %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_coherent.c:199 [img_mem]img_mem_coherent_init =p "%s gfp:%x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:267 [img_mem]anonymous_heap_unmap_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:276 [img_mem]anonymous_heap_unmap_km =p "%s:%d buffer %d kunmap from 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:303 [img_mem]anonymous_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:82 [img_mem]anonymous_heap_import =p "%s:%d buffer %d (0x%p) cpu_addr %#lx for PID:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:156 [img_mem]anonymous_heap_import =p "%s:%d buffer %d orig_nents %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:188 [img_mem]anonymous_heap_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:191 [img_mem]anonymous_heap_free =p "%s vunmap 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:224 [img_mem]anonymous_heap_map_km =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:255 [img_mem]anonymous_heap_map_km =p "%s:%d buffer %d vmap to 0x%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_anonymous.c:324 [img_mem]img_mem_anonymous_init =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_ocm.c:116 [img_mem]ocm_heap_free =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_ocm.c:134 [img_mem]ocm_heap_destroy =p "%s:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_ocm.c:70 [img_mem]ocm_heap_alloc =p "%s:%d buffer %d (0x%p)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_ocm.c:107 [img_mem]ocm_heap_alloc =p "%s buffer %d phys %#llx size %zu attrs %x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_mem_ocm.c:157 [img_mem]img_mem_ocm_init =p "%s phys:%#llx size:%zu attrs:%#x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_pdump.c:84 [img_mem]img_pdump_create =p "%s %d buffer %p size:%zu!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_pdump.c:115 [img_mem]img_pdump_write =p "%s %d buffer len:%zu size:%zu!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_pdump.c:118 [img_mem]img_pdump_write =p "%s end!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/img_mem/img_pdump.c:177 [img_mem]img_pdump_destroy =p "%s %d buffer %p!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:280 [vha]vha_release =p "%s: PID: %d, vha: %p, session: %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:742 [vha]vha_mmap =p "%s: PID: %d start %#lx end %#lx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:745 [vha]vha_mmap =p "%s: PID: %d buf_id %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:319 [vha]vha_ioctl_query_heaps =p "%s: session %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:151 [vha]vha_poll =p "%s: PID: %d, vha: %p, link: %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:511 [vha]vha_ioctl_map_to_onchip =p "%s: session %u, virt_addr 0x%016llx, buf_id %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:694 [vha]vha_ioctl =p "%s: code: 0x%x, value: 0x%lx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:297 [vha]vha_ioctl_get_hw_props =p "%s: session %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:353 [vha]vha_ioctl_alloc =p "%s: session %u, size %llu, heap_id %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:406 [vha]vha_ioctl_import =p "%s: session %u, buf_hnd 0x%016llx, size %llu, heap_id %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:453 [vha]vha_ioctl_export =p "%s: session %u, buf_id %u, size %llu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:480 [vha]vha_ioctl_free =p "%s: session %u, buf_id %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:538 [vha]vha_ioctl_map =p "%s: session %u, virt_addr 0x%016llx, buf_id %u, flags 0x%08x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:562 [vha]vha_ioctl_unmap =p "%s: session %u, buf_id %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:581 [vha]vha_ioctl_buf_status =p "%s: session %u, buf_id %u, status %u, in_sync_fd %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:613 [vha]vha_ioctl_sync =p "%s: session %u, VHA_SYNC_OP_CREATE_OUT buf_id_count: %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:624 [vha]vha_ioctl_sync =p "%s: session %u, VHA_SYNC_OP_MERGE_IN in_sync_fd_count: %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:635 [vha]vha_ioctl_sync =p "%s: session %u, VHA_SYNC_OP_RELEASE buf_id_count: %u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:682 [vha]vha_ioctl_cancel =p "%s: session %u, cmd_id 0x%08x, cmd_id_mask 0x%08x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:75 [vha]vha_read =p "%s: PID: %d, vha: %p, link: %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:86 [vha]vha_read =p "%s: returning, no block!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:89 [vha]vha_read =p "%s: going to sleep\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:92 [vha]vha_read =p "%s: signal\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:96 [vha]vha_read =p "%s: woken up\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:130 [vha]vha_read =p "VHA RSP: "
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:234 [vha]vha_open =p "%s: PID: %d, vha: %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:183 [vha]vha_write =p "%s: PID: %d, vha: %p, session: %p, size: %zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:779 [vha]vha_api_add_dev =p "%s: trying to register misc dev %s...\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:793 [vha]vha_api_add_dev =p "%s: misc dev registered successfully\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:812 [vha]vha_api_rm_dev =p "%s: trying to deregister VHA misc device\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:819 [vha]vha_api_rm_dev =p "%s: VHA misc dev deregistered: %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:828 [vha]vha_api_init =p "loading VHA module.\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_api.c:845 [vha]vha_api_exit =p "unloading VHA module.\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1039 [vha]vha_apm_worker =p "%s: apm expired! core_mask:%#x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1806 [vha]cmd_worker =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1811 [vha]cmd_worker =p "%s: Postpone worker task!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:286 [vha]vha_init_plat_heaps =p "%s: adding platform heap of type %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:522 [vha]vha_deinit =p "%s: Total kernel memory used: %u.%u MB\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:607 [vha]vha_cmd_notify =p "%s: 0x%08x/%u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:802 [vha]vha_add_session =p "%s: %p ctxid:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1009 [vha]vha_sched_apm =p "%s: core_mask:%#x delay:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1088 [vha]vha_add_dev =p "%s: allocated vha_dev @ %px\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:374 [vha]vha_init =p "%s: adding device heap of type %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:426 [vha]vha_init =p "%s: adding heap of type %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:490 [vha]vha_init =p "%s: using heap %d for internal alloc\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:496 [vha]vha_init =p "%s: vha drv init done\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1419 [vha]vha_map_to_onchip =p "%s: mapped buf %s (%u) to %#llx, num_pages: %d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1472 [vha]vha_map_buffer =p "%s: using direct mapping!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1524 [vha]vha_map_buffer =p "%s: mapped buf %s (%u) to %#llx, flags: 0x%x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1573 [vha]vha_unmap_buffer =p "%s: unmapped buf %s(%u)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1651 [vha]vha_add_buf =p "%s buf '%.*s' id:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1686 [vha]vha_add_buf =p "memset buf chunk %d!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1717 [vha]vha_rm_buf =p "%s buf_id:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1751 [vha]vha_is_waiting_on_input_buffs =p "%s: cmd %u waiting for input buf %d to be ready\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1825 [vha]vha_chk_cmd_queues =p "%s threaded:%u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:946 [vha]vha_rm_session =p "%s: Total user memory used in session: %u.%u MB\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1873 [vha]vha_set_buf_status =p "%s: id:%d curr:%d new:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1978 [vha]vha_buf_needs_inval =p "%s: id:%d (skip)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:1983 [vha]vha_buf_needs_inval =p "%s: id:%d (%d)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:2002 [vha]vha_buf_needs_flush =p "%s: id:%d (%d)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:2408 [vha]vha_add_cmd =p "%s: cmd id: 0x%08x/%u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:2545 [vha]vha_suspend_dev =p "%s: taking a nap!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_common.c:2559 [vha]vha_resume_dev =p "%s: waking up!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_dbg.c:588 [vha]vha_dbg_alloc_hwbuf =p "%s: mapped buf %s (%u) to %#llx:%zu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_pdump.c:253 [vha]vha_pdump_ldb_buf =p "vha_pdump_ldb_buf chunk %d!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/vha_pdump.c:309 [vha]vha_pdump_sab_buf =p "vha_pdump_sab_buf chunk %d!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:419 [vha]vha_rollback_cnn_cmds =p "%s: (%d)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:104 [vha]vha_dev_ready =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:461 [vha]vha_handle_cmd =p "No command. Probably it has been aborted\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:502 [vha]vha_handle_cmd =p "%s: %p -> new pending %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:190 [vha]vha_dev_start =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:237 [vha]vha_dev_stop =p "%s\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:288 [vha]vha_dev_stop =p "%s Too short execution time to calculate utilization!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:394 [vha]vha_handle_irq =p "IRQ 0x%08llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:658 [vha]vha_handle_thread_irq =p "%s: status:%llx count:%d\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:517 [vha]vha_do_queued_cmd =p "%s: queued %p pending %p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:523 [vha]vha_do_queued_cmd =p "%s: skipping!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:886 [vha]vha_scheduler_loop =p "%s Queue full. Postpone worker task!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:1104 [vha]vha_dev_get_props =p "%s: supported: %#x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:1106 [vha]vha_dev_get_props =p "%s: soc_axi: %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:1111 [vha]vha_dev_get_props =p "%s: ip integrator id: %#llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_dev.c:1113 [vha]vha_dev_get_props =p "%s: ip change list: %llu\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:603 [vha]vha_cnn_cmd_completed =p "%s: %p, hw_cycles %llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:646 [vha]vha_do_cnn_cmd =p "CNN command: id:%x type:%x nin:%x nbufs:%x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:648 [vha]vha_do_cnn_cmd =p "VHA CMD: "
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:253 [vha]do_cmd_cnn_submit =p "%s: -> kicked:%p queueing:%p\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:313 [vha]do_cmd_cnn_submit =p "%s: CNN kick queued (%p)!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_cnn.c:320 [vha]do_cmd_cnn_submit =p "%s: CNN kick %s (%p)!\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_mmu.c:126 [vha]do_mmu_ctx_setup =p "%s: setting hardware ctx id:%u\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_mmu.c:79 [vha]mmu_flush =p "%s: ctx_id:%d (0x%llx)\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_mmu.c:141 [vha]vha_mmu_setup =p "%s: mode:%d session ctxid:%x active ctxid:%x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_mmu.c:188 [vha]vha_mmu_setup =p "%s: update ctx id active:%x pc:%#x\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/single/vha_mmu.c:226 [vha]vha_mmu_status =p "%s: MMU FAULT: s1:%llx s2:%llx\012"
+/home/davidli/workspace/light_base/npu_ax3386/driver/vha/platform/vha_plat_thead_light_fpga_c910.c:67 [vha]vha_plat_dt_hw_init =p "%s dma_get_mask : %#llx\012"
+drivers/perf/thead_ddr_perf.c:444 [thead_ddr_perf]ddr_perf_probe =p "<%s>\012"
+drivers/perf/thead_ddr_perf.c:452 [thead_ddr_perf]ddr_perf_probe =p "%s: registers %#llx-%#llx\012"
+drivers/perf/thead_ddr_perf.c:461 [thead_ddr_perf]ddr_perf_probe =p "%s: base_addr %#llx\012"
+drivers/perf/thead_ddr_perf.c:480 [thead_ddr_perf]ddr_perf_probe =p "irq: <%d>\012"
+drivers/perf/thead_ddr_perf.c:300 [thead_ddr_perf]ddr_perf_event_add =p "There are not enough counters\012"

BIN
test/face_detect/004545.jpg


+ 84 - 0
test/face_detect/Makefile

@@ -0,0 +1,84 @@
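+# Makefile for the face_detect test binaries, cross-compiled with $(CROSS)gcc.
+# Typical invocation (the toolchain prefix here is an assumption):
+#   make CROSS=riscv64-unknown-linux-gnu-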
+TARGET=face_detect
+
+LIB_DIR=$(shell cd ../../lib; pwd)
+SYSROOT_DIR=$(shell cd ../../../recipe-sysroot; pwd)
+USRLIB_DIR=$(SYSROOT_DIR)/usr/lib
+CSINN_INSTALL=${LIB_DIR}/install_nn2
+
+USRINC_DIR=${SYSROOT_DIR}/usr/include
+INCLUDE += -I./
+INCLUDE += -I../../lib/install_nn2/include
+INCLUDE += -I${USRINC_DIR}/plink
+INCLUDE += -I${USRINC_DIR}/vidmem
+
+OUT_DIR = ../output
+
+CC = $(CROSS)gcc
+RV_CC = $(CROSS)gcc
+
+BD_CFLAGS += -O0 -g3 ${INCLUDE}
+
+LDFLAGS += -L${CSINN_INSTALL}/lib -L${USRLIB_DIR}/plink -L${USRLIB_DIR}/vidmem -Wl,-unresolved-symbols=ignore-in-shared-libs -lstdc++ -lm
+
+BD_LDFLAGS += ${LDFLAGS} -lshl_pnna
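+# Unresolved symbols coming from the prebuilt shared libraries are ignored at
+# link time; they are expected to resolve at runtime on the target.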
+
+.PHONY: clean all
+
+all: $(OUT_DIR)/m1 $(OUT_DIR)/m1_det \
+     $(OUT_DIR)/dw_src_test $(OUT_DIR)/dw_fd_src_test $(OUT_DIR)/dw_2fd_src_test \
+     $(OUT_DIR)/npu_sink_test $(OUT_DIR)/npu_sink_src_test $(OUT_DIR)/npu_fd_sink_test $(OUT_DIR)/npu_fd_sink_src_test \
+     $(OUT_DIR)/g2d_sink_test \
+     $(OUT_DIR)/test
+
+$(OUT_DIR)/m1: main.c model.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/m1_det: main_det.c model.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/dw_src_test: dw_src_test.c model.c io.c process.c
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/dw_fd_src_test: dw_fd_src_test.c io.c process.c
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/dw_2fd_src_test: dw_2fd_src_test.c io.c process.c
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/npu_sink_test: npu_sink_test.c model.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/npu_sink_src_test: npu_sink_src_test.c model.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/npu_fd_sink_test: npu_sink_test.c model_fd.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/npu_fd_sink_src_test: npu_sink_src_test.c model_fd.c io.c process.c detect.cpp
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -lvmem -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/g2d_sink_test: g2d_sink_test.c model.c io.c
+	@mkdir -p $(@D)
+	$(RV_CC) $(BD_CFLAGS) -lpthread -lrt -lplink -o $@  $^ $(BD_LDFLAGS)
+
+$(OUT_DIR)/test: $(OUT_DIR)/npu_fd_sink_src_test $(OUT_DIR)/npu_sink_src_test
+	@rm -rf $(OUT_DIR)/$(TARGET)
+	@cp -rf $(OUT_DIR)/npu_fd_sink_src_test $(OUT_DIR)/$(TARGET)
+
+clean:
+	rm -rf \
+	$(OUT_DIR)/m1 $(OUT_DIR)/m1_det \
+	$(OUT_DIR)/dw_src_test $(OUT_DIR)/dw_fd_src_test $(OUT_DIR)/dw_2fd_src_test \
+	$(OUT_DIR)/npu_sink_test $(OUT_DIR)/npu_sink_src_test $(OUT_DIR)/npu_fd_sink_test $(OUT_DIR)/npu_fd_sink_src_test \
+	$(OUT_DIR)/g2d_sink_test \
+	${OUT_DIR}/${TARGET} \
+	LASTRUN.SBA

+ 247 - 0
test/face_detect/detect.cpp

@@ -0,0 +1,247 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>  /* exit() in readbintomem() */
+#include <string>
+#include <vector>
+#include <algorithm>
+#include "detect.h"
+int num_class = 21;
+float nms_threshold = 0.45f;
+int nms_top_k = 100;
+int keep_top_k = 100;
+//float confidence_threshold = 0.51f;
+float confidence_threshold = 0.45f;
+//float confidence_threshold = 0.48f;
+
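+// Post-processing knobs: boxes scoring below confidence_threshold are dropped,
+// per-class candidates are capped at nms_top_k before NMS, and keep_top_k
+// limits the final merged result list.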
+
+static inline float intersection_area(const BBoxRect& a, const BBoxRect& b)
+{
+    if (a.xmin > b.xmax || a.xmax < b.xmin || a.ymin > b.ymax || a.ymax < b.ymin)
+    {
+        // no intersection
+        return 0.f;
+    }
+
+    float inter_width = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin);
+    float inter_height = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin);
+
+    return inter_width * inter_height;
+}
+
+template <typename T>
+static void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right)
+{
+    int i = left;
+    int j = right;
+    // pivot on the middle element's score; sorts both vectors in descending score order
+    float p = scores[(left + right) / 2];
+
+    while (i <= j)
+    {
+        while (scores[i] > p)
+            i++;
+
+        while (scores[j] < p)
+            j--;
+
+        if (i <= j)
+        {
+            // swap
+            std::swap(datas[i], datas[j]);
+            std::swap(scores[i], scores[j]);
+
+            i++;
+            j--;
+        }
+    }
+
+    if (left < j)
+        qsort_descent_inplace(datas, scores, left, j);
+
+    if (i < right)
+        qsort_descent_inplace(datas, scores, i, right);
+}
+
+template <typename T>
+static void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores)
+{
+    if (datas.empty() || scores.empty())
+        return;
+
+    qsort_descent_inplace(datas, scores, 0, scores.size() - 1);
+}
+
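+// Greedy NMS over boxes already sorted by descending score: a box is kept only
+// if its IoU with every previously kept box is at most nms_threshold.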
+static void nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold)
+{
+    picked.clear();
+    const int n = bboxes.size();
+    std::vector<float> areas(n);
+    for (int i = 0; i < n; i++)
+    {
+        const BBoxRect& r = bboxes[i];
+
+        float width = r.xmax - r.xmin;
+        float height = r.ymax - r.ymin;
+
+        areas[i] = width * height;
+    }
+
+    for (int i = 0; i < n; i++)
+    {
+        const BBoxRect& a = bboxes[i];
+
+        int keep = 1;
+        for (int j = 0; j < (int)picked.size(); j++)
+        {
+            const BBoxRect& b = bboxes[picked[j]];
+
+            float interarea = intersection_area(a, b);
+            float unionarea = areas[i] + areas[picked[j]] - interarea;
+            if (interarea / unionarea > nms_threshold)
+                keep = 0;
+        }
+
+        if (keep)
+            picked.push_back(i);
+    }
+}
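+
+// Decode SSD outputs: 'location' holds per-prior box deltas, 'confidence'
+// holds per-prior class scores, and 'priorbox' holds num_prior boxes followed
+// by 4 shared variances. Fills 'out' with up to 100 detections and returns
+// the count.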
+int ssdforward(float *location,float * confidence,float * priorbox,BBox *bboxes,BBoxOut *out)
+{
+    const float* location_ptr = location;
+    const float* priorbox_ptr = priorbox;
+    const float* variance_ptr = priorbox + num_prior*4;
+
+    for (int i = 0; i < num_prior; i++)
+    {
+        const float* loc = location_ptr + i * 4;
+        const float* pb = priorbox_ptr + i * 4;
+        const float* var = variance_ptr;// + i * 4;
+
+        float* bbox =   (float*)&bboxes[i];// bboxes.row(i);
+
+        // CENTER_SIZE
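+        //   cx = var0*loc0*pb_w + pb_cx,  cy = var1*loc1*pb_h + pb_cy
+        //   w  = exp(var2*loc2)*pb_w,     h  = exp(var3*loc3)*pb_h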
+        float pb_w = pb[2] - pb[0];
+        float pb_h = pb[3] - pb[1];
+        float pb_cx = (pb[0] + pb[2]) * 0.5f;
+        float pb_cy = (pb[1] + pb[3]) * 0.5f;
+
+        float bbox_cx = var[0] * loc[0] * pb_w + pb_cx;
+        float bbox_cy = var[1] * loc[1] * pb_h + pb_cy;
+        float bbox_w = exp(var[2] * loc[2]) * pb_w;
+        float bbox_h = exp(var[3] * loc[3]) * pb_h;
+
+        bbox[0] = bbox_cx - bbox_w * 0.5f;
+        bbox[1] = bbox_cy - bbox_h * 0.5f;
+        bbox[2] = bbox_cx + bbox_w * 0.5f;
+        bbox[3] = bbox_cy + bbox_h * 0.5f;
+    }
+
+    // sort and nms for each class
+    std::vector< std::vector<BBoxRect> > all_class_bbox_rects;
+    std::vector< std::vector<float> > all_class_bbox_scores;
+    all_class_bbox_rects.resize(num_class);
+    all_class_bbox_scores.resize(num_class);
+
+    // start from 1 to ignore background class
+    for (int i = 1; i < num_class; i++)
+    {
+        // filter by confidence_threshold
+        std::vector<BBoxRect> class_bbox_rects;
+        std::vector<float> class_bbox_scores;
+
+        for (int j = 0; j < num_prior; j++)
+        {
+            float score = confidence[j * num_class + i];
+
+            if (score > confidence_threshold)
+            {
+                const float* bbox = (float*)&bboxes[j];
+                BBoxRect c = { bbox[0], bbox[1], bbox[2], bbox[3], i };
+                class_bbox_rects.push_back(c);
+                class_bbox_scores.push_back(score);
+            }
+        }
+
+        // sort inplace
+        qsort_descent_inplace(class_bbox_rects, class_bbox_scores);
+
+        // keep nms_top_k
+        if (nms_top_k < (int)class_bbox_rects.size())
+        {
+            class_bbox_rects.resize(nms_top_k);
+            class_bbox_scores.resize(nms_top_k);
+        }
+
+        // apply nms
+        std::vector<int> picked;
+        nms_sorted_bboxes(class_bbox_rects, picked, nms_threshold);
+
+        // select
+        for (int j = 0; j < (int)picked.size(); j++)
+        {
+            int z = picked[j];
+            all_class_bbox_rects[i].push_back(class_bbox_rects[z]);
+            all_class_bbox_scores[i].push_back(class_bbox_scores[z]);
+        }
+    }
+
+    // gather all class
+    std::vector<BBoxRect> bbox_rects;
+    std::vector<float> bbox_scores;
+
+    for (int i = 1; i < num_class; i++)
+    {
+        const std::vector<BBoxRect>& class_bbox_rects = all_class_bbox_rects[i];
+        const std::vector<float>& class_bbox_scores = all_class_bbox_scores[i];
+
+        bbox_rects.insert(bbox_rects.end(), class_bbox_rects.begin(), class_bbox_rects.end());
+        bbox_scores.insert(bbox_scores.end(), class_bbox_scores.begin(), class_bbox_scores.end());
+    }
+
+    // global sort inplace
+    qsort_descent_inplace(bbox_rects, bbox_scores);
+
+    // keep_top_k
+    if (keep_top_k < (int)bbox_rects.size())
+    {
+        bbox_rects.resize(keep_top_k);
+        bbox_scores.resize(keep_top_k);
+    }
+
+    int num_detected = bbox_rects.size();
+
+    if (num_detected > 100) num_detected = 100; // defensive clamp; keep_top_k already caps at 100
+    for (int i = 0; i < num_detected; i++)
+    {
+        const BBoxRect& r = bbox_rects[i];
+        float score = bbox_scores[i];
+        float* outptr = (float*)&out[i];
+        int *labelptr = (int *)outptr;
+        labelptr[0] = r.label;
+        outptr[1] = score;
+        outptr[2] = r.xmin;
+        outptr[3] = r.ymin;
+        outptr[4] = r.xmax;
+        outptr[5] = r.ymax;
+    }
+
+    return num_detected;
+}
+
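+// Read an entire binary file into dst (caller must ensure dst is large
+// enough); returns the number of bytes read.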
+int readbintomem(float *dst,char *path)
+{
+    FILE *pFile = fopen (path, "rb" );
+    if (pFile==NULL)
+    {
+        fputs ("File error",stderr);
+        exit (1);
+    }
+    fseek (pFile , 0 , SEEK_END);
+    int fsize = ftell(pFile);
+    rewind (pFile);
+
+    int result = fread(dst, 1, fsize, pFile);
+    fclose(pFile);
+
+    return result;
+}

+ 49 - 0
test/face_detect/detect.h

@@ -0,0 +1,49 @@
+#ifndef DETECT_H
+#define DETECT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+//#include "cnndecoder.h"
+typedef struct BBoxOut
+{
+    int label;
+    float score;
+    float xmin;
+    float ymin;
+    float xmax;
+    float ymax;
+}BBoxOut;
+#define PIX3218 0
+#define PIX3030 1
+#if PIX3218
+#define  num_prior 1224
+#elif PIX3030
+#define  num_prior 1917
+#endif
+typedef struct BBox
+{
+    float xmin;
+    float ymin;
+    float xmax;
+    float ymax;
+}BBox;
+
+typedef struct BBoxRect
+{
+    float xmin;
+    float ymin;
+    float xmax;
+    float ymax;
+    int label;
+}BBoxRect;
+
+
+int ssdforward(float *location,float * confidence,float * priorbox,BBox *bboxes,BBoxOut *out);
+int readbintomem(float *dst,char *path);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif

+ 506 - 0
test/face_detect/dw_2fd_src_test.c

@@ -0,0 +1,506 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.x" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <semaphore.h>
+#include <sys/stat.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process_linker_types.h"
+#include "process_linker.h"
+#include "process.h"
+#include "video_mem.h"
+
+#define MODULE_NAME "dw_test"
+
+#define _DMABUF_FD_SRC_
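+// When _DMABUF_FD_SRC_ is defined, frame buffers are allocated through the
+// video_mem library and shared as dma-buf fds; otherwise physical memory at
+// BASE_MEMORY is mapped directly through an fd_mem handle (presumably /dev/mem).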
+
+#define FILE_LENGTH         1028
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+
+#define IMG_WIDTH 300
+#define IMG_HEIGHT 300
+#define STRIDE_WIDTH 304
+#define STRIDE_HEIGHT 304
+#define RESIZE_WIDTH        304
+#define RESIZE_HEIGHT       304
+#define CROP_WIDTH          304
+#define CROP_HEIGHT         304
+#define R_MEAN              127.5
+#define G_MEAN              127.5
+#define B_MEAN              127.5
+#define SCALE               (1.0/127.5)
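+// Mean/scale together normalize pixels to [-1, 1]: (x - 127.5) / 127.5.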
+
+int input_size[] = {1 * 3 * STRIDE_HEIGHT * STRIDE_WIDTH, };
+
+
+#define BASE_MEMORY 0xD0000000
+//#define BASE_MEMORY 0xc0c00000
+
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+typedef struct _ServerParams
+{
+    char *plinkname;
+    char *inputfile;
+    PlinkColorFormat format;
+    int width;
+    int height;
+    int stride;
+    int frames;
+} ServerParams;
+
+typedef struct _PlinkChannel
+{
+    PlinkChannelID id;
+    PlinkHandle plink;
+    PlinkPacket pkt;
+    int sendid;
+    int backid;
+    int exit;
+    int available_bufs;
+} PlinkChannel;
+
+typedef struct _PictureBuffer
+{
+  unsigned int bus_address;
+  void *virtual_address;
+  unsigned int size;
+  int fd;
+} PictureBuffer;
+
+void printUsage(char *name)
+{
+    printf("usage: %s [options]\n"
+           "\n"
+           "  Available options:\n"
+           "    -l      plink file name (default: /tmp/plink.test)\n"
+           "    -i      input YUV file name (mandatory)\n"
+           "    -f      input color format (default: 2)\n"
+           "                2 - I420\n"
+           "                3 - NV12\n"
+           "    -w      video width (mandatory)\n"
+           "    -h      video height (mandatory)\n"
+           "    -s      video buffer stride (default: video width)\n"
+           "    -n      number of frames to send (default: 10)\n"
+           "\n", name);
+}
+
+/*
+ * Preprocess: resize to RESIZE_WIDTH x RESIZE_HEIGHT (or scale the short side
+ * when RESIZE_WIDTH is 0), center-crop, subtract the per-channel mean, apply
+ * SCALE, optionally convert RGB to BGR, then reorder HWC to CHW.
+ */
+void preprocess(struct image_data *img, int is_rgb, int to_bgr)
+{
+    uint32_t new_height, new_width;
+    uint32_t min_side;
+    if (is_rgb) {
+        im2rgb(img);
+    }
+    if (RESIZE_WIDTH == 0) {
+        min_side = MIN(img->shape[0], img->shape[1]);
+        new_height = (uint32_t) (img->shape[0] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        new_width = (uint32_t) (img->shape[1] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        imresize(img, new_height, new_width);
+    } else {
+        imresize(img, RESIZE_HEIGHT, RESIZE_WIDTH);
+    }
+    data_crop(img, CROP_HEIGHT, CROP_WIDTH);
+    sub_mean(img, R_MEAN, G_MEAN, B_MEAN);
+    data_scale(img, SCALE);
+    if(to_bgr) {
+        imrgb2bgr(img);
+    }
+    imhwc2chw(img);
+}
+
+void parseParams(int argc, char **argv, ServerParams *params)
+{
+    int i = 1;
+    memset(params, 0, sizeof(*params));
+    params->plinkname = "/tmp/plink_npu_rgb.test";
+    params->width = IMG_WIDTH;
+    params->height = IMG_HEIGHT;
+    params->stride = STRIDE_WIDTH;
+    params->frames = 3;
+    params->format = PLINK_COLOR_Format24BitBGR888Planar;
+    while (i < argc)
+    {
+        if (argv[i][0] != '-' || strlen(argv[i]) < 2)
+        {
+            i++;
+            continue;
+        }
+
+        if (argv[i][1] == 'l')
+        {
+            if (++i < argc)
+            {
+                params->plinkname = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'i')
+        {
+            if (++i < argc)
+            {
+                params->inputfile = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'f')
+        {
+            if (++i < argc)
+            {
+                params->format = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'w')
+        {
+            if (++i < argc)
+            {
+                params->width = atoi(argv[i++]);
+                if (params->stride == 0)
+                    params->stride = params->width;
+            }
+        }
+        else if (argv[i][1] == 'h')
+        {
+            if (++i < argc)
+            {
+                params->height = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 's')
+        {
+            if (++i < argc)
+            {
+                params->stride = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'n')
+        {
+            if (++i < argc)
+            {
+                params->frames = atoi(argv[i++]);
+            }
+        }
+    }
+}
+
+int checkParams(ServerParams *params)
+{
+    if (params->plinkname == NULL ||
+        params->inputfile == NULL ||
+        params->format == PLINK_COLOR_FormatUnused ||
+        params->width == 0 ||
+        params->height == 0 ||
+        params->stride == 0)
+        return -1;
+    return 0;
+}
+
+int getBufferSize(ServerParams *params)
+{
+    int size = 0;
+    switch (params->format)
+    {
+        case PLINK_COLOR_FormatYUV420Planar:
+        case PLINK_COLOR_FormatYUV420SemiPlanar:
+            size = params->stride * params->height * 3 / 2;
+            break;
+        case PLINK_COLOR_Format24BitRGB888Planar:
+        case PLINK_COLOR_Format24BitBGR888Planar:
+            size = params->stride * params->height * 3;
+            break;
+        default:
+            size = 0;
+    }
+    return size;
+}
+
+void constructRGBInfo(PlinkRGBInfo *info, ServerParams *params, unsigned int bus_address, int id)
+{
+    //int size = params->width * params->stride;
+    int size_r = params->stride * params->stride;
+
+    info->header.type = PLINK_TYPE_2D_RGB;
+    info->header.size = DATA_SIZE(*info);
+    info->header.id = id + 1;
+
+    info->format = params->format;
+    info->bus_address_b = bus_address;
+    info->bus_address_g = info->bus_address_b + size_r;
+    info->bus_address_r = info->bus_address_g + size_r;
+    info->img_width = params->width;
+    info->img_height = params->height;
+    info->stride_r = params->stride;
+    info->stride_g = params->stride;
+    info->stride_b = params->stride;
+    info->offset_r = 0;
+    info->offset_g = 0;
+    info->offset_b = 0;
+}
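+
+/*
+ * Layout produced above: the B, G and R planes sit back to back starting at
+ * bus_address, each stride * stride bytes apart, so a square padded plane is
+ * assumed (304 * 304 = 92416 bytes at the default stride).
+ */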
+
+int getBufferCount(PlinkPacket *pkt)
+{
+    int ret = 0;
+    for (int i = 0; i < pkt->num; i++)
+    {
+        PlinkDescHdr *hdr = (PlinkDescHdr *)(pkt->list[i]);
+        if (hdr->type == PLINK_TYPE_MESSAGE)
+        {
+            int *data = (int *)(pkt->list[i] + DATA_HEADER_SIZE);
+            if (*data == PLINK_EXIT_CODE)
+            {
+                ret |= 0x80000000; // set bit 31 to 1 to indicate 'exit'
+            }
+            else if (*data >= 0)
+                ret++;
+        }
+    }
+
+    return ret;
+}
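+
+/*
+ * Return value encoding: the low bits count buffers the peer has returned;
+ * bit 31 is set when a PLINK_EXIT_CODE message is seen, which makes the
+ * result negative, and callers treat a negative count as an exit request.
+ */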
+
+void retrieveSentBuffers(PlinkHandle plink, PlinkChannel *channel)
+{
+    PlinkStatus sts = PLINK_STATUS_OK;
+    while (channel->available_bufs < NUM_OF_BUFFERS)
+    {
+        do
+        {
+            sts = PLINK_recv(plink, channel->id, &channel->pkt);
+            int count = getBufferCount(&channel->pkt);
+            if (count > 0)
+            {
+                channel->available_bufs += count;
+            }
+        } while (sts == PLINK_STATUS_MORE_DATA);
+    }
+}
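+
+/* Blocks until the client has handed back all NUM_OF_BUFFERS buffers,
+ * relying on the client to echo one message per returned buffer. */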
+#ifdef _DMABUF_FD_SRC_
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, void *vmem)
+{
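+    // Round the requested size up to a whole 4 KiB page; both allocators in
+    // this file hand out page-granular memory (assumes 4 KiB pages).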
+    unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+    VmemParams params;
+    params.size = buffer_size;
+    params.flags = VMEM_FLAG_CONTIGUOUS | VMEM_FLAG_4GB_ADDR;
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        VMEM_allocate(vmem, &params);
+        VMEM_mmap(vmem, &params);
+        VMEM_export(vmem, &params);
+        printf("[SERVER] mmap %p from %x with size %d, dma-buf fd %d\n", 
+                params.vir_address, params.phy_address, params.size, params.fd);
+        picbuffers[i].virtual_address = params.vir_address;
+        picbuffers[i].bus_address = params.phy_address;
+        picbuffers[i].size = buffer_size;
+        picbuffers[i].fd = params.fd;
+    }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], void *vmem)
+{
+    VmemParams params;
+    memset(&params, 0, sizeof(params));
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        close(picbuffers[i].fd);
+        params.size = picbuffers[i].size;
+        params.vir_address = picbuffers[i].virtual_address;
+        params.phy_address = picbuffers[i].bus_address;
+        VMEM_free(vmem, &params);
+    }
+}
+#else
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  unsigned int bus_address = BASE_MEMORY;
+  unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    picbuffers[i].virtual_address = mmap(0, buffer_size, PROT_READ | PROT_WRITE,
+                                        MAP_SHARED, fd_mem,
+                                        bus_address);
+    printf("mmap %p from %x with size %d\n", picbuffers[i].virtual_address, bus_address, size);
+    picbuffers[i].bus_address = bus_address;
+    picbuffers[i].size = buffer_size;
+    bus_address += buffer_size;
+  }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    munmap(picbuffers[i].virtual_address, picbuffers[i].size);
+  }
+}
+#endif
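+
+/*
+ * Two compile-time buffer-sharing paths: with _DMABUF_FD_SRC_ the buffers
+ * come from the vmem allocator and travel to the consumer as dma-buf fds;
+ * without it they are carved out of a fixed physical window at BASE_MEMORY
+ * and shared by bus address over /dev/mem.
+ */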
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    PlinkStatus sts = PLINK_STATUS_OK;
+    ServerParams params;
+    PlinkChannel channel[2];
+    PlinkHandle plink = NULL;
+    PlinkRGBInfo pic;
+    PlinkMsg msg;
+
+    int input_num = 1;
+    int output_num = 1;
+    int pictures = 1;
+    int i;
+    int index = 0;
+
+    if (argc < (1 + input_num)) {
+        printf("Please set valide args: ./dw_src_test image.rgb\n");
+        return -1;
+    } else {
+        data_path = argv + 1;
+        printf("in_file1:<%s>\n", data_path[0]);
+        if (argc == 1 + 2 * input_num) {
+            pictures++;
+            printf("in_file2:<%s>\n", data_path[1]);
+        }
+    }
+
+    parseParams(argc, argv, &params);
+    if (checkParams(&params) != 0)
+    {
+        printUsage(argv[0]);
+        //return 0;
+    }
+
+    FILE *fp = NULL; // must be initialized: the cleanup path below checks fp
+#if 0
+    fp = fopen(params.inputfile, "rb");
+    if (fp == NULL) {
+        printf("failed to open %s\n", params.inputfile);
+        errExit("fopen");
+    }
+#endif
+
+#ifdef _DMABUF_FD_SRC_
+    void *vmem = NULL;
+    if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+        errExit("Failed to create VMEM.");
+#else
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem < 0) {
+        printf("%s: failed to open /dev/mem", MODULE_NAME);
+        return -1;
+    }
+#endif
+
+    int in_size = input_size[0];
+    char filename[FILE_LENGTH] = {0};
+    char filename_prefix[FILE_LENGTH] = {0};
+    uint64_t start_time, end_time;
+    int frames = params.frames;
+    PictureBuffer picbuffers[NUM_OF_BUFFERS];
+#ifdef _DMABUF_FD_SRC_
+    AllocateBuffers(picbuffers, in_size, vmem);
+#else
+    AllocateBuffers(picbuffers, in_size, fd_mem);
+#endif
+    sts = PLINK_create(&plink, params.plinkname, PLINK_MODE_SERVER);
+
+    memset(&channel[0], 0, sizeof(channel[0]));
+    channel[0].available_bufs = NUM_OF_BUFFERS;
+    sts = PLINK_connect(plink, &channel[0].id);
+
+    int frmcnt = 0;
+    do {
+        int sendid = channel[0].sendid;
+
+        fill_buffer_from_file(data_path[sendid % pictures], picbuffers[sendid].virtual_address);
+        //snprintf(filename, FILE_LENGTH, "%s_src_data%u.txt", filename_prefix, i);
+        //save_uint8_to_file(filename, (uint8_t*)picbuffers[index].virtual_address, in_size);
+
+        constructRGBInfo(&pic, &params, picbuffers[sendid].bus_address, sendid);
+
+        printf("[SERVER] Processed frame %d 0x%010llx: %dx%d, stride = %d\n",
+                sendid, (unsigned long long)pic.bus_address_b,
+                pic.img_width, pic.img_height,
+                pic.stride_b);
+
+        channel[0].pkt.list[0] = &pic;
+        channel[0].pkt.num = 1;
+#ifdef _DMABUF_FD_SRC_
+        channel[0].pkt.fd = picbuffers[sendid].fd;
+#else
+        channel[0].pkt.fd = PLINK_INVALID_FD; // physical address
+#endif
+        // Notify the NPU that one picture is ready for inference
+        sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+        channel[0].sendid = (channel[0].sendid + 1) % NUM_OF_BUFFERS;
+        channel[0].available_bufs -= 1;
+
+        int timeout = 0;
+        if (channel[0].available_bufs == 0)
+            timeout = 60000; // wait up to 60 seconds if buffers are used up
+
+        if (PLINK_wait(plink, channel[0].id, timeout) == PLINK_STATUS_OK)
+        {
+            do
+            {
+                sts = PLINK_recv(plink, channel[0].id, &channel[0].pkt);
+                int count = getBufferCount(&channel[0].pkt);
+                if (count < 0)
+                    channel[0].exit = 1;
+                channel[0].available_bufs += count;
+            } while (sts == PLINK_STATUS_MORE_DATA);
+        }
+
+        frmcnt++; // bound the loop by the '-n' frame count
+        index = (index + 1) % NUM_OF_BUFFERS;
+    } while (channel[0].exit == 0 && frmcnt < frames);
+
+    retrieveSentBuffers(plink, &channel[0]);
+
+cleanup:
+    msg.header.type = PLINK_TYPE_MESSAGE;
+    msg.header.size = DATA_SIZE(PlinkMsg);
+    msg.msg = PLINK_EXIT_CODE;
+    channel[0].pkt.list[0] = &msg;
+    channel[0].pkt.num = 1;
+    channel[0].pkt.fd = PLINK_INVALID_FD;
+    sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+    sleep(1); // Sleep one second to make sure client is ready for exit
+    PLINK_close(plink, PLINK_CLOSE_ALL);
+#ifdef _DMABUF_FD_SRC_
+    FreeBuffers(picbuffers, vmem);
+#else    
+    FreeBuffers(picbuffers, in_size, fd_mem);
+#endif
+    if (fp != NULL)
+        fclose(fp);
+
+    return 0;
+}
+

+ 500 - 0
test/face_detect/dw_fd_src_test.c

@@ -0,0 +1,500 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "1.13.x" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <semaphore.h>
+#include <sys/stat.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process_linker_types.h"
+#include "process_linker.h"
+#include "process.h"
+#include "video_mem.h"
+
+#define MODULE_NAME "dw_test"
+
+#define _DMABUF_FD_SRC_
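+/* This variant shares buffers as dma-buf fds allocated through the vmem
+ * library; dw_src_test.c below leaves the macro undefined and passes raw
+ * physical addresses instead. */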
+
+#define FILE_LENGTH         1028
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+
+#define IMG_WIDTH 300
+#define IMG_HEIGHT 300
+#define STRIDE_WIDTH 304
+#define STRIDE_HEIGHT 304
+#define RESIZE_WIDTH        304
+#define RESIZE_HEIGHT       304
+#define CROP_WIDTH          304
+#define CROP_HEGHT          304
+#define R_MEAN              127.5
+#define G_MEAN              127.5
+#define B_MEAN              127.5
+#define SCALE               (1.0/127.5)
+
+int input_size[] = {1 * 3 * STRIDE_HEIGHT * STRIDE_WIDTH, };
+
+
+#define BASE_MEMORY 0xD0000000
+//#define BASE_MEMORY 0xc0c00000
+
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+typedef struct _ServerParams
+{
+    char *plinkname;
+    char *inputfile;
+    PlinkColorFormat format;
+    int width;
+    int height;
+    int stride;
+    int frames;
+} ServerParams;
+
+typedef struct _PlinkChannel
+{
+    PlinkChannelID id;
+    PlinkHandle plink;
+    PlinkPacket pkt;
+    int sendid;
+    int backid;
+    int exit;
+    int available_bufs;
+} PlinkChannel;
+
+typedef struct _PictureBuffer
+{
+  unsigned int bus_address;
+  void *virtual_address;
+  unsigned int size;
+  int fd;
+} PictureBuffer;
+
+void printUsage(char *name)
+{
+    printf("usage: %s [options]\n"
+           "\n"
+           "  Available options:\n"
+           "    -l      plink file name (default: /tmp/plink.test)\n"
+           "    -i      input YUV file name (mandatory)\n"
+           "    -f      input color format (default: 2)\n"
+           "                2 - I420\n"
+           "                3 - NV12\n"
+           "    -w      video width (mandatory)\n"
+           "    -h      video height (mandatory)\n"
+           "    -s      video buffer stride (default: video width)\n"
+           "    -n      number of frames to send (default: 10)\n"
+           "\n", name);
+}
+
+/*
+ * Preprocess function
+ */
+void preprocess(struct image_data *img, int is_rgb, int to_bgr)
+{
+    uint32_t new_height, new_width;
+    uint32_t min_side;
+    if (is_rgb) {
+        im2rgb(img);
+    }
+    if (RESIZE_WIDTH == 0) {
+        min_side = MIN(img->shape[0], img->shape[1]);
+        new_height = (uint32_t) (img->shape[0] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        new_width = (uint32_t) (img->shape[1] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        imresize(img, new_height, new_width);
+    } else {
+        imresize(img, RESIZE_HEIGHT, RESIZE_WIDTH);
+    }
+    data_crop(img, CROP_HEGHT, CROP_WIDTH);
+    sub_mean(img, R_MEAN, G_MEAN, B_MEAN);
+    data_scale(img, SCALE);
+    if(to_bgr) {
+        imrgb2bgr(img);
+    }
+    imhwc2chw(img);
+}
+
+void parseParams(int argc, char **argv, ServerParams *params)
+{
+    int i = 1;
+    memset(params, 0, sizeof(*params));
+    params->plinkname = "/tmp/plink_npu_rgb.test";
+    params->width = IMG_WIDTH;
+    params->height = IMG_HEIGHT;
+    params->stride = STRIDE_WIDTH;
+    params->frames = 3;
+    params->format = PLINK_COLOR_Format24BitBGR888Planar;
+    while (i < argc)
+    {
+        if (argv[i][0] != '-' || strlen(argv[i]) < 2)
+        {
+            i++;
+            continue;
+        }
+
+        if (argv[i][1] == 'l')
+        {
+            if (++i < argc)
+            {
+                params->plinkname = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'i')
+        {
+            if (++i < argc)
+            {
+                params->inputfile = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'f')
+        {
+            if (++i < argc)
+            {
+                params->format = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'w')
+        {
+            if (++i < argc)
+            {
+                params->width = atoi(argv[i++]);
+                if (params->stride == 0)
+                    params->stride = params->width;
+            }
+        }
+        else if (argv[i][1] == 'h')
+        {
+            if (++i < argc)
+            {
+                params->height = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 's')
+        {
+            if (++i < argc)
+            {
+                params->stride = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'n')
+        {
+            if (++i < argc)
+            {
+                params->frames = atoi(argv[i++]);
+            }
+        }
+    };
+}
+
+int checkParams(ServerParams *params)
+{
+    if (params->plinkname == NULL ||
+        params->inputfile == NULL ||
+        params->format == PLINK_COLOR_FormatUnused ||
+        params->width == 0 ||
+        params->height == 0 ||
+        params->stride == 0)
+        return -1;
+    return 0;
+}
+
+int getBufferSize(ServerParams *params)
+{
+    int size = 0;
+    switch (params->format)
+    {
+        case PLINK_COLOR_FormatYUV420Planar:
+        case PLINK_COLOR_FormatYUV420SemiPlanar:
+            size = params->stride * params->height * 3 / 2;
+            break;
+        case PLINK_COLOR_Format24BitRGB888Planar:
+        case PLINK_COLOR_Format24BitBGR888Planar:
+            size = params->stride * params->height * 3;
+            break;
+        default:
+            size = 0;
+    }
+    return size;
+}
+
+void constructRGBInfo(PlinkRGBInfo *info, ServerParams *params, unsigned int bus_address, int id)
+{
+    //int size = params->width * params->stride;
+    int size_r = params->stride * params->stride;
+
+    info->header.type = PLINK_TYPE_2D_RGB;
+    info->header.size = DATA_SIZE(*info);
+    info->header.id = id + 1;
+
+    info->format = params->format;
+    info->bus_address_b = bus_address;
+    info->bus_address_g = info->bus_address_b + size_r;
+    info->bus_address_r = info->bus_address_g + size_r;
+    info->img_width = params->width;
+    info->img_height = params->height;
+    info->stride_r = params->stride;
+    info->stride_g = params->stride;
+    info->stride_b = params->stride;
+    info->offset_r = 0;
+    info->offset_g = 0;
+    info->offset_b = 0;
+}
+
+int getBufferCount(PlinkPacket *pkt)
+{
+    int ret = 0;
+    for (int i = 0; i < pkt->num; i++)
+    {
+        PlinkDescHdr *hdr = (PlinkDescHdr *)(pkt->list[i]);
+        if (hdr->type == PLINK_TYPE_MESSAGE)
+        {
+            int *data = (int *)(pkt->list[i] + DATA_HEADER_SIZE);
+            if (*data == PLINK_EXIT_CODE)
+            {
+                ret |= 0x80000000; // set bit 31 to 1 to indicate 'exit'
+            }
+            else if (*data >= 0)
+                ret++;
+        }
+    }
+
+    return ret;
+}
+
+void retrieveSentBuffers(PlinkHandle plink, PlinkChannel *channel)
+{
+    PlinkStatus sts = PLINK_STATUS_OK;
+    while (channel->available_bufs < NUM_OF_BUFFERS)
+    {
+        do
+        {
+            sts = PLINK_recv(plink, channel->id, &channel->pkt);
+            int count = getBufferCount(&channel->pkt);
+            if (count > 0)
+            {
+                channel->available_bufs += count;
+            }
+        } while (sts == PLINK_STATUS_MORE_DATA);
+    }
+}
+#ifdef _DMABUF_FD_SRC_
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, void *vmem)
+{
+    unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+    VmemParams params;
+    params.size = buffer_size;
+    params.flags = VMEM_FLAG_CONTIGUOUS | VMEM_FLAG_4GB_ADDR;
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        VMEM_allocate(vmem, &params);
+        VMEM_mmap(vmem, &params);
+        VMEM_export(vmem, &params);
+        printf("[SERVER] mmap %p from %x with size %d, dma-buf fd %d\n", 
+                params.vir_address, params.phy_address, params.size, params.fd);
+        picbuffers[i].virtual_address = params.vir_address;
+        picbuffers[i].bus_address = params.phy_address;
+        picbuffers[i].size = buffer_size;
+        picbuffers[i].fd = params.fd;
+    }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], void *vmem)
+{
+    VmemParams params;
+    memset(&params, 0, sizeof(params));
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        close(picbuffers[i].fd);
+        params.size = picbuffers[i].size;
+        params.vir_address = picbuffers[i].virtual_address;
+        params.phy_address = picbuffers[i].bus_address;
+        VMEM_free(vmem, &params);
+    }
+}
+#else
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  unsigned int bus_address = BASE_MEMORY;
+  unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    picbuffers[i].virtual_address = mmap(0, buffer_size, PROT_READ | PROT_WRITE,
+                                        MAP_SHARED, fd_mem,
+                                        bus_address);
+    printf("mmap %p from %x with size %d\n", picbuffers[i].virtual_address, bus_address, size);
+    picbuffers[i].bus_address = bus_address;
+    picbuffers[i].size = buffer_size;
+    bus_address += buffer_size;
+  }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    munmap(picbuffers[i].virtual_address, picbuffers[i].size);
+  }
+}
+#endif
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    PlinkStatus sts = PLINK_STATUS_OK;
+    ServerParams params;
+    PlinkChannel channel[2];
+    PlinkHandle plink = NULL;
+    PlinkRGBInfo pic;
+    PlinkMsg msg;
+
+    int input_num = 1;
+    int output_num = 1;
+    int i;
+    int index = 0;
+
+    if (argc < (1 + input_num)) {
+        printf("Please set valid args: ./dw_fd_src_test image.rgb\n");
+        return -1;
+    } else {
+        data_path = argv + 1;
+    }
+
+    parseParams(argc, argv, &params);
+    if (checkParams(&params) != 0)
+    {
+        printUsage(argv[0]);
+        //return 0;
+    }
+
+    FILE *fp = NULL; // must be initialized: the cleanup path below checks fp
+#if 0
+    fp = fopen(params.inputfile, "rb");
+    if (fp == NULL) {
+        printf("failed to open %s\n", params.inputfile);
+        errExit("fopen");
+    }
+#endif
+
+#ifdef _DMABUF_FD_SRC_
+    void *vmem = NULL;
+    if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+        errExit("Failed to create VMEM.");
+#else
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem < 0) {
+        printf("%s: failed to open /dev/mem", MODULE_NAME);
+        return -1;
+    }
+#endif
+
+    int in_size = input_size[0];
+    char filename[FILE_LENGTH] = {0};
+    char filename_prefix[FILE_LENGTH] = {0};
+    uint64_t start_time, end_time;
+    int frames = params.frames;
+    PictureBuffer picbuffers[NUM_OF_BUFFERS];
+#ifdef _DMABUF_FD_SRC_
+    AllocateBuffers(picbuffers, in_size, vmem);
+#else
+    AllocateBuffers(picbuffers, in_size, fd_mem);
+#endif
+    sts = PLINK_create(&plink, params.plinkname, PLINK_MODE_SERVER);
+
+    memset(&channel[0], 0, sizeof(channel[0]));
+    channel[0].available_bufs = NUM_OF_BUFFERS;
+    sts = PLINK_connect(plink, &channel[0].id);
+
+    int frmcnt = 0;
+    do {
+        int sendid = channel[0].sendid;
+
+        fill_buffer_from_file(data_path[0], picbuffers[sendid].virtual_address);
+        //snprintf(filename, FILE_LENGTH, "%s_src_data%u.txt", filename_prefix, i);
+        //save_uint8_to_file(filename, (uint8_t*)picbuffers[index].virtual_address, in_size);
+
+        constructRGBInfo(&pic, &params, picbuffers[sendid].bus_address, sendid);
+
+        printf("[SERVER] Processed frame %d 0x%010llx: %dx%d, stride = %d\n",
+                sendid, (unsigned long long)pic.bus_address_b,
+                pic.img_width, pic.img_height,
+                pic.stride_b);
+
+        channel[0].pkt.list[0] = &pic;
+        channel[0].pkt.num = 1;
+#ifdef _DMABUF_FD_SRC_
+        channel[0].pkt.fd = picbuffers[sendid].fd;
+#else
+        channel[0].pkt.fd = PLINK_INVALID_FD; // physical address
+#endif
+        // Notify the NPU that one picture is ready for inference
+        sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+        channel[0].sendid = (channel[0].sendid + 1) % NUM_OF_BUFFERS;
+        channel[0].available_bufs -= 1;
+
+        int timeout = 0;
+        if (channel[0].available_bufs == 0)
+            timeout = 60000; // wait up to 60 seconds if buffers are used up
+
+        if (PLINK_wait(plink, channel[0].id, timeout) == PLINK_STATUS_OK)
+        {
+            do
+            {
+                sts = PLINK_recv(plink, channel[0].id, &channel[0].pkt);
+                int count = getBufferCount(&channel[0].pkt);
+                if (count < 0)
+                    channel[0].exit = 1;
+                channel[0].available_bufs += count;
+            } while (sts == PLINK_STATUS_MORE_DATA);
+        }
+
+        frmcnt++; // bound the loop by the '-n' frame count
+        index = (index + 1) % NUM_OF_BUFFERS;
+    } while (channel[0].exit == 0 && frmcnt < frames);
+
+    retrieveSentBuffers(plink, &channel[0]);
+
+cleanup:
+    msg.header.type = PLINK_TYPE_MESSAGE;
+    msg.header.size = DATA_SIZE(PlinkMsg);
+    msg.msg = PLINK_EXIT_CODE;
+    channel[0].pkt.list[0] = &msg;
+    channel[0].pkt.num = 1;
+    channel[0].pkt.fd = PLINK_INVALID_FD;
+    sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+    sleep(1); // Sleep one second to make sure client is ready for exit
+    PLINK_close(plink, PLINK_CLOSE_ALL);
+#ifdef _DMABUF_FD_SRC_
+    FreeBuffers(picbuffers, vmem);
+#else    
+    FreeBuffers(picbuffers, in_size, fd_mem);
+#endif
+    if (fp != NULL)
+        fclose(fp);
+
+    return 0;
+}
+

+ 500 - 0
test/face_detect/dw_src_test.c

@@ -0,0 +1,500 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <semaphore.h>
+#include <sys/stat.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process_linker_types.h"
+#include "process_linker.h"
+#include "process.h"
+#include "video_mem.h"
+
+#define MODULE_NAME "dw_test"
+
+//#define _DMABUF_FD_SRC_
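+/* Kept undefined here: this variant maps a fixed physical window at
+ * BASE_MEMORY through /dev/mem and passes bus addresses to the consumer. */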
+
+#define FILE_LENGTH         1028
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+
+#define IMG_WIDTH 300
+#define IMG_HEIGHT 300
+#define STRIDE_WIDTH 304
+#define STRIDE_HEIGHT 304
+#define RESIZE_WIDTH        304
+#define RESIZE_HEIGHT       304
+#define CROP_WIDTH          304
+#define CROP_HEGHT          304
+#define R_MEAN              127.5
+#define G_MEAN              127.5
+#define B_MEAN              127.5
+#define SCALE               (1.0/127.5)
+
+int input_size[] = {1 * 3 * STRIDE_HEIGHT * STRIDE_WIDTH, };
+
+
+#define BASE_MEMORY 0xD0000000
+//#define BASE_MEMORY 0xc0c00000
+
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+typedef struct _ServerParams
+{
+    char *plinkname;
+    char *inputfile;
+    PlinkColorFormat format;
+    int width;
+    int height;
+    int stride;
+    int frames;
+} ServerParams;
+
+typedef struct _PlinkChannel
+{
+    PlinkChannelID id;
+    PlinkHandle plink;
+    PlinkPacket pkt;
+    int sendid;
+    int backid;
+    int exit;
+    int available_bufs;
+} PlinkChannel;
+
+typedef struct _PictureBuffer
+{
+  unsigned int bus_address;
+  void *virtual_address;
+  unsigned int size;
+  int fd;
+} PictureBuffer;
+
+void printUsage(char *name)
+{
+    printf("usage: %s [options]\n"
+           "\n"
+           "  Available options:\n"
+           "    -l      plink file name (default: /tmp/plink.test)\n"
+           "    -i      input YUV file name (mandatory)\n"
+           "    -f      input color format (default: 2)\n"
+           "                2 - I420\n"
+           "                3 - NV12\n"
+           "    -w      video width (mandatory)\n"
+           "    -h      video height (mandatory)\n"
+           "    -s      video buffer stride (default: video width)\n"
+           "    -n      number of frames to send (default: 10)\n"
+           "\n", name);
+}
+
+/*
+ * Preprocess function
+ */
+void preprocess(struct image_data *img, int is_rgb, int to_bgr)
+{
+    uint32_t new_height, new_width;
+    uint32_t min_side;
+    if (is_rgb) {
+        im2rgb(img);
+    }
+    if (RESIZE_WIDTH == 0) {
+        min_side = MIN(img->shape[0], img->shape[1]);
+        new_height = (uint32_t) (img->shape[0] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        new_width = (uint32_t) (img->shape[1] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        imresize(img, new_height, new_width);
+    } else {
+        imresize(img, RESIZE_HEIGHT, RESIZE_WIDTH);
+    }
+    data_crop(img, CROP_HEGHT, CROP_WIDTH);
+    sub_mean(img, R_MEAN, G_MEAN, B_MEAN);
+    data_scale(img, SCALE);
+    if(to_bgr) {
+        imrgb2bgr(img);
+    }
+    imhwc2chw(img);
+}
+
+void parseParams(int argc, char **argv, ServerParams *params)
+{
+    int i = 1;
+    memset(params, 0, sizeof(*params));
+    params->plinkname = "/tmp/plink_npu_rgb.test";
+    params->width = IMG_WIDTH;
+    params->height = IMG_HEIGHT;
+    params->stride = STRIDE_WIDTH;
+    params->frames = 3;
+    params->format = PLINK_COLOR_Format24BitBGR888Planar;
+    while (i < argc)
+    {
+        if (argv[i][0] != '-' || strlen(argv[i]) < 2)
+        {
+            i++;
+            continue;
+        }
+
+        if (argv[i][1] == 'l')
+        {
+            if (++i < argc)
+            {
+                params->plinkname = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'i')
+        {
+            if (++i < argc)
+            {
+                params->inputfile = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'f')
+        {
+            if (++i < argc)
+            {
+                params->format = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'w')
+        {
+            if (++i < argc)
+            {
+                params->width = atoi(argv[i++]);
+                if (params->stride == 0)
+                    params->stride = params->width;
+            }
+        }
+        else if (argv[i][1] == 'h')
+        {
+            if (++i < argc)
+            {
+                params->height = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 's')
+        {
+            if (++i < argc)
+            {
+                params->stride = atoi(argv[i++]);
+            }
+        }
+        else if (argv[i][1] == 'n')
+        {
+            if (++i < argc)
+            {
+                params->frames = atoi(argv[i++]);
+            }
+        }
+    };
+}
+
+int checkParams(ServerParams *params)
+{
+    if (params->plinkname == NULL ||
+        params->inputfile == NULL ||
+        params->format == PLINK_COLOR_FormatUnused ||
+        params->width == 0 ||
+        params->height == 0 ||
+        params->stride == 0)
+        return -1;
+    return 0;
+}
+
+int getBufferSize(ServerParams *params)
+{
+    int size = 0;
+    switch (params->format)
+    {
+        case PLINK_COLOR_FormatYUV420Planar:
+        case PLINK_COLOR_FormatYUV420SemiPlanar:
+            size = params->stride * params->height * 3 / 2;
+            break;
+        case PLINK_COLOR_Format24BitRGB888Planar:
+        case PLINK_COLOR_Format24BitBGR888Planar:
+            size = params->stride * params->height * 3;
+            break;
+        default:
+            size = 0;
+    }
+    return size;
+}
+
+void constructRGBInfo(PlinkRGBInfo *info, ServerParams *params, unsigned int bus_address, int id)
+{
+    //int size = params->width * params->stride;
+    int size_r = params->stride * params->stride;
+
+    info->header.type = PLINK_TYPE_2D_RGB;
+    info->header.size = DATA_SIZE(*info);
+    info->header.id = id + 1;
+
+    info->format = params->format;
+    info->bus_address_b = bus_address;
+    info->bus_address_g = info->bus_address_b + size_r;
+    info->bus_address_r = info->bus_address_g + size_r;
+    info->img_width = params->width;
+    info->img_height = params->height;
+    info->stride_r = params->stride;
+    info->stride_g = params->stride;
+    info->stride_b = params->stride;
+    info->offset_r = 0;
+    info->offset_g = 0;
+    info->offset_b = 0;
+}
+
+int getBufferCount(PlinkPacket *pkt)
+{
+    int ret = 0;
+    for (int i = 0; i < pkt->num; i++)
+    {
+        PlinkDescHdr *hdr = (PlinkDescHdr *)(pkt->list[i]);
+        if (hdr->type == PLINK_TYPE_MESSAGE)
+        {
+            int *data = (int *)(pkt->list[i] + DATA_HEADER_SIZE);
+            if (*data == PLINK_EXIT_CODE)
+            {
+                ret |= 0x80000000; // set bit 31 to 1 to indicate 'exit'
+            }
+            else if (*data >= 0)
+                ret++;
+        }
+    }
+
+    return ret;
+}
+
+void retrieveSentBuffers(PlinkHandle plink, PlinkChannel *channel)
+{
+    PlinkStatus sts = PLINK_STATUS_OK;
+    while (channel->available_bufs < NUM_OF_BUFFERS)
+    {
+        do
+        {
+            sts = PLINK_recv(plink, channel->id, &channel->pkt);
+            int count = getBufferCount(&channel->pkt);
+            if (count > 0)
+            {
+                channel->available_bufs += count;
+            }
+        } while (sts == PLINK_STATUS_MORE_DATA);
+    }
+}
+#ifdef _DMABUF_FD_SRC_
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, void *vmem)
+{
+    unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+    VmemParams params;
+    params.size = buffer_size;
+    params.flags = VMEM_FLAG_CONTIGUOUS | VMEM_FLAG_4GB_ADDR;
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        VMEM_allocate(vmem, &params);
+        VMEM_mmap(vmem, &params);
+        VMEM_export(vmem, &params);
+        printf("[SERVER] mmap %p from %x with size %d, dma-buf fd %d\n", 
+                params.vir_address, params.phy_address, params.size, params.fd);
+        picbuffers[i].virtual_address = params.vir_address;
+        picbuffers[i].bus_address = params.phy_address;
+        picbuffers[i].size = buffer_size;
+        picbuffers[i].fd = params.fd;
+    }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], void *vmem)
+{
+    VmemParams params;
+    memset(&params, 0, sizeof(params));
+    for (int i = 0; i < NUM_OF_BUFFERS; i++)
+    {
+        close(picbuffers[i].fd);
+        params.size = picbuffers[i].size;
+        params.vir_address = picbuffers[i].virtual_address;
+        params.phy_address = picbuffers[i].bus_address;
+        VMEM_free(vmem, &params);
+    }
+}
+#else
+void AllocateBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  unsigned int bus_address = BASE_MEMORY;
+  unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    picbuffers[i].virtual_address = mmap(0, buffer_size, PROT_READ | PROT_WRITE,
+                                        MAP_SHARED, fd_mem,
+                                        bus_address);
+    printf("mmap %p from %x with size %d\n", picbuffers[i].virtual_address, bus_address, size);
+    picbuffers[i].bus_address = bus_address;
+    picbuffers[i].size = buffer_size;
+    bus_address += buffer_size;
+  }
+}
+
+void FreeBuffers(PictureBuffer picbuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    munmap(picbuffers[i].virtual_address, picbuffers[i].size);
+  }
+}
+#endif
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    PlinkStatus sts = PLINK_STATUS_OK;
+    ServerParams params;
+    PlinkChannel channel[2];
+    PlinkHandle plink = NULL;
+    PlinkRGBInfo pic;
+    PlinkMsg msg;
+
+    int input_num = 1;
+    int output_num = 1;
+    int i;
+    int index = 0;
+
+    if (argc < (1 + input_num)) {
+        printf("Please set valid args: ./dw_src_test image.rgb\n");
+        return -1;
+    } else {
+        data_path = argv + 1;
+    }
+
+    parseParams(argc, argv, &params);
+    if (checkParams(&params) != 0)
+    {
+        printUsage(argv[0]);
+        //return 0;
+    }
+
+    FILE *fp = NULL; // must be initialized: the cleanup path below checks fp
+#if 0
+    fp = fopen(params.inputfile, "rb");
+    if (fp == NULL) {
+        printf("failed to open %s\n", params.inputfile);
+        errExit("fopen");
+    }
+#endif
+
+#ifdef _DMABUF_FD_SRC_
+    void *vmem = NULL;
+    if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+        errExit("Failed to create VMEM.");
+#else
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem < 0) {
+        printf("%s: failed to open /dev/mem", MODULE_NAME);
+        return -1;
+    }
+#endif
+
+    int in_size = input_size[0];
+    char filename[FILE_LENGTH] = {0};
+    char filename_prefix[FILE_LENGTH] = {0};
+    uint64_t start_time, end_time;
+    int frames = params.frames;
+    PictureBuffer picbuffers[NUM_OF_BUFFERS];
+#ifdef _DMABUF_FD_SRC_
+    AllocateBuffers(picbuffers, in_size, vmem);
+#else
+    AllocateBuffers(picbuffers, in_size, fd_mem);
+#endif
+    sts = PLINK_create(&plink, params.plinkname, PLINK_MODE_SERVER);
+
+    memset(&channel[0], 0, sizeof(channel[0]));
+    channel[0].available_bufs = NUM_OF_BUFFERS;
+    sts = PLINK_connect(plink, &channel[0].id);
+
+    int frmcnt = 0;
+    do {
+        int sendid = channel[0].sendid;
+
+        fill_buffer_from_file(data_path[0], picbuffers[sendid].virtual_address);
+        //snprintf(filename, FILE_LENGTH, "%s_src_data%u.txt", filename_prefix, i);
+        //save_uint8_to_file(filename, (uint8_t*)picbuffers[index].virtual_address, in_size);
+
+        constructRGBInfo(&pic, &params, picbuffers[sendid].bus_address, sendid);
+
+        printf("[SERVER] Processed frame %d 0x%010llx: %dx%d, stride = %d\n",
+                sendid, (unsigned long long)pic.bus_address_b,
+                pic.img_width, pic.img_height,
+                pic.stride_b);
+
+        channel[0].pkt.list[0] = &pic;
+        channel[0].pkt.num = 1;
+#ifdef _DMABUF_FD_SRC_
+        channel[0].pkt.fd = picbuffers[sendid].fd;
+#else
+        channel[0].pkt.fd = PLINK_INVALID_FD; // physical address
+#endif
+        // Notify the NPU that one picture is ready for inference
+        sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+        channel[0].sendid = (channel[0].sendid + 1) % NUM_OF_BUFFERS;
+        channel[0].available_bufs -= 1;
+
+        int timeout = 0;
+        if (channel[0].available_bufs == 0)
+            timeout = 60000; // wait up to 60 seconds if buffers are used up
+
+        if (PLINK_wait(plink, channel[0].id, timeout) == PLINK_STATUS_OK)
+        {
+            do
+            {
+                sts = PLINK_recv(plink, channel[0].id, &channel[0].pkt);
+                int count = getBufferCount(&channel[0].pkt);
+                if (count < 0)
+                    channel[0].exit = 1;
+                channel[0].available_bufs += count;
+            } while (sts == PLINK_STATUS_MORE_DATA);
+        }
+
+        frmcnt++; // bound the loop by the '-n' frame count
+        index = (index + 1) % NUM_OF_BUFFERS;
+    } while (channel[0].exit == 0 && frmcnt < frames);
+
+    retrieveSentBuffers(plink, &channel[0]);
+
+cleanup:
+    msg.header.type = PLINK_TYPE_MESSAGE;
+    msg.header.size = DATA_SIZE(PlinkMsg);
+    msg.msg = PLINK_EXIT_CODE;
+    channel[0].pkt.list[0] = &msg;
+    channel[0].pkt.num = 1;
+    channel[0].pkt.fd = PLINK_INVALID_FD;
+    sts = PLINK_send(plink, channel[0].id, &channel[0].pkt);
+    sleep(1); // Sleep one second to make sure client is ready for exit
+    PLINK_close(plink, PLINK_CLOSE_ALL);
+#ifdef _DMABUF_FD_SRC_
+    FreeBuffers(picbuffers, vmem);
+#else    
+    FreeBuffers(picbuffers, in_size, fd_mem);
+#endif
+    if (fp != NULL)
+        fclose(fp);
+
+    return 0;
+}
+

+ 159 - 0
test/face_detect/g2d_sink_test.c

@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "1.13.x" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <memory.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process_linker_types.h"
+#include "process_linker.h"
+
+#define FILE_LENGTH         1028
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+
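+/*
+ * Sink side of the pipeline: connect to the NPU stage as a plink client,
+ * receive PLINK_TYPE_OBJECT descriptors, map the PlinkObjectDetect array
+ * from physical memory to print each detection, then return the buffer id
+ * as a PLINK_TYPE_MESSAGE so the source can reuse the buffer.
+ */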
+int main(int argc, char **argv) {
+    PlinkStatus sts = PLINK_STATUS_OK;
+    PlinkPacket sendpkt, recvpkt;
+    PlinkMsg msg;
+    PlinkHandle plink = NULL;
+    FILE *fp = NULL;
+    int exitcode = 0;
+    int i,j;
+
+    uint64_t start_time, end_time;
+
+    int frames = 1000;
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem == -1) {
+      printf("ERROR: failed to open: %s\n", "/dev/mem");
+      return -1;
+    }
+
+    if (PLINK_create(&plink, "/tmp/plink_npu_featuremap.test", PLINK_MODE_CLIENT) != PLINK_STATUS_OK)
+        errExit("Failed to create PLINK.");
+
+    if (PLINK_connect(plink, NULL) != PLINK_STATUS_OK)
+        errExit("Failed to connect to server.");
+
+    int frmcnt = 0;
+    // while loop to receive shm features
+    do {
+        sts = PLINK_recv(plink, 0, &recvpkt);
+        for (i = 0; i < recvpkt.num; i++) {
+            PlinkDescHdr *hdr = (PlinkDescHdr *)(recvpkt.list[i]);
+            if (hdr->type == PLINK_TYPE_OBJECT) {
+                PlinkObjectInfo *info = (PlinkObjectInfo *)(recvpkt.list[i]);
+                printf("[G2D CLIENT] Received frame<%d> addr:<0x%08x>: face_cnt<%d>\n",
+                        info->header.id, info->bus_address, info->object_cnt);
+                start_time = shl_get_timespec();
+                int size = info->object_cnt * sizeof(PlinkObjectDetect);
+                uint8_t *vaddr = (uint8_t *)mmap(0, size, PROT_READ | PROT_WRITE,
+                        MAP_SHARED, fd_mem, info->bus_address);
+                end_time = shl_get_timespec();
+                if (vaddr == MAP_FAILED) { // do not touch the buffer if the mapping failed
+                    printf("ERROR: failed to mmap detection buffer\n");
+                } else {
+                    PlinkObjectDetect *face = (PlinkObjectDetect *)vaddr;
+                    for (j = 0; j < info->object_cnt; j++) {
+                        printf("feat[%d]: score<%f>, [%f,%f]~[%f,%f]\n", j,
+                                face[j].score,
+                                face[j].box.x1, face[j].box.y1,
+                                face[j].box.x2, face[j].box.y2);
+                    }
+                    munmap(vaddr, size); // release the per-frame mapping
+                }
+                // return the buffer to source
+                msg.header.type = PLINK_TYPE_MESSAGE;
+                msg.header.size = DATA_SIZE(PlinkMsg);
+                msg.msg = hdr->id;
+                sendpkt.list[0] = &msg;
+                sendpkt.num = 1;
+                sendpkt.fd = PLINK_INVALID_FD;
+                if (PLINK_send(plink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                    errExit("Failed to send data.");
+
+                fprintf(stderr, "Run G2D frame time: %.5fms, FPS=%.2f\n", ((float)(end_time-start_time))/1000000,
+                    1000000000.0/((float)(end_time-start_time)));
+            }
+            else if (hdr->type == PLINK_TYPE_MESSAGE)
+            {
+                PlinkMsg *msg = (PlinkMsg *)(recvpkt.list[i]);
+                if (msg->msg == PLINK_EXIT_CODE)
+                {
+                    exitcode = 1;
+                    printf("Exit\n");
+                    break;
+                }
+            }
+        }
+
+        if (recvpkt.fd != PLINK_INVALID_FD)
+            close(recvpkt.fd);
+
+        frmcnt++;
+
+#if 0
+        if (frmcnt >= frames)
+        {
+            msg.header.type = PLINK_TYPE_MESSAGE;
+            msg.header.size = DATA_SIZE(PlinkMsg);
+            msg.msg = PLINK_EXIT_CODE;
+            sendpkt.list[0] = &msg;
+            sendpkt.num = 1;
+            sendpkt.fd = PLINK_INVALID_FD;
+            if (PLINK_send(plink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                errExit("Failed to send data.");
+            break;
+        }
+#endif
+    } while (exitcode == 0);
+
+cleanup:
+    sleep(1); // Sleep one second to make sure server is ready for exit
+    PLINK_close(plink, 0);
+    if (fp != NULL)
+        fclose(fp);
+
+    return 0;
+}
+

BIN
test/face_detect/graph_info.bin


+ 220 - 0
test/face_detect/io.c

@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include "io.h"
+
+/******************************************************************************
+ *                                                                            *
+ *                      Utils for process data                                *
+ *                                                                            *
+ * ***************************************************************************/
+
+/*!
+ * \brief Get the type of a file (JPEG, PNG, tensor, txt, bin) by its suffix.
+ *
+ * \param filename Image file name.
+ * \return A value of enum file_type, e.g. FILE_PNG, FILE_JPEG or FILE_TENSOR.
+ *
+ */
+enum file_type get_file_type(const char* filename) {
+  enum file_type type = 0;
+  const char* ptr;
+  char sep = '.';
+  uint32_t pos, n;
+  char buff[32] = {0};
+
+  ptr = strrchr(filename, sep);
+  pos = ptr - filename;
+  n = strlen(filename) - (pos + 1);
+  strncpy(buff, filename + (pos + 1), n);
+
+  if (strcmp(buff, "jpg") == 0 || strcmp(buff, "jpeg") == 0 || strcmp(buff, "JPG") == 0 ||
+      strcmp(buff, "JPEG") == 0) {
+    type = FILE_JPEG;
+  } else if (strcmp(buff, "png") == 0 || strcmp(buff, "PNG") == 0) {
+    type = FILE_PNG;
+  } else if (strcmp(buff, "tensor") == 0) {
+    type = FILE_TENSOR;
+  } else if (strcmp(buff, "txt") == 0) {
+    type = FILE_TXT;
+  } else if (strcmp(buff, "bin") == 0) {
+    type = FILE_BIN;
+  } else if (strcmp(buff, "rgb") == 0) {
+    type = 0;
+  } else if (strcmp(buff, "bgr") == 0) {
+    type = 0;
+  } else {
+    printf("Unsupport for .%s file\n", buff);
+    exit(1);
+  }
+  return type;
+}
+
+/*!
+ * \brief Save float data into file.
+ *
+ * \param filename The file that you will put the data into.
+ * \param data The float data that you will put into file.
+ * \param size The size of data.
+ */
+void save_data_to_file(const char* filename, float* data, uint32_t size) {
+  int i = 0;
+  FILE* fp = fopen(filename, "w+");
+  for (i = 0; i < size; i++) {
+    if (i == size - 1) {
+      fprintf(fp, "%f", data[i]);
+    } else {
+      fprintf(fp, "%f\n", data[i]);
+    }
+  }
+  fclose(fp);
+}
+
+void save_uint8_to_file(const char* filename, uint8_t* data, uint32_t size) {
+  int i = 0;
+  FILE* fp = fopen(filename, "w+");
+  for (i = 0; i < size; i++) {
+    if (i == size - 1) {
+      fprintf(fp, "%d", data[i]);
+    } else {
+      fprintf(fp, "%d\n", data[i]);
+    }
+  }
+  fclose(fp);
+}
+
+void save_uint8_to_binary(const char* filename, uint8_t* data, uint32_t size) {
+  FILE* fp = fopen(filename, "wb"); // binary mode: the data is raw bytes, not text
+  fwrite(data, 1, size, fp);
+  fclose(fp);
+}
+
+/*!
+ * \brief Read the entire contents of a file into a caller-provided buffer.
+ *
+ * \param filename File to read.
+ * \param buffer Destination buffer; must be at least as large as the file.
+ * \return 0 on success, -1 on failure.
+ */
+int fill_buffer_from_file(const char* filename, char* buffer) {
+  int file_size;
+  int ret;
+  FILE* fp = fopen(filename, "rb");
+  if (fp == NULL) {
+    printf("Invalid input file: %s\n", filename);
+    return -1;
+  }
+  fseek(fp, 0, SEEK_END);
+  file_size = ftell(fp);
+  rewind(fp);
+
+  ret = fread(buffer, 1, file_size, fp);
+  if (ret != file_size) {
+    printf("Read input file error\n");
+    fclose(fp); // do not leak the handle on the error path
+    return -1;
+  }
+
+  fclose(fp);
+  return 0;
+}
+
+char* get_binary_from_file(const char* filename, int *size) {
+  char* buffer = NULL;
+  int file_size;
+  int ret;
+  FILE* fp = fopen(filename, "rb");
+  if (fp == NULL) {
+    printf("Invalid input file: %s\n", filename);
+    return NULL;
+  }
+  fseek(fp, 0, SEEK_END);
+  file_size = ftell(fp);
+  rewind(fp);
+
+  buffer = (char*)malloc(file_size);  // NOLINT
+  if (buffer == NULL) {
+    printf("Malloc fail\n");
+    fclose(fp);
+    return NULL;
+  }
+
+  ret = fread(buffer, 1, file_size, fp);
+  if (ret != file_size) {
+    printf("Read input file error\n");
+    free(buffer); // release both resources on the error path
+    fclose(fp);
+    return NULL;
+  }
+
+  fclose(fp);
+  if (size) {
+    *size = file_size;
+  }
+  return buffer;
+}
+
+char** read_string_from_file(const char* filename, int* len) {
+  char buff[MAX_FILENAME_LEN];
+  char** result = (char**)malloc(sizeof(char*) * (MAX_FILE_LINE * MAX_INPUT_NUMBER));  // NOLINT
+  char *find, *sep, *inter;
+  FILE* fp = fopen(filename, "r");
+  if (fp == NULL) {
+    return NULL;
+  }
+  int cnt = 0;
+  while (fgets(buff, sizeof(buff), fp)) {
+    if (strcmp(buff, "\n") == 0) continue;
+    find = strchr(buff, '\n');
+    if (find) {
+      *find = '\0';
+    }
+    sep = strtok(buff, " ");                                  // NOLINT
+    inter = (char*)malloc((strlen(sep) + 2) * sizeof(char));  // NOLINT
+    memcpy(inter, sep, strlen(sep) + 1);
+    result[cnt++] = inter;
+    while (sep != NULL) {
+      sep = strtok(NULL, " ");  // NOLINT
+      if (sep) {
+        inter = (char*)malloc((strlen(sep) + 2) * sizeof(char));  // NOLINT
+        memcpy(inter, sep, strlen(sep) + 1);
+        result[cnt++] = inter;
+      }
+    }
+  }
+  *len = cnt;
+  fclose(fp);
+  return result;
+}
+
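+/*
+ * Joins tensor dimensions with '_' separators; a minimal sketch:
+ *   uint32_t shape[] = {1, 3, 300, 300};
+ *   char buf[128];
+ *   shape2string(shape, 4, buf, sizeof(buf)); // buf == "1_3_300_300"
+ * main.c uses this to build per-output dump file names.
+ */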
+uint32_t shape2string(uint32_t* shape, uint32_t dim_num, char* buf, uint32_t buf_sz) {
+  uint32_t s;
+  uint32_t count;
+  if (NULL == shape || NULL == buf || dim_num == 0 || buf_sz == 0) {
+    return 0;
+  }
+  count = 0;
+  for (s = 0; s < dim_num; s++) {
+    if (count >= buf_sz) {
+      break;
+    }
+    count += snprintf(&buf[count], buf_sz - count, "%d_", shape[s]);
+  }
+  buf[count - 1] = 0;
+  return count;
+}

+ 47 - 0
test/face_detect/io.h

@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#ifndef HHB_IO_H_
+#define HHB_IO_H_
+
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define MAX_FILE_LINE 50001
+#define MAX_INPUT_NUMBER 4
+#define MAX_FILENAME_LEN 1280
+
+enum file_type { FILE_PNG, FILE_JPEG, FILE_TENSOR, FILE_TXT, FILE_BIN };
+
+/* Utils to process image data*/
+enum file_type get_file_type(const char* filename);
+void save_data_to_file(const char* filename, float* data, uint32_t size);
+void save_uint8_to_file(const char* filename, uint8_t* data, uint32_t size);
+void save_uint8_to_binary(const char* filename, uint8_t* data, uint32_t size);
+char* get_binary_from_file(const char* filename, int* size);
+int fill_buffer_from_file(const char* filename, char *buffer);
+char** read_string_from_file(const char* filename, int* len);
+uint32_t shape2string(uint32_t* shape, uint32_t dim_num, char* buf, uint32_t buf_sz);
+
+#endif  // HHB_IO_H_

+ 218 - 0
test/face_detect/main.c

@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process.h"
+
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+#define FILE_LENGTH         1028
+#define SHAPE_LENGHT        128
+
+void *csinn_(char *params);
+void csinn_run(void *data0,  void *td);
+void *csinn_nbg(const char *nbg_file_name);
+
+int input_size[] = {1 * 3 * 300 * 300, };
+const char model_name[] = "network";
+
+#define RESIZE_HEIGHT       300
+#define RESIZE_WIDTH        300
+#define CROP_HEGHT          300
+#define CROP_WIDTH          300
+#define R_MEAN              0
+#define G_MEAN              0
+#define B_MEAN              0
+#define SCALE               1.0
+
+/*
+ * Preprocess function
+ */
+void preprocess(struct image_data *img, int is_rgb, int to_bgr)
+{
+    uint32_t new_height, new_width;
+    uint32_t min_side;
+    if (is_rgb) {
+        im2rgb(img);
+    }
+    if (RESIZE_WIDTH == 0) {
+        min_side = MIN(img->shape[0], img->shape[1]);
+        new_height = (uint32_t) (img->shape[0] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        new_width = (uint32_t) (img->shape[1] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        imresize(img, new_height, new_width);
+    } else {
+        imresize(img, RESIZE_HEIGHT, RESIZE_WIDTH);
+    }
+    data_crop(img, CROP_HEGHT, CROP_WIDTH);
+    sub_mean(img, R_MEAN, G_MEAN, B_MEAN);
+    data_scale(img, SCALE);
+    if(to_bgr) {
+        imrgb2bgr(img);
+    }
+    imhwc2chw(img);
+}
+
+static void print_tensor_info(struct csinn_tensor *t) {
+    printf("\n=== tensor info ===\n");
+    printf("shape: ");
+    for (int j = 0; j < t->dim_count; j++) {
+        printf("%d ", t->dim[j]);
+    }
+    printf("\n");
+    if (t->dtype == CSINN_DTYPE_UINT8) {
+        printf("scale: %f\n", t->qinfo->scale);
+        printf("zero point: %d\n", t->qinfo->zero_point);
+    }
+    printf("data pointer: %p\n", t->data);
+}
+
+
+/*
+ * Postprocess function
+ */
+static void postprocess(void *sess, const char *filename_prefix) {
+    int output_num, input_num;
+    struct csinn_tensor *input = csinn_alloc_tensor(NULL);
+    struct csinn_tensor *output = csinn_alloc_tensor(NULL);
+
+    input_num = csinn_get_input_number(sess);
+    for (int i = 0; i < input_num; i++) {
+        input->data = NULL;
+        csinn_get_input(i, input, sess);
+        print_tensor_info(input);
+        
+    }
+
+    output_num = csinn_get_output_number(sess);
+    for (int i = 0; i < output_num; i++) {
+        output->data = NULL;
+        csinn_get_output(i, output, sess);
+        print_tensor_info(output);
+
+        struct csinn_tensor *foutput = shl_ref_tensor_transform_f32(output);
+        shl_show_top5(foutput, sess);
+        char filename[FILE_LENGTH] = {0};
+        char shape[SHAPE_LENGHT] = {0};
+        shape2string(output->dim, output->dim_count, shape, SHAPE_LENGHT);
+        snprintf(filename, FILE_LENGTH, "%s_output%u_%s.txt", filename_prefix, i, shape);
+        int output_size = csinn_tensor_size(foutput);
+        save_data_to_file(filename, (float*)foutput->data, output_size);
+
+        shl_ref_tensor_transform_free_f32(foutput);
+
+    }
+}
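+
+/*
+ * Each output tensor is dumped to <input_basename>_output<i>_<shape>.txt
+ * (shape rendered by shape2string), after shl_show_top5() prints a summary.
+ */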
+
+void *create_graph(char *params_path) {
+    int binary_size;
+    char *params = get_binary_from_file(params_path, &binary_size);
+    if (params == NULL) {
+        return NULL;
+    }
+
+    size_t len = strlen(params_path);
+    if (len >= 7 && strcmp(params_path + len - 7, ".params") == 0) {
+        // create general graph
+        return csinn_(params);
+    }
+
+    if (len >= 3 && strcmp(params_path + len - 3, ".bm") == 0) {
+        struct shl_bm_sections *section = (struct shl_bm_sections *)(params + 4128);
+        if (section->graph_offset) {
+            return csinn_import_binary_model(params);
+        } else {
+            return csinn_(params + section->params_offset * 4096);
+        }
+    } else {
+        return NULL;
+    }
+}
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    int input_num = 1;
+    int output_num = 3;
+    int input_group_num = 1;
+    int i;
+
+    if (argc < (2 + input_num)) {
+        printf("Please set valide args: ./model.elf model.params "
+                "[tensor1/image1 ...] [tensor2/image2 ...]\n");
+        return -1;
+    } else {
+        if (argc == 3 && get_file_type(argv[2]) == FILE_TXT) {
+            data_path = read_string_from_file(argv[2], &input_group_num);
+            input_group_num /= input_num;
+        } else {
+            data_path = argv + 2;
+            input_group_num = (argc - 2) / input_num;
+        }
+    }
+
+    void *sess = create_graph(argv[1]);
+    if (sess == NULL) {
+        printf("Failed to create graph from %s\n", argv[1]);
+        return -1;
+    }
+
+    uint8_t *input[input_num];
+    float *inputf[input_num];
+    char filename_prefix[FILE_LENGTH] = {0};
+    
+    uint64_t start_time, end_time;
+    for (i = 0; i < input_group_num; i++) {
+        /* set input */
+        for (int j = 0; j < input_num; j++) {
+            int input_len = csinn_tensor_size(((struct csinn_session *)sess)->input[j]);
+            struct image_data *img = get_input_data(data_path[i * input_num + j], input_len);
+            if (get_file_type(data_path[i * input_num + j]) == FILE_PNG ||
+                get_file_type(data_path[i * input_num + j]) == FILE_JPEG) {
+                preprocess(img, 1, 1);
+            }
+            inputf[j] = img->data;
+            free_image_data(img);
+
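+            /* quantize the float32 image into the dtype expected by input j */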
+            input[j] = shl_ref_f32_to_input_dtype(j, inputf[j], sess);
+        }
+        
+        start_time = shl_get_timespec();
+        csinn_run(input[0], sess);
+        end_time = shl_get_timespec();
+        printf("Run graph execution time: %.5fms, FPS=%.2f\n",
+               ((float)(end_time - start_time)) / 1000000,
+               1000000000.0 / ((float)(end_time - start_time)));
+
+        snprintf(filename_prefix, FILE_LENGTH, "%s", basename(data_path[i * input_num]));
+        postprocess(sess, filename_prefix);
+
+        for (int j = 0; j < input_num; j++) {
+            shl_mem_free(inputf[j]);
+            shl_mem_free(input[j]);
+        }
+    }
+
+    csinn_session_deinit(sess);
+    csinn_free_session(sess);
+
+    return 0;
+}
+

+ 271 - 0
test/face_detect/main_det.c

@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include "io.h"
+#include "shl_ref.h"
+#include "process.h"
+#include "detect.h"
+#include "output_120_out0_nchw_1_2_7668_1.h"
+
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+#define FILE_LENGTH         1028
+#define SHAPE_LENGHT        128
+
+void *csinn_(char *params);
+void csinn_run(void *data0,  void *td);
+void *csinn_nbg(const char *nbg_file_name);
+
+int input_size[] = {1 * 3 * 304 * 304, };
+const char model_name[] = "network";
+
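+/* Pascal VOC label set: background + 20 object classes */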
+const char class_name[][FILE_LENGTH] = {
+    "background", "aeroplane", "bicycle", "bird", "boat",
+    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
+    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
+    "sofa", "train", "tvmonitor"
+};
+
+#define RESIZE_HEIGHT       304
+#define RESIZE_WIDTH        304
+#define CROP_HEGHT          304
+#define CROP_WIDTH          304
+#define R_MEAN              0
+#define G_MEAN              0
+#define B_MEAN              0
+#define SCALE               1.0
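+/* HHB-emitted preprocessing constants: fixed 304x304 detector input, zero mean, unit scale */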
+
+/*
+ * Preprocess: convert to RGB, resize (aspect-preserving with the short side
+ * scaled to RESIZE_HEIGHT when RESIZE_WIDTH == 0, otherwise a fixed
+ * RESIZE_HEIGHT x RESIZE_WIDTH), crop, subtract the channel means, scale,
+ * optionally swap to BGR, and transpose HWC -> CHW.
+ */
+void preprocess(struct image_data *img, int is_rgb, int to_bgr)
+{
+    uint32_t new_height, new_width;
+    uint32_t min_side;
+    if (is_rgb) {
+        im2rgb(img);
+    }
+    if (RESIZE_WIDTH == 0) {
+        min_side = MIN(img->shape[0], img->shape[1]);
+        new_height = (uint32_t) (img->shape[0] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        new_width = (uint32_t) (img->shape[1] * (((float)RESIZE_HEIGHT) / (float)min_side));
+        imresize(img, new_height, new_width);
+    } else {
+        imresize(img, RESIZE_HEIGHT, RESIZE_WIDTH);
+    }
+    data_crop(img, CROP_HEGHT, CROP_WIDTH);
+    sub_mean(img, R_MEAN, G_MEAN, B_MEAN);
+    data_scale(img, SCALE);
+    if (to_bgr) {
+        imrgb2bgr(img);
+    }
+    imhwc2chw(img);
+}
+
+static void print_tensor_info(struct csinn_tensor *t) {
+    printf("\n=== tensor info ===\n");
+    printf("shape: ");
+    for (int j = 0; j < t->dim_count; j++) {
+        printf("%d ", t->dim[j]);
+    }
+    printf("\n");
+    if (t->dtype == CSINN_DTYPE_UINT8) {
+        printf("scale: %f\n", t->qinfo->scale);
+        printf("zero point: %d\n", t->qinfo->zero_point);
+    }
+    printf("data pointer: %p\n", t->data);
+}
+
+
+/*
+ * Postprocess: dump each output like the classification demo, then decode
+ * the SSD head (locations + confidences + prior boxes) into labeled boxes.
+ */
+static void postprocess(void *sess, const char *filename_prefix) {
+    int output_num, input_num;
+    struct csinn_tensor *input = csinn_alloc_tensor(NULL);
+    struct csinn_tensor *output = csinn_alloc_tensor(NULL);
+
+    input_num = csinn_get_input_number(sess);
+    for (int i = 0; i < input_num; i++) {
+        input->data = NULL;
+        csinn_get_input(i, input, sess);
+        print_tensor_info(input);
+    }
+
+    float *location = NULL;
+    float *confidence = NULL;
+
+    output_num = csinn_get_output_number(sess);
+    for (int i = 0; i < output_num; i++) {
+        output->data = NULL;
+        csinn_get_output(i, output, sess);
+        print_tensor_info(output);
+
+        struct csinn_tensor *foutput = shl_ref_tensor_transform_f32(output);
+        shl_show_top5(foutput, sess);
+        char filename[FILE_LENGTH] = {0};
+        char shape[SHAPE_LENGHT] = {0};
+        shape2string(output->dim, output->dim_count, shape, SHAPE_LENGHT);
+        snprintf(filename, FILE_LENGTH, "%s_output%d_%s.txt", filename_prefix, i, shape);
+        int output_size = csinn_tensor_size(foutput);
+        save_data_to_file(filename, (float *)foutput->data, output_size);
+
+        /* keep the dequantized buffers alive: output 0 carries the box
+         * locations and output 1 the class confidences consumed by
+         * ssdforward() below, so foutput is deliberately not freed here */
+        if (i == 0) location = (float *)foutput->data;
+        if (i == 1) confidence = (float *)foutput->data;
+    }
+
+    if (location == NULL || confidence == NULL) {
+        printf("detection outputs missing, skip ssd decode\n");
+        return;
+    }
+
+    BBoxOut out[100];
+    BBox gbboxes[num_prior];
+
+    int num = ssdforward(location, confidence, priorbox, gbboxes, out);
+
+    printf("%d\n", num);
+    for (int i = 0; i < num; i++) {
+        printf("%d, label=%s, score=%f, x1=%f, y1=%f, x2=%f, y2=%f\n",
+               out[i].label, class_name[out[i].label], out[i].score,
+               out[i].xmin, out[i].ymin, out[i].xmax, out[i].ymax);
+    }
+}
+
+void *create_graph(char *params_path) {
+    void *ret;
+    int binary_size;
+    char *params = get_binary_from_file(params_path, &binary_size);
+    if (params == NULL) {
+        return NULL;
+    }
+
+    char *suffix = params_path + (strlen(params_path) - 7);
+    if (strcmp(suffix, ".params") == 0) {
+        // separate .params file: create the general graph
+        ret = csinn_(params);
+        /* freeing here assumes the backend copied everything it needs
+         * during session setup */
+        free(params);
+        return ret;
+    }
+
+    suffix = params_path + (strlen(params_path) - 3);
+    if (strcmp(suffix, ".bm") == 0) {
+        /* binary model: the section table sits after the 4128-byte header;
+         * params_offset is counted in 4096-byte pages */
+        struct shl_bm_sections *section = (struct shl_bm_sections *)(params + 4128);
+        if (section->graph_offset) {
+            ret = csinn_import_binary_model(params);
+            free(params);
+            return ret;
+        } else {
+            ret = csinn_(params + section->params_offset * 4096);
+            free(params);
+            return ret;
+        }
+    } else {
+        free(params);
+        return NULL;
+    }
+}
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    int input_num = 1;
+    int output_num = 3;
+    int input_group_num = 1;
+    int i;
+
+    if (argc < (2 + input_num)) {
+        printf("Please set valide args: ./model.elf model.params "
+                "[tensor1/image1 ...] [tensor2/image2 ...]\n");
+        return -1;
+    } else {
+        if (argc == 3 && get_file_type(argv[2]) == FILE_TXT) {
+            data_path = read_string_from_file(argv[2], &input_group_num);
+            input_group_num /= input_num;
+        } else {
+            data_path = argv + 2;
+            input_group_num = (argc - 2) / input_num;
+        }
+    }
+
+    void *sess = create_graph(argv[1]);
+    if (sess == NULL) {
+        printf("Failed to create graph from %s\n", argv[1]);
+        return -1;
+    }
+
+    uint8_t *input[input_num];
+    float *inputf[input_num];
+    char filename_prefix[FILE_LENGTH] = {0};
+    void *input_aligned[input_num];
+    for (i = 0; i < input_num; i++) {
+        input_aligned[i] = shl_mem_alloc_aligned(input_size[i], 0);
+    }
+
+    uint64_t start_time, end_time;
+    char *loop_s;
+    int32_t loop_time = 1;
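+    /* HHB_LOOP_TIME repeats each inference for benchmarking (default: 1) */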
+    loop_s = getenv("HHB_LOOP_TIME");
+    if (loop_s) {
+        loop_time = atoi(loop_s);
+    }
+    for (i = 0; i < input_group_num; i++) {
+        /* set input */
+        for (int j = 0; j < input_num; j++) {
+            if (get_file_type(data_path[i * input_num + j]) == 0) {
+                /* raw binary tensor: load it directly, no preprocessing */
+                input[j] = (uint8_t *)get_binary_from_file(data_path[i * input_num + j], &input_size[j]);
+            } else {
+                struct image_data *img = get_input_data(data_path[i * input_num + j], input_size[j]);
+                if (get_file_type(data_path[i * input_num + j]) == FILE_PNG ||
+                    get_file_type(data_path[i * input_num + j]) == FILE_JPEG) {
+                    preprocess(img, 1, 1);
+                }
+                inputf[j] = img->data;
+                free_image_data(img);
+
+                /* quantize the float32 image into the dtype expected by input j */
+                input[j] = shl_ref_f32_to_input_dtype(j, inputf[j], sess);
+            }
+        }
+        memcpy(input_aligned[0], input[0], input_size[0]);
+
+        for (int t = 0; t < loop_time; t++) {
+            start_time = shl_get_timespec();
+            csinn_run(input_aligned[0], sess);
+            end_time = shl_get_timespec();
+            printf("Run graph execution time: %.5fms, FPS=%.2f\n",
+                   ((float)(end_time - start_time)) / 1000000,
+                   1000000000.0 / ((float)(end_time - start_time)));
+
+            snprintf(filename_prefix, FILE_LENGTH, "%s", basename(data_path[i * input_num]));
+            postprocess(sess, filename_prefix);
+        }
+        for (int j = 0; j < input_num; j++) {
+            if (get_file_type(data_path[i * input_num + j]) != 0) {
+                shl_mem_free(inputf[j]);
+            }
+            shl_mem_free(input[j]);
+        }
+    }
+
+    csinn_session_deinit(sess);
+    csinn_free_session(sess);
+
+    return 0;
+}
+

+ 3803 - 0
test/face_detect/model.c

@@ -0,0 +1,3803 @@
+/* auto generate by HHB_VERSION 2.0.21 */
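+/*
+ * Generated graph builder. Every layer below follows the same pattern:
+ * allocate the output/kernel/bias tensors, point their data and qinfo at
+ * fixed offsets inside params_base, fill the op parameters, and register
+ * the op with its csinn_*_init() call.
+ */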
+
+#include <shl_pnna.h>
+
+void *csinn_(char *params_base) {
+  struct csinn_session *sess = csinn_alloc_session();
+  sess->base_quant_type = CSINN_QUANT_UINT8_ASYM;
+  sess->model.priority = 0;
+  sess->base_api = CSINN_LIGHT;
+  sess->base_dtype = CSINN_DTYPE_UINT8;
+  csinn_session_init(sess);
+  csinn_set_input_number(1, sess);
+  csinn_set_output_number(2, sess);
+
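+  /* graph input: 1x3x300x300 uint8 image; the first two ops apply the
+   * input scale (multiply) and per-channel shift (add) that HHB folded
+   * into the graph */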
+  struct csinn_tensor *data = csinn_alloc_tensor(sess);
+  data->name = "data@@multiply_1_0";
+  data->dtype = CSINN_DTYPE_UINT8;
+  data->layout = CSINN_LAYOUT_NCHW;
+  data->dim[0] = 1;
+  data->dim[1] = 3;
+  data->dim[2] = 300;
+  data->dim[3] = 300;
+  data->dim_count = 4;
+  data->qinfo = (struct csinn_quant_info *)(params_base + 0);
+  data->quant_channel = 1;
+  struct csinn_tensor *output_1 = csinn_alloc_tensor(sess);
+  output_1->name = "output_1";
+  output_1->dtype = CSINN_DTYPE_UINT8;
+  output_1->layout = CSINN_LAYOUT_NCHW;
+  output_1->dim[0] = 1;
+  output_1->dim[1] = 3;
+  output_1->dim[2] = 300;
+  output_1->dim[3] = 300;
+  output_1->dim_count = 4;
+  output_1->qinfo = (struct csinn_quant_info *)(params_base + 24);
+  output_1->quant_channel = 1;
+  struct csinn_tensor *rhs_1 = csinn_alloc_tensor(sess);
+  rhs_1->name = "rhs_1";
+  rhs_1->data = params_base + 72;
+  rhs_1->is_const = 1;
+  rhs_1->dtype = CSINN_DTYPE_UINT8;
+  rhs_1->layout = CSINN_LAYOUT_OIHW;
+  rhs_1->dim[0] = 1;
+  rhs_1->dim[1] = 1;
+  rhs_1->dim[2] = 1;
+  rhs_1->dim[3] = 1;
+  rhs_1->dim_count = 4;
+  rhs_1->qinfo = (struct csinn_quant_info *)(params_base + 48);
+  rhs_1->quant_channel = 1;
+  struct csinn_diso_params *params_1 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_1->base.name = "multiply_1";
+  csinn_mul_init(data, rhs_1, output_1, params_1);
+  struct csinn_tensor *output_4 = csinn_alloc_tensor(sess);
+  output_4->name = "output_4";
+  output_4->dtype = CSINN_DTYPE_UINT8;
+  output_4->layout = CSINN_LAYOUT_NCHW;
+  output_4->dim[0] = 1;
+  output_4->dim[1] = 3;
+  output_4->dim[2] = 300;
+  output_4->dim[3] = 300;
+  output_4->dim_count = 4;
+  output_4->qinfo = (struct csinn_quant_info *)(params_base + 73);
+  output_4->quant_channel = 1;
+  struct csinn_tensor *rhs_4 = csinn_alloc_tensor(sess);
+  rhs_4->name = "rhs_4";
+  rhs_4->data = params_base + 121;
+  rhs_4->is_const = 1;
+  rhs_4->dtype = CSINN_DTYPE_UINT8;
+  rhs_4->layout = CSINN_LAYOUT_OIHW;
+  rhs_4->dim[0] = 1;
+  rhs_4->dim[1] = 3;
+  rhs_4->dim[2] = 1;
+  rhs_4->dim[3] = 1;
+  rhs_4->dim_count = 4;
+  rhs_4->qinfo = (struct csinn_quant_info *)(params_base + 97);
+  rhs_4->quant_channel = 1;
+  struct csinn_diso_params *params_4 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_4->base.name = "add_2";
+  csinn_add_init(output_1, rhs_4, output_4, params_4);
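+  /* backbone stem: 3x3 conv, stride 2, 3 -> 32 channels, 300x300 -> 150x150 */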
+  struct csinn_tensor *output_6 = csinn_alloc_tensor(sess);
+  output_6->name = "output_6";
+  output_6->dtype = CSINN_DTYPE_UINT8;
+  output_6->layout = CSINN_LAYOUT_NCHW;
+  output_6->dim[0] = 1;
+  output_6->dim[1] = 32;
+  output_6->dim[2] = 150;
+  output_6->dim[3] = 150;
+  output_6->dim_count = 4;
+  output_6->qinfo = (struct csinn_quant_info *)(params_base + 124);
+  output_6->quant_channel = 1;
+  struct csinn_tensor *kernel_6 = csinn_alloc_tensor(sess);
+  kernel_6->name = "kernel_6";
+  kernel_6->data = params_base + 172;
+  kernel_6->is_const = 1;
+  kernel_6->dtype = CSINN_DTYPE_UINT8;
+  kernel_6->layout = CSINN_LAYOUT_OIHW;
+  kernel_6->dim[0] = 32;
+  kernel_6->dim[1] = 3;
+  kernel_6->dim[2] = 3;
+  kernel_6->dim[3] = 3;
+  kernel_6->dim_count = 4;
+  kernel_6->qinfo = (struct csinn_quant_info *)(params_base + 148);
+  kernel_6->quant_channel = 1;
+  struct csinn_tensor *bias_6 = csinn_alloc_tensor(sess);
+  bias_6->name = "bias_6";
+  bias_6->data = params_base + 1060;
+  bias_6->is_const = 1;
+  bias_6->dtype = CSINN_DTYPE_INT32;
+  bias_6->layout = CSINN_LAYOUT_O;
+  bias_6->dim[0] = 32;
+  bias_6->dim_count = 1;
+  bias_6->qinfo = (struct csinn_quant_info *)(params_base + 1036);
+  bias_6->quant_channel = 1;
+  struct csinn_conv2d_params *params_6 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_6->group = 1;
+  params_6->stride_height = 2;
+  params_6->stride_width = 2;
+  params_6->dilation_height = 1;
+  params_6->dilation_width = 1;
+  params_6->conv_extra.kernel_tm = NULL;
+  params_6->conv_extra.conv_mode = CSINN_DIRECT;
+  params_6->pad_top = 1;
+  params_6->pad_left = 1;
+  params_6->pad_down = 1;
+  params_6->pad_right = 1;
+  params_6->base.name = "conv2d_3_fuse_multiply_4_fuse_add_5";
+  csinn_conv2d_init(output_4, output_6, kernel_6, bias_6, params_6);
+  struct csinn_tensor *output_7 = csinn_alloc_tensor(sess);
+  output_7->name = "output_7";
+  output_7->dtype = CSINN_DTYPE_UINT8;
+  output_7->layout = CSINN_LAYOUT_NCHW;
+  output_7->dim[0] = 1;
+  output_7->dim[1] = 32;
+  output_7->dim[2] = 150;
+  output_7->dim[3] = 150;
+  output_7->dim_count = 4;
+  output_7->qinfo = (struct csinn_quant_info *)(params_base + 1188);
+  output_7->quant_channel = 1;
+  struct csinn_relu_params *params_7 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_7->base.name = "relu_6";
+  csinn_relu_init(output_6, output_7, params_7);
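+  /* depthwise-separable block (MobileNet-style): 3x3 depthwise conv with
+   * group == channels, followed by a 1x1 pointwise conv -- this pattern
+   * repeats throughout the backbone */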
+  struct csinn_tensor *output_8 = csinn_alloc_tensor(sess);
+  output_8->name = "output_8";
+  output_8->dtype = CSINN_DTYPE_UINT8;
+  output_8->layout = CSINN_LAYOUT_NCHW;
+  output_8->dim[0] = 1;
+  output_8->dim[1] = 32;
+  output_8->dim[2] = 150;
+  output_8->dim[3] = 150;
+  output_8->dim_count = 4;
+  output_8->qinfo = (struct csinn_quant_info *)(params_base + 1212);
+  output_8->quant_channel = 1;
+  struct csinn_tensor *kernel_8 = csinn_alloc_tensor(sess);
+  kernel_8->name = "kernel_8";
+  kernel_8->data = params_base + 1260;
+  kernel_8->is_const = 1;
+  kernel_8->dtype = CSINN_DTYPE_UINT8;
+  kernel_8->layout = CSINN_LAYOUT_O1HW;
+  kernel_8->dim[0] = 32;
+  kernel_8->dim[1] = 1;
+  kernel_8->dim[2] = 3;
+  kernel_8->dim[3] = 3;
+  kernel_8->dim_count = 4;
+  kernel_8->qinfo = (struct csinn_quant_info *)(params_base + 1236);
+  kernel_8->quant_channel = 1;
+  struct csinn_tensor *bias_8 = csinn_alloc_tensor(sess);
+  bias_8->name = "bias_8";
+  bias_8->data = params_base + 1572;
+  bias_8->is_const = 1;
+  bias_8->dtype = CSINN_DTYPE_INT32;
+  bias_8->layout = CSINN_LAYOUT_O;
+  bias_8->dim[0] = 32;
+  bias_8->dim_count = 1;
+  bias_8->qinfo = (struct csinn_quant_info *)(params_base + 1548);
+  bias_8->quant_channel = 1;
+  struct csinn_conv2d_params *params_8 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_8->group = 32;
+  params_8->stride_height = 1;
+  params_8->stride_width = 1;
+  params_8->dilation_height = 1;
+  params_8->dilation_width = 1;
+  params_8->conv_extra.kernel_tm = NULL;
+  params_8->conv_extra.conv_mode = CSINN_DIRECT;
+  params_8->pad_top = 1;
+  params_8->pad_left = 1;
+  params_8->pad_down = 1;
+  params_8->pad_right = 1;
+  params_8->base.name = "conv2d_7_fuse_multiply_8_fuse_add_9";
+  csinn_conv2d_init(output_7, output_8, kernel_8, bias_8, params_8);
+  struct csinn_tensor *output_9 = csinn_alloc_tensor(sess);
+  output_9->name = "output_9";
+  output_9->dtype = CSINN_DTYPE_UINT8;
+  output_9->layout = CSINN_LAYOUT_NCHW;
+  output_9->dim[0] = 1;
+  output_9->dim[1] = 32;
+  output_9->dim[2] = 150;
+  output_9->dim[3] = 150;
+  output_9->dim_count = 4;
+  output_9->qinfo = (struct csinn_quant_info *)(params_base + 1700);
+  output_9->quant_channel = 1;
+  struct csinn_relu_params *params_9 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_9->base.name = "relu_10";
+  csinn_relu_init(output_8, output_9, params_9);
+  struct csinn_tensor *output_10 = csinn_alloc_tensor(sess);
+  output_10->name = "output_10";
+  output_10->dtype = CSINN_DTYPE_UINT8;
+  output_10->layout = CSINN_LAYOUT_NCHW;
+  output_10->dim[0] = 1;
+  output_10->dim[1] = 64;
+  output_10->dim[2] = 150;
+  output_10->dim[3] = 150;
+  output_10->dim_count = 4;
+  output_10->qinfo = (struct csinn_quant_info *)(params_base + 1724);
+  output_10->quant_channel = 1;
+  struct csinn_tensor *kernel_10 = csinn_alloc_tensor(sess);
+  kernel_10->name = "kernel_10";
+  kernel_10->data = params_base + 1772;
+  kernel_10->is_const = 1;
+  kernel_10->dtype = CSINN_DTYPE_UINT8;
+  kernel_10->layout = CSINN_LAYOUT_OIHW;
+  kernel_10->dim[0] = 64;
+  kernel_10->dim[1] = 32;
+  kernel_10->dim[2] = 1;
+  kernel_10->dim[3] = 1;
+  kernel_10->dim_count = 4;
+  kernel_10->qinfo = (struct csinn_quant_info *)(params_base + 1748);
+  kernel_10->quant_channel = 1;
+  struct csinn_tensor *bias_10 = csinn_alloc_tensor(sess);
+  bias_10->name = "bias_10";
+  bias_10->data = params_base + 3844;
+  bias_10->is_const = 1;
+  bias_10->dtype = CSINN_DTYPE_INT32;
+  bias_10->layout = CSINN_LAYOUT_O;
+  bias_10->dim[0] = 64;
+  bias_10->dim_count = 1;
+  bias_10->qinfo = (struct csinn_quant_info *)(params_base + 3820);
+  bias_10->quant_channel = 1;
+  struct csinn_conv2d_params *params_10 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_10->group = 1;
+  params_10->stride_height = 1;
+  params_10->stride_width = 1;
+  params_10->dilation_height = 1;
+  params_10->dilation_width = 1;
+  params_10->conv_extra.kernel_tm = NULL;
+  params_10->conv_extra.conv_mode = CSINN_DIRECT;
+  params_10->pad_top = 0;
+  params_10->pad_left = 0;
+  params_10->pad_down = 0;
+  params_10->pad_right = 0;
+  params_10->base.name = "conv2d_11_fuse_multiply_12_fuse_add_13";
+  csinn_conv2d_init(output_9, output_10, kernel_10, bias_10, params_10);
+  struct csinn_tensor *output_11 = csinn_alloc_tensor(sess);
+  output_11->name = "output_11";
+  output_11->dtype = CSINN_DTYPE_UINT8;
+  output_11->layout = CSINN_LAYOUT_NCHW;
+  output_11->dim[0] = 1;
+  output_11->dim[1] = 64;
+  output_11->dim[2] = 150;
+  output_11->dim[3] = 150;
+  output_11->dim_count = 4;
+  output_11->qinfo = (struct csinn_quant_info *)(params_base + 4100);
+  output_11->quant_channel = 1;
+  struct csinn_relu_params *params_11 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_11->base.name = "relu_14";
+  csinn_relu_init(output_10, output_11, params_11);
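+  /* stride-2 depthwise downsamples 150x150 -> 75x75; the following
+   * pointwise conv widens 64 -> 128 channels */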
+  struct csinn_tensor *output_12 = csinn_alloc_tensor(sess);
+  output_12->name = "output_12";
+  output_12->dtype = CSINN_DTYPE_UINT8;
+  output_12->layout = CSINN_LAYOUT_NCHW;
+  output_12->dim[0] = 1;
+  output_12->dim[1] = 64;
+  output_12->dim[2] = 75;
+  output_12->dim[3] = 75;
+  output_12->dim_count = 4;
+  output_12->qinfo = (struct csinn_quant_info *)(params_base + 4124);
+  output_12->quant_channel = 1;
+  struct csinn_tensor *kernel_12 = csinn_alloc_tensor(sess);
+  kernel_12->name = "kernel_12";
+  kernel_12->data = params_base + 4172;
+  kernel_12->is_const = 1;
+  kernel_12->dtype = CSINN_DTYPE_UINT8;
+  kernel_12->layout = CSINN_LAYOUT_O1HW;
+  kernel_12->dim[0] = 64;
+  kernel_12->dim[1] = 1;
+  kernel_12->dim[2] = 3;
+  kernel_12->dim[3] = 3;
+  kernel_12->dim_count = 4;
+  kernel_12->qinfo = (struct csinn_quant_info *)(params_base + 4148);
+  kernel_12->quant_channel = 1;
+  struct csinn_tensor *bias_12 = csinn_alloc_tensor(sess);
+  bias_12->name = "bias_12";
+  bias_12->data = params_base + 4772;
+  bias_12->is_const = 1;
+  bias_12->dtype = CSINN_DTYPE_INT32;
+  bias_12->layout = CSINN_LAYOUT_O;
+  bias_12->dim[0] = 64;
+  bias_12->dim_count = 1;
+  bias_12->qinfo = (struct csinn_quant_info *)(params_base + 4748);
+  bias_12->quant_channel = 1;
+  struct csinn_conv2d_params *params_12 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_12->group = 64;
+  params_12->stride_height = 2;
+  params_12->stride_width = 2;
+  params_12->dilation_height = 1;
+  params_12->dilation_width = 1;
+  params_12->conv_extra.kernel_tm = NULL;
+  params_12->conv_extra.conv_mode = CSINN_DIRECT;
+  params_12->pad_top = 1;
+  params_12->pad_left = 1;
+  params_12->pad_down = 1;
+  params_12->pad_right = 1;
+  params_12->base.name = "conv2d_15_fuse_multiply_16_fuse_add_17";
+  csinn_conv2d_init(output_11, output_12, kernel_12, bias_12, params_12);
+  struct csinn_tensor *output_13 = csinn_alloc_tensor(sess);
+  output_13->name = "output_13";
+  output_13->dtype = CSINN_DTYPE_UINT8;
+  output_13->layout = CSINN_LAYOUT_NCHW;
+  output_13->dim[0] = 1;
+  output_13->dim[1] = 64;
+  output_13->dim[2] = 75;
+  output_13->dim[3] = 75;
+  output_13->dim_count = 4;
+  output_13->qinfo = (struct csinn_quant_info *)(params_base + 5028);
+  output_13->quant_channel = 1;
+  struct csinn_relu_params *params_13 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_13->base.name = "relu_18";
+  csinn_relu_init(output_12, output_13, params_13);
+  struct csinn_tensor *output_14 = csinn_alloc_tensor(sess);
+  output_14->name = "output_14";
+  output_14->dtype = CSINN_DTYPE_UINT8;
+  output_14->layout = CSINN_LAYOUT_NCHW;
+  output_14->dim[0] = 1;
+  output_14->dim[1] = 128;
+  output_14->dim[2] = 75;
+  output_14->dim[3] = 75;
+  output_14->dim_count = 4;
+  output_14->qinfo = (struct csinn_quant_info *)(params_base + 5052);
+  output_14->quant_channel = 1;
+  struct csinn_tensor *kernel_14 = csinn_alloc_tensor(sess);
+  kernel_14->name = "kernel_14";
+  kernel_14->data = params_base + 5100;
+  kernel_14->is_const = 1;
+  kernel_14->dtype = CSINN_DTYPE_UINT8;
+  kernel_14->layout = CSINN_LAYOUT_OIHW;
+  kernel_14->dim[0] = 128;
+  kernel_14->dim[1] = 64;
+  kernel_14->dim[2] = 1;
+  kernel_14->dim[3] = 1;
+  kernel_14->dim_count = 4;
+  kernel_14->qinfo = (struct csinn_quant_info *)(params_base + 5076);
+  kernel_14->quant_channel = 1;
+  struct csinn_tensor *bias_14 = csinn_alloc_tensor(sess);
+  bias_14->name = "bias_14";
+  bias_14->data = params_base + 13316;
+  bias_14->is_const = 1;
+  bias_14->dtype = CSINN_DTYPE_INT32;
+  bias_14->layout = CSINN_LAYOUT_O;
+  bias_14->dim[0] = 128;
+  bias_14->dim_count = 1;
+  bias_14->qinfo = (struct csinn_quant_info *)(params_base + 13292);
+  bias_14->quant_channel = 1;
+  struct csinn_conv2d_params *params_14 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_14->group = 1;
+  params_14->stride_height = 1;
+  params_14->stride_width = 1;
+  params_14->dilation_height = 1;
+  params_14->dilation_width = 1;
+  params_14->conv_extra.kernel_tm = NULL;
+  params_14->conv_extra.conv_mode = CSINN_DIRECT;
+  params_14->pad_top = 0;
+  params_14->pad_left = 0;
+  params_14->pad_down = 0;
+  params_14->pad_right = 0;
+  params_14->base.name = "conv2d_19_fuse_multiply_20_fuse_add_21";
+  csinn_conv2d_init(output_13, output_14, kernel_14, bias_14, params_14);
+  struct csinn_tensor *output_15 = csinn_alloc_tensor(sess);
+  output_15->name = "output_15";
+  output_15->dtype = CSINN_DTYPE_UINT8;
+  output_15->layout = CSINN_LAYOUT_NCHW;
+  output_15->dim[0] = 1;
+  output_15->dim[1] = 128;
+  output_15->dim[2] = 75;
+  output_15->dim[3] = 75;
+  output_15->dim_count = 4;
+  output_15->qinfo = (struct csinn_quant_info *)(params_base + 13828);
+  output_15->quant_channel = 1;
+  struct csinn_relu_params *params_15 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_15->base.name = "relu_22";
+  csinn_relu_init(output_14, output_15, params_15);
+  struct csinn_tensor *output_16 = csinn_alloc_tensor(sess);
+  output_16->name = "output_16";
+  output_16->dtype = CSINN_DTYPE_UINT8;
+  output_16->layout = CSINN_LAYOUT_NCHW;
+  output_16->dim[0] = 1;
+  output_16->dim[1] = 128;
+  output_16->dim[2] = 75;
+  output_16->dim[3] = 75;
+  output_16->dim_count = 4;
+  output_16->qinfo = (struct csinn_quant_info *)(params_base + 13852);
+  output_16->quant_channel = 1;
+  struct csinn_tensor *kernel_16 = csinn_alloc_tensor(sess);
+  kernel_16->name = "kernel_16";
+  kernel_16->data = params_base + 13900;
+  kernel_16->is_const = 1;
+  kernel_16->dtype = CSINN_DTYPE_UINT8;
+  kernel_16->layout = CSINN_LAYOUT_O1HW;
+  kernel_16->dim[0] = 128;
+  kernel_16->dim[1] = 1;
+  kernel_16->dim[2] = 3;
+  kernel_16->dim[3] = 3;
+  kernel_16->dim_count = 4;
+  kernel_16->qinfo = (struct csinn_quant_info *)(params_base + 13876);
+  kernel_16->quant_channel = 1;
+  struct csinn_tensor *bias_16 = csinn_alloc_tensor(sess);
+  bias_16->name = "bias_16";
+  bias_16->data = params_base + 15076;
+  bias_16->is_const = 1;
+  bias_16->dtype = CSINN_DTYPE_INT32;
+  bias_16->layout = CSINN_LAYOUT_O;
+  bias_16->dim[0] = 128;
+  bias_16->dim_count = 1;
+  bias_16->qinfo = (struct csinn_quant_info *)(params_base + 15052);
+  bias_16->quant_channel = 1;
+  struct csinn_conv2d_params *params_16 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_16->group = 128;
+  params_16->stride_height = 1;
+  params_16->stride_width = 1;
+  params_16->dilation_height = 1;
+  params_16->dilation_width = 1;
+  params_16->conv_extra.kernel_tm = NULL;
+  params_16->conv_extra.conv_mode = CSINN_DIRECT;
+  params_16->pad_top = 1;
+  params_16->pad_left = 1;
+  params_16->pad_down = 1;
+  params_16->pad_right = 1;
+  params_16->base.name = "conv2d_23_fuse_multiply_24_fuse_add_25";
+  csinn_conv2d_init(output_15, output_16, kernel_16, bias_16, params_16);
+  struct csinn_tensor *output_17 = csinn_alloc_tensor(sess);
+  output_17->name = "output_17";
+  output_17->dtype = CSINN_DTYPE_UINT8;
+  output_17->layout = CSINN_LAYOUT_NCHW;
+  output_17->dim[0] = 1;
+  output_17->dim[1] = 128;
+  output_17->dim[2] = 75;
+  output_17->dim[3] = 75;
+  output_17->dim_count = 4;
+  output_17->qinfo = (struct csinn_quant_info *)(params_base + 15588);
+  output_17->quant_channel = 1;
+  struct csinn_relu_params *params_17 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_17->base.name = "relu_26";
+  csinn_relu_init(output_16, output_17, params_17);
+  struct csinn_tensor *output_18 = csinn_alloc_tensor(sess);
+  output_18->name = "output_18";
+  output_18->dtype = CSINN_DTYPE_UINT8;
+  output_18->layout = CSINN_LAYOUT_NCHW;
+  output_18->dim[0] = 1;
+  output_18->dim[1] = 128;
+  output_18->dim[2] = 75;
+  output_18->dim[3] = 75;
+  output_18->dim_count = 4;
+  output_18->qinfo = (struct csinn_quant_info *)(params_base + 15612);
+  output_18->quant_channel = 1;
+  struct csinn_tensor *kernel_18 = csinn_alloc_tensor(sess);
+  kernel_18->name = "kernel_18";
+  kernel_18->data = params_base + 15660;
+  kernel_18->is_const = 1;
+  kernel_18->dtype = CSINN_DTYPE_UINT8;
+  kernel_18->layout = CSINN_LAYOUT_OIHW;
+  kernel_18->dim[0] = 128;
+  kernel_18->dim[1] = 128;
+  kernel_18->dim[2] = 1;
+  kernel_18->dim[3] = 1;
+  kernel_18->dim_count = 4;
+  kernel_18->qinfo = (struct csinn_quant_info *)(params_base + 15636);
+  kernel_18->quant_channel = 1;
+  struct csinn_tensor *bias_18 = csinn_alloc_tensor(sess);
+  bias_18->name = "bias_18";
+  bias_18->data = params_base + 32068;
+  bias_18->is_const = 1;
+  bias_18->dtype = CSINN_DTYPE_INT32;
+  bias_18->layout = CSINN_LAYOUT_O;
+  bias_18->dim[0] = 128;
+  bias_18->dim_count = 1;
+  bias_18->qinfo = (struct csinn_quant_info *)(params_base + 32044);
+  bias_18->quant_channel = 1;
+  struct csinn_conv2d_params *params_18 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_18->group = 1;
+  params_18->stride_height = 1;
+  params_18->stride_width = 1;
+  params_18->dilation_height = 1;
+  params_18->dilation_width = 1;
+  params_18->conv_extra.kernel_tm = NULL;
+  params_18->conv_extra.conv_mode = CSINN_DIRECT;
+  params_18->pad_top = 0;
+  params_18->pad_left = 0;
+  params_18->pad_down = 0;
+  params_18->pad_right = 0;
+  params_18->base.name = "conv2d_27_fuse_multiply_28_fuse_add_29";
+  csinn_conv2d_init(output_17, output_18, kernel_18, bias_18, params_18);
+  struct csinn_tensor *output_19 = csinn_alloc_tensor(sess);
+  output_19->name = "output_19";
+  output_19->dtype = CSINN_DTYPE_UINT8;
+  output_19->layout = CSINN_LAYOUT_NCHW;
+  output_19->dim[0] = 1;
+  output_19->dim[1] = 128;
+  output_19->dim[2] = 75;
+  output_19->dim[3] = 75;
+  output_19->dim_count = 4;
+  output_19->qinfo = (struct csinn_quant_info *)(params_base + 32580);
+  output_19->quant_channel = 1;
+  struct csinn_relu_params *params_19 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_19->base.name = "relu_30";
+  csinn_relu_init(output_18, output_19, params_19);
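+  /* stride-2 depthwise downsamples 75x75 -> 38x38; channels widen to 256 */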
+  struct csinn_tensor *output_20 = csinn_alloc_tensor(sess);
+  output_20->name = "output_20";
+  output_20->dtype = CSINN_DTYPE_UINT8;
+  output_20->layout = CSINN_LAYOUT_NCHW;
+  output_20->dim[0] = 1;
+  output_20->dim[1] = 128;
+  output_20->dim[2] = 38;
+  output_20->dim[3] = 38;
+  output_20->dim_count = 4;
+  output_20->qinfo = (struct csinn_quant_info *)(params_base + 32604);
+  output_20->quant_channel = 1;
+  struct csinn_tensor *kernel_20 = csinn_alloc_tensor(sess);
+  kernel_20->name = "kernel_20";
+  kernel_20->data = params_base + 32652;
+  kernel_20->is_const = 1;
+  kernel_20->dtype = CSINN_DTYPE_UINT8;
+  kernel_20->layout = CSINN_LAYOUT_O1HW;
+  kernel_20->dim[0] = 128;
+  kernel_20->dim[1] = 1;
+  kernel_20->dim[2] = 3;
+  kernel_20->dim[3] = 3;
+  kernel_20->dim_count = 4;
+  kernel_20->qinfo = (struct csinn_quant_info *)(params_base + 32628);
+  kernel_20->quant_channel = 1;
+  struct csinn_tensor *bias_20 = csinn_alloc_tensor(sess);
+  bias_20->name = "bias_20";
+  bias_20->data = params_base + 33828;
+  bias_20->is_const = 1;
+  bias_20->dtype = CSINN_DTYPE_INT32;
+  bias_20->layout = CSINN_LAYOUT_O;
+  bias_20->dim[0] = 128;
+  bias_20->dim_count = 1;
+  bias_20->qinfo = (struct csinn_quant_info *)(params_base + 33804);
+  bias_20->quant_channel = 1;
+  struct csinn_conv2d_params *params_20 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_20->group = 128;
+  params_20->stride_height = 2;
+  params_20->stride_width = 2;
+  params_20->dilation_height = 1;
+  params_20->dilation_width = 1;
+  params_20->conv_extra.kernel_tm = NULL;
+  params_20->conv_extra.conv_mode = CSINN_DIRECT;
+  params_20->pad_top = 1;
+  params_20->pad_left = 1;
+  params_20->pad_down = 1;
+  params_20->pad_right = 1;
+  params_20->base.name = "conv2d_31_fuse_multiply_32_fuse_add_33";
+  csinn_conv2d_init(output_19, output_20, kernel_20, bias_20, params_20);
+  struct csinn_tensor *output_21 = csinn_alloc_tensor(sess);
+  output_21->name = "output_21";
+  output_21->dtype = CSINN_DTYPE_UINT8;
+  output_21->layout = CSINN_LAYOUT_NCHW;
+  output_21->dim[0] = 1;
+  output_21->dim[1] = 128;
+  output_21->dim[2] = 38;
+  output_21->dim[3] = 38;
+  output_21->dim_count = 4;
+  output_21->qinfo = (struct csinn_quant_info *)(params_base + 34340);
+  output_21->quant_channel = 1;
+  struct csinn_relu_params *params_21 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_21->base.name = "relu_34";
+  csinn_relu_init(output_20, output_21, params_21);
+  struct csinn_tensor *output_22 = csinn_alloc_tensor(sess);
+  output_22->name = "output_22";
+  output_22->dtype = CSINN_DTYPE_UINT8;
+  output_22->layout = CSINN_LAYOUT_NCHW;
+  output_22->dim[0] = 1;
+  output_22->dim[1] = 256;
+  output_22->dim[2] = 38;
+  output_22->dim[3] = 38;
+  output_22->dim_count = 4;
+  output_22->qinfo = (struct csinn_quant_info *)(params_base + 34364);
+  output_22->quant_channel = 1;
+  struct csinn_tensor *kernel_22 = csinn_alloc_tensor(sess);
+  kernel_22->name = "kernel_22";
+  kernel_22->data = params_base + 34412;
+  kernel_22->is_const = 1;
+  kernel_22->dtype = CSINN_DTYPE_UINT8;
+  kernel_22->layout = CSINN_LAYOUT_OIHW;
+  kernel_22->dim[0] = 256;
+  kernel_22->dim[1] = 128;
+  kernel_22->dim[2] = 1;
+  kernel_22->dim[3] = 1;
+  kernel_22->dim_count = 4;
+  kernel_22->qinfo = (struct csinn_quant_info *)(params_base + 34388);
+  kernel_22->quant_channel = 1;
+  struct csinn_tensor *bias_22 = csinn_alloc_tensor(sess);
+  bias_22->name = "bias_22";
+  bias_22->data = params_base + 67204;
+  bias_22->is_const = 1;
+  bias_22->dtype = CSINN_DTYPE_INT32;
+  bias_22->layout = CSINN_LAYOUT_O;
+  bias_22->dim[0] = 256;
+  bias_22->dim_count = 1;
+  bias_22->qinfo = (struct csinn_quant_info *)(params_base + 67180);
+  bias_22->quant_channel = 1;
+  struct csinn_conv2d_params *params_22 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_22->group = 1;
+  params_22->stride_height = 1;
+  params_22->stride_width = 1;
+  params_22->dilation_height = 1;
+  params_22->dilation_width = 1;
+  params_22->conv_extra.kernel_tm = NULL;
+  params_22->conv_extra.conv_mode = CSINN_DIRECT;
+  params_22->pad_top = 0;
+  params_22->pad_left = 0;
+  params_22->pad_down = 0;
+  params_22->pad_right = 0;
+  params_22->base.name = "conv2d_35_fuse_multiply_36_fuse_add_37";
+  csinn_conv2d_init(output_21, output_22, kernel_22, bias_22, params_22);
+  struct csinn_tensor *output_23 = csinn_alloc_tensor(sess);
+  output_23->name = "output_23";
+  output_23->dtype = CSINN_DTYPE_UINT8;
+  output_23->layout = CSINN_LAYOUT_NCHW;
+  output_23->dim[0] = 1;
+  output_23->dim[1] = 256;
+  output_23->dim[2] = 38;
+  output_23->dim[3] = 38;
+  output_23->dim_count = 4;
+  output_23->qinfo = (struct csinn_quant_info *)(params_base + 68228);
+  output_23->quant_channel = 1;
+  struct csinn_relu_params *params_23 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_23->base.name = "relu_38";
+  csinn_relu_init(output_22, output_23, params_23);
+  struct csinn_tensor *output_24 = csinn_alloc_tensor(sess);
+  output_24->name = "output_24";
+  output_24->dtype = CSINN_DTYPE_UINT8;
+  output_24->layout = CSINN_LAYOUT_NCHW;
+  output_24->dim[0] = 1;
+  output_24->dim[1] = 256;
+  output_24->dim[2] = 38;
+  output_24->dim[3] = 38;
+  output_24->dim_count = 4;
+  output_24->qinfo = (struct csinn_quant_info *)(params_base + 68252);
+  output_24->quant_channel = 1;
+  struct csinn_tensor *kernel_24 = csinn_alloc_tensor(sess);
+  kernel_24->name = "kernel_24";
+  kernel_24->data = params_base + 68300;
+  kernel_24->is_const = 1;
+  kernel_24->dtype = CSINN_DTYPE_UINT8;
+  kernel_24->layout = CSINN_LAYOUT_O1HW;
+  kernel_24->dim[0] = 256;
+  kernel_24->dim[1] = 1;
+  kernel_24->dim[2] = 3;
+  kernel_24->dim[3] = 3;
+  kernel_24->dim_count = 4;
+  kernel_24->qinfo = (struct csinn_quant_info *)(params_base + 68276);
+  kernel_24->quant_channel = 1;
+  struct csinn_tensor *bias_24 = csinn_alloc_tensor(sess);
+  bias_24->name = "bias_24";
+  bias_24->data = params_base + 70628;
+  bias_24->is_const = 1;
+  bias_24->dtype = CSINN_DTYPE_INT32;
+  bias_24->layout = CSINN_LAYOUT_O;
+  bias_24->dim[0] = 256;
+  bias_24->dim_count = 1;
+  bias_24->qinfo = (struct csinn_quant_info *)(params_base + 70604);
+  bias_24->quant_channel = 1;
+  struct csinn_conv2d_params *params_24 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_24->group = 256;
+  params_24->stride_height = 1;
+  params_24->stride_width = 1;
+  params_24->dilation_height = 1;
+  params_24->dilation_width = 1;
+  params_24->conv_extra.kernel_tm = NULL;
+  params_24->conv_extra.conv_mode = CSINN_DIRECT;
+  params_24->pad_top = 1;
+  params_24->pad_left = 1;
+  params_24->pad_down = 1;
+  params_24->pad_right = 1;
+  params_24->base.name = "conv2d_39_fuse_multiply_40_fuse_add_41";
+  csinn_conv2d_init(output_23, output_24, kernel_24, bias_24, params_24);
+  struct csinn_tensor *output_25 = csinn_alloc_tensor(sess);
+  output_25->name = "output_25";
+  output_25->dtype = CSINN_DTYPE_UINT8;
+  output_25->layout = CSINN_LAYOUT_NCHW;
+  output_25->dim[0] = 1;
+  output_25->dim[1] = 256;
+  output_25->dim[2] = 38;
+  output_25->dim[3] = 38;
+  output_25->dim_count = 4;
+  output_25->qinfo = (struct csinn_quant_info *)(params_base + 71652);
+  output_25->quant_channel = 1;
+  struct csinn_relu_params *params_25 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_25->base.name = "relu_42";
+  csinn_relu_init(output_24, output_25, params_25);
+  struct csinn_tensor *output_26 = csinn_alloc_tensor(sess);
+  output_26->name = "output_26";
+  output_26->dtype = CSINN_DTYPE_UINT8;
+  output_26->layout = CSINN_LAYOUT_NCHW;
+  output_26->dim[0] = 1;
+  output_26->dim[1] = 256;
+  output_26->dim[2] = 38;
+  output_26->dim[3] = 38;
+  output_26->dim_count = 4;
+  output_26->qinfo = (struct csinn_quant_info *)(params_base + 71676);
+  output_26->quant_channel = 1;
+  struct csinn_tensor *kernel_26 = csinn_alloc_tensor(sess);
+  kernel_26->name = "kernel_26";
+  kernel_26->data = params_base + 71724;
+  kernel_26->is_const = 1;
+  kernel_26->dtype = CSINN_DTYPE_UINT8;
+  kernel_26->layout = CSINN_LAYOUT_OIHW;
+  kernel_26->dim[0] = 256;
+  kernel_26->dim[1] = 256;
+  kernel_26->dim[2] = 1;
+  kernel_26->dim[3] = 1;
+  kernel_26->dim_count = 4;
+  kernel_26->qinfo = (struct csinn_quant_info *)(params_base + 71700);
+  kernel_26->quant_channel = 1;
+  struct csinn_tensor *bias_26 = csinn_alloc_tensor(sess);
+  bias_26->name = "bias_26";
+  bias_26->data = params_base + 137284;
+  bias_26->is_const = 1;
+  bias_26->dtype = CSINN_DTYPE_INT32;
+  bias_26->layout = CSINN_LAYOUT_O;
+  bias_26->dim[0] = 256;
+  bias_26->dim_count = 1;
+  bias_26->qinfo = (struct csinn_quant_info *)(params_base + 137260);
+  bias_26->quant_channel = 1;
+  struct csinn_conv2d_params *params_26 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_26->group = 1;
+  params_26->stride_height = 1;
+  params_26->stride_width = 1;
+  params_26->dilation_height = 1;
+  params_26->dilation_width = 1;
+  params_26->conv_extra.kernel_tm = NULL;
+  params_26->conv_extra.conv_mode = CSINN_DIRECT;
+  params_26->pad_top = 0;
+  params_26->pad_left = 0;
+  params_26->pad_down = 0;
+  params_26->pad_right = 0;
+  params_26->base.name = "conv2d_43_fuse_multiply_44_fuse_add_45";
+  csinn_conv2d_init(output_25, output_26, kernel_26, bias_26, params_26);
+  struct csinn_tensor *output_27 = csinn_alloc_tensor(sess);
+  output_27->name = "output_27";
+  output_27->dtype = CSINN_DTYPE_UINT8;
+  output_27->layout = CSINN_LAYOUT_NCHW;
+  output_27->dim[0] = 1;
+  output_27->dim[1] = 256;
+  output_27->dim[2] = 38;
+  output_27->dim[3] = 38;
+  output_27->dim_count = 4;
+  output_27->qinfo = (struct csinn_quant_info *)(params_base + 138308);
+  output_27->quant_channel = 1;
+  struct csinn_relu_params *params_27 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_27->base.name = "relu_46";
+  csinn_relu_init(output_26, output_27, params_27);
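+  /* stride-2 depthwise downsamples 38x38 -> 19x19; channels widen to 512 */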
+  struct csinn_tensor *output_28 = csinn_alloc_tensor(sess);
+  output_28->name = "output_28";
+  output_28->dtype = CSINN_DTYPE_UINT8;
+  output_28->layout = CSINN_LAYOUT_NCHW;
+  output_28->dim[0] = 1;
+  output_28->dim[1] = 256;
+  output_28->dim[2] = 19;
+  output_28->dim[3] = 19;
+  output_28->dim_count = 4;
+  output_28->qinfo = (struct csinn_quant_info *)(params_base + 138332);
+  output_28->quant_channel = 1;
+  struct csinn_tensor *kernel_28 = csinn_alloc_tensor(sess);
+  kernel_28->name = "kernel_28";
+  kernel_28->data = params_base + 138380;
+  kernel_28->is_const = 1;
+  kernel_28->dtype = CSINN_DTYPE_UINT8;
+  kernel_28->layout = CSINN_LAYOUT_O1HW;
+  kernel_28->dim[0] = 256;
+  kernel_28->dim[1] = 1;
+  kernel_28->dim[2] = 3;
+  kernel_28->dim[3] = 3;
+  kernel_28->dim_count = 4;
+  kernel_28->qinfo = (struct csinn_quant_info *)(params_base + 138356);
+  kernel_28->quant_channel = 1;
+  struct csinn_tensor *bias_28 = csinn_alloc_tensor(sess);
+  bias_28->name = "bias_28";
+  bias_28->data = params_base + 140708;
+  bias_28->is_const = 1;
+  bias_28->dtype = CSINN_DTYPE_INT32;
+  bias_28->layout = CSINN_LAYOUT_O;
+  bias_28->dim[0] = 256;
+  bias_28->dim_count = 1;
+  bias_28->qinfo = (struct csinn_quant_info *)(params_base + 140684);
+  bias_28->quant_channel = 1;
+  struct csinn_conv2d_params *params_28 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_28->group = 256;
+  params_28->stride_height = 2;
+  params_28->stride_width = 2;
+  params_28->dilation_height = 1;
+  params_28->dilation_width = 1;
+  params_28->conv_extra.kernel_tm = NULL;
+  params_28->conv_extra.conv_mode = CSINN_DIRECT;
+  params_28->pad_top = 1;
+  params_28->pad_left = 1;
+  params_28->pad_down = 1;
+  params_28->pad_right = 1;
+  params_28->base.name = "conv2d_47_fuse_multiply_48_fuse_add_49";
+  csinn_conv2d_init(output_27, output_28, kernel_28, bias_28, params_28);
+  struct csinn_tensor *output_29 = csinn_alloc_tensor(sess);
+  output_29->name = "output_29";
+  output_29->dtype = CSINN_DTYPE_UINT8;
+  output_29->layout = CSINN_LAYOUT_NCHW;
+  output_29->dim[0] = 1;
+  output_29->dim[1] = 256;
+  output_29->dim[2] = 19;
+  output_29->dim[3] = 19;
+  output_29->dim_count = 4;
+  output_29->qinfo = (struct csinn_quant_info *)(params_base + 141732);
+  output_29->quant_channel = 1;
+  struct csinn_relu_params *params_29 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_29->base.name = "relu_50";
+  csinn_relu_init(output_28, output_29, params_29);
+  struct csinn_tensor *output_30 = csinn_alloc_tensor(sess);
+  output_30->name = "output_30";
+  output_30->dtype = CSINN_DTYPE_UINT8;
+  output_30->layout = CSINN_LAYOUT_NCHW;
+  output_30->dim[0] = 1;
+  output_30->dim[1] = 512;
+  output_30->dim[2] = 19;
+  output_30->dim[3] = 19;
+  output_30->dim_count = 4;
+  output_30->qinfo = (struct csinn_quant_info *)(params_base + 141756);
+  output_30->quant_channel = 1;
+  struct csinn_tensor *kernel_30 = csinn_alloc_tensor(sess);
+  kernel_30->name = "kernel_30";
+  kernel_30->data = params_base + 141804;
+  kernel_30->is_const = 1;
+  kernel_30->dtype = CSINN_DTYPE_UINT8;
+  kernel_30->layout = CSINN_LAYOUT_OIHW;
+  kernel_30->dim[0] = 512;
+  kernel_30->dim[1] = 256;
+  kernel_30->dim[2] = 1;
+  kernel_30->dim[3] = 1;
+  kernel_30->dim_count = 4;
+  kernel_30->qinfo = (struct csinn_quant_info *)(params_base + 141780);
+  kernel_30->quant_channel = 1;
+  struct csinn_tensor *bias_30 = csinn_alloc_tensor(sess);
+  bias_30->name = "bias_30";
+  bias_30->data = params_base + 272900;
+  bias_30->is_const = 1;
+  bias_30->dtype = CSINN_DTYPE_INT32;
+  bias_30->layout = CSINN_LAYOUT_O;
+  bias_30->dim[0] = 512;
+  bias_30->dim_count = 1;
+  bias_30->qinfo = (struct csinn_quant_info *)(params_base + 272876);
+  bias_30->quant_channel = 1;
+  struct csinn_conv2d_params *params_30 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_30->group = 1;
+  params_30->stride_height = 1;
+  params_30->stride_width = 1;
+  params_30->dilation_height = 1;
+  params_30->dilation_width = 1;
+  params_30->conv_extra.kernel_tm = NULL;
+  params_30->conv_extra.conv_mode = CSINN_DIRECT;
+  params_30->pad_top = 0;
+  params_30->pad_left = 0;
+  params_30->pad_down = 0;
+  params_30->pad_right = 0;
+  params_30->base.name = "conv2d_51_fuse_multiply_52_fuse_add_53";
+  csinn_conv2d_init(output_29, output_30, kernel_30, bias_30, params_30);
+  struct csinn_tensor *output_31 = csinn_alloc_tensor(sess);
+  output_31->name = "output_31";
+  output_31->dtype = CSINN_DTYPE_UINT8;
+  output_31->layout = CSINN_LAYOUT_NCHW;
+  output_31->dim[0] = 1;
+  output_31->dim[1] = 512;
+  output_31->dim[2] = 19;
+  output_31->dim[3] = 19;
+  output_31->dim_count = 4;
+  output_31->qinfo = (struct csinn_quant_info *)(params_base + 274948);
+  output_31->quant_channel = 1;
+  struct csinn_relu_params *params_31 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_31->base.name = "relu_54";
+  csinn_relu_init(output_30, output_31, params_31);
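+  /* repeated 512-channel depthwise-separable blocks at 19x19 */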
+  struct csinn_tensor *output_32 = csinn_alloc_tensor(sess);
+  output_32->name = "output_32";
+  output_32->dtype = CSINN_DTYPE_UINT8;
+  output_32->layout = CSINN_LAYOUT_NCHW;
+  output_32->dim[0] = 1;
+  output_32->dim[1] = 512;
+  output_32->dim[2] = 19;
+  output_32->dim[3] = 19;
+  output_32->dim_count = 4;
+  output_32->qinfo = (struct csinn_quant_info *)(params_base + 274972);
+  output_32->quant_channel = 1;
+  struct csinn_tensor *kernel_32 = csinn_alloc_tensor(sess);
+  kernel_32->name = "kernel_32";
+  kernel_32->data = params_base + 275020;
+  kernel_32->is_const = 1;
+  kernel_32->dtype = CSINN_DTYPE_UINT8;
+  kernel_32->layout = CSINN_LAYOUT_O1HW;
+  kernel_32->dim[0] = 512;
+  kernel_32->dim[1] = 1;
+  kernel_32->dim[2] = 3;
+  kernel_32->dim[3] = 3;
+  kernel_32->dim_count = 4;
+  kernel_32->qinfo = (struct csinn_quant_info *)(params_base + 274996);
+  kernel_32->quant_channel = 1;
+  struct csinn_tensor *bias_32 = csinn_alloc_tensor(sess);
+  bias_32->name = "bias_32";
+  bias_32->data = params_base + 279652;
+  bias_32->is_const = 1;
+  bias_32->dtype = CSINN_DTYPE_INT32;
+  bias_32->layout = CSINN_LAYOUT_O;
+  bias_32->dim[0] = 512;
+  bias_32->dim_count = 1;
+  bias_32->qinfo = (struct csinn_quant_info *)(params_base + 279628);
+  bias_32->quant_channel = 1;
+  struct csinn_conv2d_params *params_32 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_32->group = 512;
+  params_32->stride_height = 1;
+  params_32->stride_width = 1;
+  params_32->dilation_height = 1;
+  params_32->dilation_width = 1;
+  params_32->conv_extra.kernel_tm = NULL;
+  params_32->conv_extra.conv_mode = CSINN_DIRECT;
+  params_32->pad_top = 1;
+  params_32->pad_left = 1;
+  params_32->pad_down = 1;
+  params_32->pad_right = 1;
+  params_32->base.name = "conv2d_55_fuse_multiply_56_fuse_add_57";
+  csinn_conv2d_init(output_31, output_32, kernel_32, bias_32, params_32);
+  struct csinn_tensor *output_33 = csinn_alloc_tensor(sess);
+  output_33->name = "output_33";
+  output_33->dtype = CSINN_DTYPE_UINT8;
+  output_33->layout = CSINN_LAYOUT_NCHW;
+  output_33->dim[0] = 1;
+  output_33->dim[1] = 512;
+  output_33->dim[2] = 19;
+  output_33->dim[3] = 19;
+  output_33->dim_count = 4;
+  output_33->qinfo = (struct csinn_quant_info *)(params_base + 281700);
+  output_33->quant_channel = 1;
+  struct csinn_relu_params *params_33 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_33->base.name = "relu_58";
+  csinn_relu_init(output_32, output_33, params_33);
+  struct csinn_tensor *output_34 = csinn_alloc_tensor(sess);
+  output_34->name = "output_34";
+  output_34->dtype = CSINN_DTYPE_UINT8;
+  output_34->layout = CSINN_LAYOUT_NCHW;
+  output_34->dim[0] = 1;
+  output_34->dim[1] = 512;
+  output_34->dim[2] = 19;
+  output_34->dim[3] = 19;
+  output_34->dim_count = 4;
+  output_34->qinfo = (struct csinn_quant_info *)(params_base + 281724);
+  output_34->quant_channel = 1;
+  struct csinn_tensor *kernel_34 = csinn_alloc_tensor(sess);
+  kernel_34->name = "kernel_34";
+  kernel_34->data = params_base + 281772;
+  kernel_34->is_const = 1;
+  kernel_34->dtype = CSINN_DTYPE_UINT8;
+  kernel_34->layout = CSINN_LAYOUT_OIHW;
+  kernel_34->dim[0] = 512;
+  kernel_34->dim[1] = 512;
+  kernel_34->dim[2] = 1;
+  kernel_34->dim[3] = 1;
+  kernel_34->dim_count = 4;
+  kernel_34->qinfo = (struct csinn_quant_info *)(params_base + 281748);
+  kernel_34->quant_channel = 1;
+  struct csinn_tensor *bias_34 = csinn_alloc_tensor(sess);
+  bias_34->name = "bias_34";
+  bias_34->data = params_base + 543940;
+  bias_34->is_const = 1;
+  bias_34->dtype = CSINN_DTYPE_INT32;
+  bias_34->layout = CSINN_LAYOUT_O;
+  bias_34->dim[0] = 512;
+  bias_34->dim_count = 1;
+  bias_34->qinfo = (struct csinn_quant_info *)(params_base + 543916);
+  bias_34->quant_channel = 1;
+  struct csinn_conv2d_params *params_34 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_34->group = 1;
+  params_34->stride_height = 1;
+  params_34->stride_width = 1;
+  params_34->dilation_height = 1;
+  params_34->dilation_width = 1;
+  params_34->conv_extra.kernel_tm = NULL;
+  params_34->conv_extra.conv_mode = CSINN_DIRECT;
+  params_34->pad_top = 0;
+  params_34->pad_left = 0;
+  params_34->pad_down = 0;
+  params_34->pad_right = 0;
+  params_34->base.name = "conv2d_59_fuse_multiply_60_fuse_add_61";
+  csinn_conv2d_init(output_33, output_34, kernel_34, bias_34, params_34);
+  struct csinn_tensor *output_35 = csinn_alloc_tensor(sess);
+  output_35->name = "output_35";
+  output_35->dtype = CSINN_DTYPE_UINT8;
+  output_35->layout = CSINN_LAYOUT_NCHW;
+  output_35->dim[0] = 1;
+  output_35->dim[1] = 512;
+  output_35->dim[2] = 19;
+  output_35->dim[3] = 19;
+  output_35->dim_count = 4;
+  output_35->qinfo = (struct csinn_quant_info *)(params_base + 545988);
+  output_35->quant_channel = 1;
+  struct csinn_relu_params *params_35 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_35->base.name = "relu_62";
+  csinn_relu_init(output_34, output_35, params_35);
+  struct csinn_tensor *output_36 = csinn_alloc_tensor(sess);
+  output_36->name = "output_36";
+  output_36->dtype = CSINN_DTYPE_UINT8;
+  output_36->layout = CSINN_LAYOUT_NCHW;
+  output_36->dim[0] = 1;
+  output_36->dim[1] = 512;
+  output_36->dim[2] = 19;
+  output_36->dim[3] = 19;
+  output_36->dim_count = 4;
+  output_36->qinfo = (struct csinn_quant_info *)(params_base + 546012);
+  output_36->quant_channel = 1;
+  struct csinn_tensor *kernel_36 = csinn_alloc_tensor(sess);
+  kernel_36->name = "kernel_36";
+  kernel_36->data = params_base + 546060;
+  kernel_36->is_const = 1;
+  kernel_36->dtype = CSINN_DTYPE_UINT8;
+  kernel_36->layout = CSINN_LAYOUT_O1HW;
+  kernel_36->dim[0] = 512;
+  kernel_36->dim[1] = 1;
+  kernel_36->dim[2] = 3;
+  kernel_36->dim[3] = 3;
+  kernel_36->dim_count = 4;
+  kernel_36->qinfo = (struct csinn_quant_info *)(params_base + 546036);
+  kernel_36->quant_channel = 1;
+  struct csinn_tensor *bias_36 = csinn_alloc_tensor(sess);
+  bias_36->name = "bias_36";
+  bias_36->data = params_base + 550692;
+  bias_36->is_const = 1;
+  bias_36->dtype = CSINN_DTYPE_INT32;
+  bias_36->layout = CSINN_LAYOUT_O;
+  bias_36->dim[0] = 512;
+  bias_36->dim_count = 1;
+  bias_36->qinfo = (struct csinn_quant_info *)(params_base + 550668);
+  bias_36->quant_channel = 1;
+  struct csinn_conv2d_params *params_36 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_36->group = 512;
+  params_36->stride_height = 1;
+  params_36->stride_width = 1;
+  params_36->dilation_height = 1;
+  params_36->dilation_width = 1;
+  params_36->conv_extra.kernel_tm = NULL;
+  params_36->conv_extra.conv_mode = CSINN_DIRECT;
+  params_36->pad_top = 1;
+  params_36->pad_left = 1;
+  params_36->pad_down = 1;
+  params_36->pad_right = 1;
+  params_36->base.name = "conv2d_63_fuse_multiply_64_fuse_add_65";
+  csinn_conv2d_init(output_35, output_36, kernel_36, bias_36, params_36);
+  struct csinn_tensor *output_37 = csinn_alloc_tensor(sess);
+  output_37->name = "output_37";
+  output_37->dtype = CSINN_DTYPE_UINT8;
+  output_37->layout = CSINN_LAYOUT_NCHW;
+  output_37->dim[0] = 1;
+  output_37->dim[1] = 512;
+  output_37->dim[2] = 19;
+  output_37->dim[3] = 19;
+  output_37->dim_count = 4;
+  output_37->qinfo = (struct csinn_quant_info *)(params_base + 552740);
+  output_37->quant_channel = 1;
+  struct csinn_relu_params *params_37 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_37->base.name = "relu_66";
+  csinn_relu_init(output_36, output_37, params_37);
+  struct csinn_tensor *output_38 = csinn_alloc_tensor(sess);
+  output_38->name = "output_38";
+  output_38->dtype = CSINN_DTYPE_UINT8;
+  output_38->layout = CSINN_LAYOUT_NCHW;
+  output_38->dim[0] = 1;
+  output_38->dim[1] = 512;
+  output_38->dim[2] = 19;
+  output_38->dim[3] = 19;
+  output_38->dim_count = 4;
+  output_38->qinfo = (struct csinn_quant_info *)(params_base + 552764);
+  output_38->quant_channel = 1;
+  struct csinn_tensor *kernel_38 = csinn_alloc_tensor(sess);
+  kernel_38->name = "kernel_38";
+  kernel_38->data = params_base + 552812;
+  kernel_38->is_const = 1;
+  kernel_38->dtype = CSINN_DTYPE_UINT8;
+  kernel_38->layout = CSINN_LAYOUT_OIHW;
+  kernel_38->dim[0] = 512;
+  kernel_38->dim[1] = 512;
+  kernel_38->dim[2] = 1;
+  kernel_38->dim[3] = 1;
+  kernel_38->dim_count = 4;
+  kernel_38->qinfo = (struct csinn_quant_info *)(params_base + 552788);
+  kernel_38->quant_channel = 1;
+  struct csinn_tensor *bias_38 = csinn_alloc_tensor(sess);
+  bias_38->name = "bias_38";
+  bias_38->data = params_base + 814980;
+  bias_38->is_const = 1;
+  bias_38->dtype = CSINN_DTYPE_INT32;
+  bias_38->layout = CSINN_LAYOUT_O;
+  bias_38->dim[0] = 512;
+  bias_38->dim_count = 1;
+  bias_38->qinfo = (struct csinn_quant_info *)(params_base + 814956);
+  bias_38->quant_channel = 1;
+  struct csinn_conv2d_params *params_38 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_38->group = 1;
+  params_38->stride_height = 1;
+  params_38->stride_width = 1;
+  params_38->dilation_height = 1;
+  params_38->dilation_width = 1;
+  params_38->conv_extra.kernel_tm = NULL;
+  params_38->conv_extra.conv_mode = CSINN_DIRECT;
+  params_38->pad_top = 0;
+  params_38->pad_left = 0;
+  params_38->pad_down = 0;
+  params_38->pad_right = 0;
+  params_38->base.name = "conv2d_67_fuse_multiply_68_fuse_add_69";
+  csinn_conv2d_init(output_37, output_38, kernel_38, bias_38, params_38);
+  struct csinn_tensor *output_39 = csinn_alloc_tensor(sess);
+  output_39->name = "output_39";
+  output_39->dtype = CSINN_DTYPE_UINT8;
+  output_39->layout = CSINN_LAYOUT_NCHW;
+  output_39->dim[0] = 1;
+  output_39->dim[1] = 512;
+  output_39->dim[2] = 19;
+  output_39->dim[3] = 19;
+  output_39->dim_count = 4;
+  output_39->qinfo = (struct csinn_quant_info *)(params_base + 817028);
+  output_39->quant_channel = 1;
+  struct csinn_relu_params *params_39 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_39->base.name = "relu_70";
+  csinn_relu_init(output_38, output_39, params_39);
+  struct csinn_tensor *output_40 = csinn_alloc_tensor(sess);
+  output_40->name = "output_40";
+  output_40->dtype = CSINN_DTYPE_UINT8;
+  output_40->layout = CSINN_LAYOUT_NCHW;
+  output_40->dim[0] = 1;
+  output_40->dim[1] = 512;
+  output_40->dim[2] = 19;
+  output_40->dim[3] = 19;
+  output_40->dim_count = 4;
+  output_40->qinfo = (struct csinn_quant_info *)(params_base + 817052);
+  output_40->quant_channel = 1;
+  struct csinn_tensor *kernel_40 = csinn_alloc_tensor(sess);
+  kernel_40->name = "kernel_40";
+  kernel_40->data = params_base + 817100;
+  kernel_40->is_const = 1;
+  kernel_40->dtype = CSINN_DTYPE_UINT8;
+  kernel_40->layout = CSINN_LAYOUT_O1HW;
+  kernel_40->dim[0] = 512;
+  kernel_40->dim[1] = 1;
+  kernel_40->dim[2] = 3;
+  kernel_40->dim[3] = 3;
+  kernel_40->dim_count = 4;
+  kernel_40->qinfo = (struct csinn_quant_info *)(params_base + 817076);
+  kernel_40->quant_channel = 1;
+  struct csinn_tensor *bias_40 = csinn_alloc_tensor(sess);
+  bias_40->name = "bias_40";
+  bias_40->data = params_base + 821732;
+  bias_40->is_const = 1;
+  bias_40->dtype = CSINN_DTYPE_INT32;
+  bias_40->layout = CSINN_LAYOUT_O;
+  bias_40->dim[0] = 512;
+  bias_40->dim_count = 1;
+  bias_40->qinfo = (struct csinn_quant_info *)(params_base + 821708);
+  bias_40->quant_channel = 1;
+  struct csinn_conv2d_params *params_40 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_40->group = 512;
+  params_40->stride_height = 1;
+  params_40->stride_width = 1;
+  params_40->dilation_height = 1;
+  params_40->dilation_width = 1;
+  params_40->conv_extra.kernel_tm = NULL;
+  params_40->conv_extra.conv_mode = CSINN_DIRECT;
+  params_40->pad_top = 1;
+  params_40->pad_left = 1;
+  params_40->pad_down = 1;
+  params_40->pad_right = 1;
+  params_40->base.name = "conv2d_71_fuse_multiply_72_fuse_add_73";
+  csinn_conv2d_init(output_39, output_40, kernel_40, bias_40, params_40);
+  struct csinn_tensor *output_41 = csinn_alloc_tensor(sess);
+  output_41->name = "output_41";
+  output_41->dtype = CSINN_DTYPE_UINT8;
+  output_41->layout = CSINN_LAYOUT_NCHW;
+  output_41->dim[0] = 1;
+  output_41->dim[1] = 512;
+  output_41->dim[2] = 19;
+  output_41->dim[3] = 19;
+  output_41->dim_count = 4;
+  output_41->qinfo = (struct csinn_quant_info *)(params_base + 823780);
+  output_41->quant_channel = 1;
+  struct csinn_relu_params *params_41 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_41->base.name = "relu_74";
+  csinn_relu_init(output_40, output_41, params_41);
+  struct csinn_tensor *output_42 = csinn_alloc_tensor(sess);
+  output_42->name = "output_42";
+  output_42->dtype = CSINN_DTYPE_UINT8;
+  output_42->layout = CSINN_LAYOUT_NCHW;
+  output_42->dim[0] = 1;
+  output_42->dim[1] = 512;
+  output_42->dim[2] = 19;
+  output_42->dim[3] = 19;
+  output_42->dim_count = 4;
+  output_42->qinfo = (struct csinn_quant_info *)(params_base + 823804);
+  output_42->quant_channel = 1;
+  struct csinn_tensor *kernel_42 = csinn_alloc_tensor(sess);
+  kernel_42->name = "kernel_42";
+  kernel_42->data = params_base + 823852;
+  kernel_42->is_const = 1;
+  kernel_42->dtype = CSINN_DTYPE_UINT8;
+  kernel_42->layout = CSINN_LAYOUT_OIHW;
+  kernel_42->dim[0] = 512;
+  kernel_42->dim[1] = 512;
+  kernel_42->dim[2] = 1;
+  kernel_42->dim[3] = 1;
+  kernel_42->dim_count = 4;
+  kernel_42->qinfo = (struct csinn_quant_info *)(params_base + 823828);
+  kernel_42->quant_channel = 1;
+  struct csinn_tensor *bias_42 = csinn_alloc_tensor(sess);
+  bias_42->name = "bias_42";
+  bias_42->data = params_base + 1086020;
+  bias_42->is_const = 1;
+  bias_42->dtype = CSINN_DTYPE_INT32;
+  bias_42->layout = CSINN_LAYOUT_O;
+  bias_42->dim[0] = 512;
+  bias_42->dim_count = 1;
+  bias_42->qinfo = (struct csinn_quant_info *)(params_base + 1085996);
+  bias_42->quant_channel = 1;
+  struct csinn_conv2d_params *params_42 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_42->group = 1;
+  params_42->stride_height = 1;
+  params_42->stride_width = 1;
+  params_42->dilation_height = 1;
+  params_42->dilation_width = 1;
+  params_42->conv_extra.kernel_tm = NULL;
+  params_42->conv_extra.conv_mode = CSINN_DIRECT;
+  params_42->pad_top = 0;
+  params_42->pad_left = 0;
+  params_42->pad_down = 0;
+  params_42->pad_right = 0;
+  params_42->base.name = "conv2d_75_fuse_multiply_76_fuse_add_77";
+  csinn_conv2d_init(output_41, output_42, kernel_42, bias_42, params_42);
+  struct csinn_tensor *output_43 = csinn_alloc_tensor(sess);
+  output_43->name = "output_43";
+  output_43->dtype = CSINN_DTYPE_UINT8;
+  output_43->layout = CSINN_LAYOUT_NCHW;
+  output_43->dim[0] = 1;
+  output_43->dim[1] = 512;
+  output_43->dim[2] = 19;
+  output_43->dim[3] = 19;
+  output_43->dim_count = 4;
+  output_43->qinfo = (struct csinn_quant_info *)(params_base + 1088068);
+  output_43->quant_channel = 1;
+  struct csinn_relu_params *params_43 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_43->base.name = "relu_78";
+  csinn_relu_init(output_42, output_43, params_43);
+  struct csinn_tensor *output_44 = csinn_alloc_tensor(sess);
+  output_44->name = "output_44";
+  output_44->dtype = CSINN_DTYPE_UINT8;
+  output_44->layout = CSINN_LAYOUT_NCHW;
+  output_44->dim[0] = 1;
+  output_44->dim[1] = 512;
+  output_44->dim[2] = 19;
+  output_44->dim[3] = 19;
+  output_44->dim_count = 4;
+  output_44->qinfo = (struct csinn_quant_info *)(params_base + 1088092);
+  output_44->quant_channel = 1;
+  struct csinn_tensor *kernel_44 = csinn_alloc_tensor(sess);
+  kernel_44->name = "kernel_44";
+  kernel_44->data = params_base + 1088140;
+  kernel_44->is_const = 1;
+  kernel_44->dtype = CSINN_DTYPE_UINT8;
+  kernel_44->layout = CSINN_LAYOUT_O1HW;
+  kernel_44->dim[0] = 512;
+  kernel_44->dim[1] = 1;
+  kernel_44->dim[2] = 3;
+  kernel_44->dim[3] = 3;
+  kernel_44->dim_count = 4;
+  kernel_44->qinfo = (struct csinn_quant_info *)(params_base + 1088116);
+  kernel_44->quant_channel = 1;
+  struct csinn_tensor *bias_44 = csinn_alloc_tensor(sess);
+  bias_44->name = "bias_44";
+  bias_44->data = params_base + 1092772;
+  bias_44->is_const = 1;
+  bias_44->dtype = CSINN_DTYPE_INT32;
+  bias_44->layout = CSINN_LAYOUT_O;
+  bias_44->dim[0] = 512;
+  bias_44->dim_count = 1;
+  bias_44->qinfo = (struct csinn_quant_info *)(params_base + 1092748);
+  bias_44->quant_channel = 1;
+  struct csinn_conv2d_params *params_44 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_44->group = 512;
+  params_44->stride_height = 1;
+  params_44->stride_width = 1;
+  params_44->dilation_height = 1;
+  params_44->dilation_width = 1;
+  params_44->conv_extra.kernel_tm = NULL;
+  params_44->conv_extra.conv_mode = CSINN_DIRECT;
+  params_44->pad_top = 1;
+  params_44->pad_left = 1;
+  params_44->pad_down = 1;
+  params_44->pad_right = 1;
+  params_44->base.name = "conv2d_79_fuse_multiply_80_fuse_add_81";
+  csinn_conv2d_init(output_43, output_44, kernel_44, bias_44, params_44);
+  struct csinn_tensor *output_45 = csinn_alloc_tensor(sess);
+  output_45->name = "output_45";
+  output_45->dtype = CSINN_DTYPE_UINT8;
+  output_45->layout = CSINN_LAYOUT_NCHW;
+  output_45->dim[0] = 1;
+  output_45->dim[1] = 512;
+  output_45->dim[2] = 19;
+  output_45->dim[3] = 19;
+  output_45->dim_count = 4;
+  output_45->qinfo = (struct csinn_quant_info *)(params_base + 1094820);
+  output_45->quant_channel = 1;
+  struct csinn_relu_params *params_45 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_45->base.name = "relu_82";
+  csinn_relu_init(output_44, output_45, params_45);
+  struct csinn_tensor *output_46 = csinn_alloc_tensor(sess);
+  output_46->name = "output_46";
+  output_46->dtype = CSINN_DTYPE_UINT8;
+  output_46->layout = CSINN_LAYOUT_NCHW;
+  output_46->dim[0] = 1;
+  output_46->dim[1] = 512;
+  output_46->dim[2] = 19;
+  output_46->dim[3] = 19;
+  output_46->dim_count = 4;
+  output_46->qinfo = (struct csinn_quant_info *)(params_base + 1094844);
+  output_46->quant_channel = 1;
+  struct csinn_tensor *kernel_46 = csinn_alloc_tensor(sess);
+  kernel_46->name = "kernel_46";
+  kernel_46->data = params_base + 1094892;
+  kernel_46->is_const = 1;
+  kernel_46->dtype = CSINN_DTYPE_UINT8;
+  kernel_46->layout = CSINN_LAYOUT_OIHW;
+  kernel_46->dim[0] = 512;
+  kernel_46->dim[1] = 512;
+  kernel_46->dim[2] = 1;
+  kernel_46->dim[3] = 1;
+  kernel_46->dim_count = 4;
+  kernel_46->qinfo = (struct csinn_quant_info *)(params_base + 1094868);
+  kernel_46->quant_channel = 1;
+  struct csinn_tensor *bias_46 = csinn_alloc_tensor(sess);
+  bias_46->name = "bias_46";
+  bias_46->data = params_base + 1357060;
+  bias_46->is_const = 1;
+  bias_46->dtype = CSINN_DTYPE_INT32;
+  bias_46->layout = CSINN_LAYOUT_O;
+  bias_46->dim[0] = 512;
+  bias_46->dim_count = 1;
+  bias_46->qinfo = (struct csinn_quant_info *)(params_base + 1357036);
+  bias_46->quant_channel = 1;
+  struct csinn_conv2d_params *params_46 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_46->group = 1;
+  params_46->stride_height = 1;
+  params_46->stride_width = 1;
+  params_46->dilation_height = 1;
+  params_46->dilation_width = 1;
+  params_46->conv_extra.kernel_tm = NULL;
+  params_46->conv_extra.conv_mode = CSINN_DIRECT;
+  params_46->pad_top = 0;
+  params_46->pad_left = 0;
+  params_46->pad_down = 0;
+  params_46->pad_right = 0;
+  params_46->base.name = "conv2d_83_fuse_multiply_84_fuse_add_85";
+  csinn_conv2d_init(output_45, output_46, kernel_46, bias_46, params_46);
+  struct csinn_tensor *output_47 = csinn_alloc_tensor(sess);
+  output_47->name = "output_47";
+  output_47->dtype = CSINN_DTYPE_UINT8;
+  output_47->layout = CSINN_LAYOUT_NCHW;
+  output_47->dim[0] = 1;
+  output_47->dim[1] = 512;
+  output_47->dim[2] = 19;
+  output_47->dim[3] = 19;
+  output_47->dim_count = 4;
+  output_47->qinfo = (struct csinn_quant_info *)(params_base + 1359108);
+  output_47->quant_channel = 1;
+  struct csinn_relu_params *params_47 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_47->base.name = "relu_86";
+  csinn_relu_init(output_46, output_47, params_47);
+  struct csinn_tensor *output_48 = csinn_alloc_tensor(sess);
+  output_48->name = "output_48";
+  output_48->dtype = CSINN_DTYPE_UINT8;
+  output_48->layout = CSINN_LAYOUT_NCHW;
+  output_48->dim[0] = 1;
+  output_48->dim[1] = 512;
+  output_48->dim[2] = 19;
+  output_48->dim[3] = 19;
+  output_48->dim_count = 4;
+  output_48->qinfo = (struct csinn_quant_info *)(params_base + 1359132);
+  output_48->quant_channel = 1;
+  struct csinn_tensor *kernel_48 = csinn_alloc_tensor(sess);
+  kernel_48->name = "kernel_48";
+  kernel_48->data = params_base + 1359180;
+  kernel_48->is_const = 1;
+  kernel_48->dtype = CSINN_DTYPE_UINT8;
+  kernel_48->layout = CSINN_LAYOUT_O1HW;
+  kernel_48->dim[0] = 512;
+  kernel_48->dim[1] = 1;
+  kernel_48->dim[2] = 3;
+  kernel_48->dim[3] = 3;
+  kernel_48->dim_count = 4;
+  kernel_48->qinfo = (struct csinn_quant_info *)(params_base + 1359156);
+  kernel_48->quant_channel = 1;
+  struct csinn_tensor *bias_48 = csinn_alloc_tensor(sess);
+  bias_48->name = "bias_48";
+  bias_48->data = params_base + 1363812;
+  bias_48->is_const = 1;
+  bias_48->dtype = CSINN_DTYPE_INT32;
+  bias_48->layout = CSINN_LAYOUT_O;
+  bias_48->dim[0] = 512;
+  bias_48->dim_count = 1;
+  bias_48->qinfo = (struct csinn_quant_info *)(params_base + 1363788);
+  bias_48->quant_channel = 1;
+  struct csinn_conv2d_params *params_48 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_48->group = 512;
+  params_48->stride_height = 1;
+  params_48->stride_width = 1;
+  params_48->dilation_height = 1;
+  params_48->dilation_width = 1;
+  params_48->conv_extra.kernel_tm = NULL;
+  params_48->conv_extra.conv_mode = CSINN_DIRECT;
+  params_48->pad_top = 1;
+  params_48->pad_left = 1;
+  params_48->pad_down = 1;
+  params_48->pad_right = 1;
+  params_48->base.name = "conv2d_87_fuse_multiply_88_fuse_add_89";
+  csinn_conv2d_init(output_47, output_48, kernel_48, bias_48, params_48);
+  struct csinn_tensor *output_49 = csinn_alloc_tensor(sess);
+  output_49->name = "output_49";
+  output_49->dtype = CSINN_DTYPE_UINT8;
+  output_49->layout = CSINN_LAYOUT_NCHW;
+  output_49->dim[0] = 1;
+  output_49->dim[1] = 512;
+  output_49->dim[2] = 19;
+  output_49->dim[3] = 19;
+  output_49->dim_count = 4;
+  output_49->qinfo = (struct csinn_quant_info *)(params_base + 1365860);
+  output_49->quant_channel = 1;
+  struct csinn_relu_params *params_49 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_49->base.name = "relu_90";
+  csinn_relu_init(output_48, output_49, params_49);
+  struct csinn_tensor *output_50 = csinn_alloc_tensor(sess);
+  output_50->name = "output_50";
+  output_50->dtype = CSINN_DTYPE_UINT8;
+  output_50->layout = CSINN_LAYOUT_NCHW;
+  output_50->dim[0] = 1;
+  output_50->dim[1] = 512;
+  output_50->dim[2] = 19;
+  output_50->dim[3] = 19;
+  output_50->dim_count = 4;
+  output_50->qinfo = (struct csinn_quant_info *)(params_base + 1365884);
+  output_50->quant_channel = 1;
+  struct csinn_tensor *kernel_50 = csinn_alloc_tensor(sess);
+  kernel_50->name = "kernel_50";
+  kernel_50->data = params_base + 1365932;
+  kernel_50->is_const = 1;
+  kernel_50->dtype = CSINN_DTYPE_UINT8;
+  kernel_50->layout = CSINN_LAYOUT_OIHW;
+  kernel_50->dim[0] = 512;
+  kernel_50->dim[1] = 512;
+  kernel_50->dim[2] = 1;
+  kernel_50->dim[3] = 1;
+  kernel_50->dim_count = 4;
+  kernel_50->qinfo = (struct csinn_quant_info *)(params_base + 1365908);
+  kernel_50->quant_channel = 1;
+  struct csinn_tensor *bias_50 = csinn_alloc_tensor(sess);
+  bias_50->name = "bias_50";
+  bias_50->data = params_base + 1628100;
+  bias_50->is_const = 1;
+  bias_50->dtype = CSINN_DTYPE_INT32;
+  bias_50->layout = CSINN_LAYOUT_O;
+  bias_50->dim[0] = 512;
+  bias_50->dim_count = 1;
+  bias_50->qinfo = (struct csinn_quant_info *)(params_base + 1628076);
+  bias_50->quant_channel = 1;
+  struct csinn_conv2d_params *params_50 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_50->group = 1;
+  params_50->stride_height = 1;
+  params_50->stride_width = 1;
+  params_50->dilation_height = 1;
+  params_50->dilation_width = 1;
+  params_50->conv_extra.kernel_tm = NULL;
+  params_50->conv_extra.conv_mode = CSINN_DIRECT;
+  params_50->pad_top = 0;
+  params_50->pad_left = 0;
+  params_50->pad_down = 0;
+  params_50->pad_right = 0;
+  params_50->base.name = "conv2d_91_fuse_multiply_92_fuse_add_93";
+  csinn_conv2d_init(output_49, output_50, kernel_50, bias_50, params_50);
+  struct csinn_tensor *output_51 = csinn_alloc_tensor(sess);
+  output_51->name = "output_51";
+  output_51->dtype = CSINN_DTYPE_UINT8;
+  output_51->layout = CSINN_LAYOUT_NCHW;
+  output_51->dim[0] = 1;
+  output_51->dim[1] = 512;
+  output_51->dim[2] = 19;
+  output_51->dim[3] = 19;
+  output_51->dim_count = 4;
+  output_51->qinfo = (struct csinn_quant_info *)(params_base + 1630148);
+  output_51->quant_channel = 1;
+  struct csinn_relu_params *params_51 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_51->base.name = "relu_94";
+  csinn_relu_init(output_50, output_51, params_51);
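+  // First prediction head: 1x1 conv, 512 -> 12 channels on the 19x19 map
+  // (19 * 19 * 12 = 4332 values after the flatten below). Presumably an SSD-style
+  // box head; the generated code itself only records the fused conv + bias_add.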
+  struct csinn_tensor *output_52 = csinn_alloc_tensor(sess);
+  output_52->name = "output_52";
+  output_52->dtype = CSINN_DTYPE_UINT8;
+  output_52->layout = CSINN_LAYOUT_NCHW;
+  output_52->dim[0] = 1;
+  output_52->dim[1] = 12;
+  output_52->dim[2] = 19;
+  output_52->dim[3] = 19;
+  output_52->dim_count = 4;
+  output_52->qinfo = (struct csinn_quant_info *)(params_base + 1630172);
+  output_52->quant_channel = 1;
+  struct csinn_tensor *kernel_52 = csinn_alloc_tensor(sess);
+  kernel_52->name = "kernel_52";
+  kernel_52->data = params_base + 1630220;
+  kernel_52->is_const = 1;
+  kernel_52->dtype = CSINN_DTYPE_UINT8;
+  kernel_52->layout = CSINN_LAYOUT_OIHW;
+  kernel_52->dim[0] = 12;
+  kernel_52->dim[1] = 512;
+  kernel_52->dim[2] = 1;
+  kernel_52->dim[3] = 1;
+  kernel_52->dim_count = 4;
+  kernel_52->qinfo = (struct csinn_quant_info *)(params_base + 1630196);
+  kernel_52->quant_channel = 1;
+  struct csinn_tensor *bias_52 = csinn_alloc_tensor(sess);
+  bias_52->name = "bias_52";
+  bias_52->data = params_base + 1636388;
+  bias_52->is_const = 1;
+  bias_52->dtype = CSINN_DTYPE_INT32;
+  bias_52->layout = CSINN_LAYOUT_O;
+  bias_52->dim[0] = 12;
+  bias_52->dim_count = 1;
+  bias_52->qinfo = (struct csinn_quant_info *)(params_base + 1636364);
+  bias_52->quant_channel = 1;
+  struct csinn_conv2d_params *params_52 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_52->group = 1;
+  params_52->stride_height = 1;
+  params_52->stride_width = 1;
+  params_52->dilation_height = 1;
+  params_52->dilation_width = 1;
+  params_52->conv_extra.kernel_tm = NULL;
+  params_52->conv_extra.conv_mode = CSINN_DIRECT;
+  params_52->pad_top = 0;
+  params_52->pad_left = 0;
+  params_52->pad_down = 0;
+  params_52->pad_right = 0;
+  params_52->base.name = "conv2d_95_fuse_bias_add_96";
+  csinn_conv2d_init(output_51, output_52, kernel_52, bias_52, params_52);
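+  // Permute NCHW -> NHWC (0, 2, 3, 1) so the 12 per-cell values become the innermost
+  // axis before flattening. Note the generator still labels output_53 as
+  // CSINN_LAYOUT_NCHW even though its dims are now in NHWC order.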
+  int32_t *permute_53 = malloc(4 * sizeof(int32_t));
+  permute_53[0] = 0;
+  permute_53[1] = 2;
+  permute_53[2] = 3;
+  permute_53[3] = 1;
+  struct csinn_tensor *output_53 = csinn_alloc_tensor(sess);
+  output_53->name = "output_53";
+  output_53->dtype = CSINN_DTYPE_UINT8;
+  output_53->layout = CSINN_LAYOUT_NCHW;
+  output_53->dim[0] = 1;
+  output_53->dim[1] = 19;
+  output_53->dim[2] = 19;
+  output_53->dim[3] = 12;
+  output_53->dim_count = 4;
+  output_53->qinfo = (struct csinn_quant_info *)(params_base + 1636436);
+  output_53->quant_channel = 1;
+  struct csinn_transpose_params *params_53 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_53->permute = permute_53;
+  params_53->permute_num = 4;
+  params_53->base.name = "transpose_97";
+  csinn_transpose_init(output_52, output_53, params_53);
+  int32_t *shape_54 = malloc(2 * sizeof(int32_t));
+  shape_54[0] = 1;
+  shape_54[1] = 4332;
+  struct csinn_tensor *output_54 = csinn_alloc_tensor(sess);
+  output_54->name = "output_54";
+  output_54->dtype = CSINN_DTYPE_UINT8;
+  output_54->layout = CSINN_LAYOUT_NC;
+  output_54->dim[0] = 1;
+  output_54->dim[1] = 4332;
+  output_54->dim_count = 2;
+  output_54->qinfo = (struct csinn_quant_info *)(params_base + 1636460);
+  output_54->quant_channel = 1;
+  struct csinn_reshape_params *params_54 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_54->shape = shape_54;
+  params_54->shape_num = 2;
+  params_54->base.name = "batch_flatten_98";
+  csinn_reshape_init(output_53, output_54, params_54);
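+  // Element-wise multiply with a constant 1x4332 vector (rhs_56) from the weights
+  // blob; in SSD-like graphs this is typically a compile-time scale applied to the
+  // flattened head output, though that reading is inferred rather than stated.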
+  struct csinn_tensor *output_56 = csinn_alloc_tensor(sess);
+  output_56->name = "output_56";
+  output_56->dtype = CSINN_DTYPE_UINT8;
+  output_56->layout = CSINN_LAYOUT_NC;
+  output_56->dim[0] = 1;
+  output_56->dim[1] = 4332;
+  output_56->dim_count = 2;
+  output_56->qinfo = (struct csinn_quant_info *)(params_base + 1636484);
+  output_56->quant_channel = 1;
+  struct csinn_tensor *rhs_56 = csinn_alloc_tensor(sess);
+  rhs_56->name = "rhs_56";
+  rhs_56->data = params_base + 1636532;
+  rhs_56->is_const = 1;
+  rhs_56->dtype = CSINN_DTYPE_UINT8;
+  rhs_56->layout = CSINN_LAYOUT_OI;
+  rhs_56->dim[0] = 1;
+  rhs_56->dim[1] = 4332;
+  rhs_56->dim_count = 2;
+  rhs_56->qinfo = (struct csinn_quant_info *)(params_base + 1636508);
+  rhs_56->quant_channel = 1;
+  struct csinn_diso_params *params_56 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_56->base.name = "mul_167";
+  csinn_mul_init(output_54, rhs_56, output_56, params_56);
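+  // The backbone resumes from output_51 (the last 19x19x512 ReLU), not from the head
+  // branch: depthwise 3x3 with stride 2 downsamples 19x19 -> 10x10.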
+  struct csinn_tensor *output_58 = csinn_alloc_tensor(sess);
+  output_58->name = "output_58";
+  output_58->dtype = CSINN_DTYPE_UINT8;
+  output_58->layout = CSINN_LAYOUT_NCHW;
+  output_58->dim[0] = 1;
+  output_58->dim[1] = 512;
+  output_58->dim[2] = 10;
+  output_58->dim[3] = 10;
+  output_58->dim_count = 4;
+  output_58->qinfo = (struct csinn_quant_info *)(params_base + 1640864);
+  output_58->quant_channel = 1;
+  struct csinn_tensor *kernel_58 = csinn_alloc_tensor(sess);
+  kernel_58->name = "kernel_58";
+  kernel_58->data = params_base + 1640912;
+  kernel_58->is_const = 1;
+  kernel_58->dtype = CSINN_DTYPE_UINT8;
+  kernel_58->layout = CSINN_LAYOUT_O1HW;
+  kernel_58->dim[0] = 512;
+  kernel_58->dim[1] = 1;
+  kernel_58->dim[2] = 3;
+  kernel_58->dim[3] = 3;
+  kernel_58->dim_count = 4;
+  kernel_58->qinfo = (struct csinn_quant_info *)(params_base + 1640888);
+  kernel_58->quant_channel = 1;
+  struct csinn_tensor *bias_58 = csinn_alloc_tensor(sess);
+  bias_58->name = "bias_58";
+  bias_58->data = params_base + 1645544;
+  bias_58->is_const = 1;
+  bias_58->dtype = CSINN_DTYPE_INT32;
+  bias_58->layout = CSINN_LAYOUT_O;
+  bias_58->dim[0] = 512;
+  bias_58->dim_count = 1;
+  bias_58->qinfo = (struct csinn_quant_info *)(params_base + 1645520);
+  bias_58->quant_channel = 1;
+  struct csinn_conv2d_params *params_58 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_58->group = 512;
+  params_58->stride_height = 2;
+  params_58->stride_width = 2;
+  params_58->dilation_height = 1;
+  params_58->dilation_width = 1;
+  params_58->conv_extra.kernel_tm = NULL;
+  params_58->conv_extra.conv_mode = CSINN_DIRECT;
+  params_58->pad_top = 1;
+  params_58->pad_left = 1;
+  params_58->pad_down = 1;
+  params_58->pad_right = 1;
+  params_58->base.name = "conv2d_99_fuse_multiply_100_fuse_add_101";
+  csinn_conv2d_init(output_51, output_58, kernel_58, bias_58, params_58);
+  struct csinn_tensor *output_59 = csinn_alloc_tensor(sess);
+  output_59->name = "output_59";
+  output_59->dtype = CSINN_DTYPE_UINT8;
+  output_59->layout = CSINN_LAYOUT_NCHW;
+  output_59->dim[0] = 1;
+  output_59->dim[1] = 512;
+  output_59->dim[2] = 10;
+  output_59->dim[3] = 10;
+  output_59->dim_count = 4;
+  output_59->qinfo = (struct csinn_quant_info *)(params_base + 1647592);
+  output_59->quant_channel = 1;
+  struct csinn_relu_params *params_59 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_59->base.name = "relu_102";
+  csinn_relu_init(output_58, output_59, params_59);
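+  // Pointwise 1x1 conv expands 512 -> 1024 channels at 10x10.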
+  struct csinn_tensor *output_60 = csinn_alloc_tensor(sess);
+  output_60->name = "output_60";
+  output_60->dtype = CSINN_DTYPE_UINT8;
+  output_60->layout = CSINN_LAYOUT_NCHW;
+  output_60->dim[0] = 1;
+  output_60->dim[1] = 1024;
+  output_60->dim[2] = 10;
+  output_60->dim[3] = 10;
+  output_60->dim_count = 4;
+  output_60->qinfo = (struct csinn_quant_info *)(params_base + 1647616);
+  output_60->quant_channel = 1;
+  struct csinn_tensor *kernel_60 = csinn_alloc_tensor(sess);
+  kernel_60->name = "kernel_60";
+  kernel_60->data = params_base + 1647664;
+  kernel_60->is_const = 1;
+  kernel_60->dtype = CSINN_DTYPE_UINT8;
+  kernel_60->layout = CSINN_LAYOUT_OIHW;
+  kernel_60->dim[0] = 1024;
+  kernel_60->dim[1] = 512;
+  kernel_60->dim[2] = 1;
+  kernel_60->dim[3] = 1;
+  kernel_60->dim_count = 4;
+  kernel_60->qinfo = (struct csinn_quant_info *)(params_base + 1647640);
+  kernel_60->quant_channel = 1;
+  struct csinn_tensor *bias_60 = csinn_alloc_tensor(sess);
+  bias_60->name = "bias_60";
+  bias_60->data = params_base + 2171976;
+  bias_60->is_const = 1;
+  bias_60->dtype = CSINN_DTYPE_INT32;
+  bias_60->layout = CSINN_LAYOUT_O;
+  bias_60->dim[0] = 1024;
+  bias_60->dim_count = 1;
+  bias_60->qinfo = (struct csinn_quant_info *)(params_base + 2171952);
+  bias_60->quant_channel = 1;
+  struct csinn_conv2d_params *params_60 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_60->group = 1;
+  params_60->stride_height = 1;
+  params_60->stride_width = 1;
+  params_60->dilation_height = 1;
+  params_60->dilation_width = 1;
+  params_60->conv_extra.kernel_tm = NULL;
+  params_60->conv_extra.conv_mode = CSINN_DIRECT;
+  params_60->pad_top = 0;
+  params_60->pad_left = 0;
+  params_60->pad_down = 0;
+  params_60->pad_right = 0;
+  params_60->base.name = "conv2d_103_fuse_multiply_104_fuse_add_105";
+  csinn_conv2d_init(output_59, output_60, kernel_60, bias_60, params_60);
+  struct csinn_tensor *output_61 = csinn_alloc_tensor(sess);
+  output_61->name = "output_61";
+  output_61->dtype = CSINN_DTYPE_UINT8;
+  output_61->layout = CSINN_LAYOUT_NCHW;
+  output_61->dim[0] = 1;
+  output_61->dim[1] = 1024;
+  output_61->dim[2] = 10;
+  output_61->dim[3] = 10;
+  output_61->dim_count = 4;
+  output_61->qinfo = (struct csinn_quant_info *)(params_base + 2176072);
+  output_61->quant_channel = 1;
+  struct csinn_relu_params *params_61 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_61->base.name = "relu_106";
+  csinn_relu_init(output_60, output_61, params_61);
+  struct csinn_tensor *output_62 = csinn_alloc_tensor(sess);
+  output_62->name = "output_62";
+  output_62->dtype = CSINN_DTYPE_UINT8;
+  output_62->layout = CSINN_LAYOUT_NCHW;
+  output_62->dim[0] = 1;
+  output_62->dim[1] = 1024;
+  output_62->dim[2] = 10;
+  output_62->dim[3] = 10;
+  output_62->dim_count = 4;
+  output_62->qinfo = (struct csinn_quant_info *)(params_base + 2176096);
+  output_62->quant_channel = 1;
+  struct csinn_tensor *kernel_62 = csinn_alloc_tensor(sess);
+  kernel_62->name = "kernel_62";
+  kernel_62->data = params_base + 2176144;
+  kernel_62->is_const = 1;
+  kernel_62->dtype = CSINN_DTYPE_UINT8;
+  kernel_62->layout = CSINN_LAYOUT_O1HW;
+  kernel_62->dim[0] = 1024;
+  kernel_62->dim[1] = 1;
+  kernel_62->dim[2] = 3;
+  kernel_62->dim[3] = 3;
+  kernel_62->dim_count = 4;
+  kernel_62->qinfo = (struct csinn_quant_info *)(params_base + 2176120);
+  kernel_62->quant_channel = 1;
+  struct csinn_tensor *bias_62 = csinn_alloc_tensor(sess);
+  bias_62->name = "bias_62";
+  bias_62->data = params_base + 2185384;
+  bias_62->is_const = 1;
+  bias_62->dtype = CSINN_DTYPE_INT32;
+  bias_62->layout = CSINN_LAYOUT_O;
+  bias_62->dim[0] = 1024;
+  bias_62->dim_count = 1;
+  bias_62->qinfo = (struct csinn_quant_info *)(params_base + 2185360);
+  bias_62->quant_channel = 1;
+  struct csinn_conv2d_params *params_62 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_62->group = 1024;
+  params_62->stride_height = 1;
+  params_62->stride_width = 1;
+  params_62->dilation_height = 1;
+  params_62->dilation_width = 1;
+  params_62->conv_extra.kernel_tm = NULL;
+  params_62->conv_extra.conv_mode = CSINN_DIRECT;
+  params_62->pad_top = 1;
+  params_62->pad_left = 1;
+  params_62->pad_down = 1;
+  params_62->pad_right = 1;
+  params_62->base.name = "conv2d_107_fuse_multiply_108_fuse_add_109";
+  csinn_conv2d_init(output_61, output_62, kernel_62, bias_62, params_62);
+  struct csinn_tensor *output_63 = csinn_alloc_tensor(sess);
+  output_63->name = "output_63";
+  output_63->dtype = CSINN_DTYPE_UINT8;
+  output_63->layout = CSINN_LAYOUT_NCHW;
+  output_63->dim[0] = 1;
+  output_63->dim[1] = 1024;
+  output_63->dim[2] = 10;
+  output_63->dim[3] = 10;
+  output_63->dim_count = 4;
+  output_63->qinfo = (struct csinn_quant_info *)(params_base + 2189480);
+  output_63->quant_channel = 1;
+  struct csinn_relu_params *params_63 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_63->base.name = "relu_110";
+  csinn_relu_init(output_62, output_63, params_63);
+  struct csinn_tensor *output_64 = csinn_alloc_tensor(sess);
+  output_64->name = "output_64";
+  output_64->dtype = CSINN_DTYPE_UINT8;
+  output_64->layout = CSINN_LAYOUT_NCHW;
+  output_64->dim[0] = 1;
+  output_64->dim[1] = 1024;
+  output_64->dim[2] = 10;
+  output_64->dim[3] = 10;
+  output_64->dim_count = 4;
+  output_64->qinfo = (struct csinn_quant_info *)(params_base + 2189504);
+  output_64->quant_channel = 1;
+  struct csinn_tensor *kernel_64 = csinn_alloc_tensor(sess);
+  kernel_64->name = "kernel_64";
+  kernel_64->data = params_base + 2189552;
+  kernel_64->is_const = 1;
+  kernel_64->dtype = CSINN_DTYPE_UINT8;
+  kernel_64->layout = CSINN_LAYOUT_OIHW;
+  kernel_64->dim[0] = 1024;
+  kernel_64->dim[1] = 1024;
+  kernel_64->dim[2] = 1;
+  kernel_64->dim[3] = 1;
+  kernel_64->dim_count = 4;
+  kernel_64->qinfo = (struct csinn_quant_info *)(params_base + 2189528);
+  kernel_64->quant_channel = 1;
+  struct csinn_tensor *bias_64 = csinn_alloc_tensor(sess);
+  bias_64->name = "bias_64";
+  bias_64->data = params_base + 3238152;
+  bias_64->is_const = 1;
+  bias_64->dtype = CSINN_DTYPE_INT32;
+  bias_64->layout = CSINN_LAYOUT_O;
+  bias_64->dim[0] = 1024;
+  bias_64->dim_count = 1;
+  bias_64->qinfo = (struct csinn_quant_info *)(params_base + 3238128);
+  bias_64->quant_channel = 1;
+  struct csinn_conv2d_params *params_64 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_64->group = 1;
+  params_64->stride_height = 1;
+  params_64->stride_width = 1;
+  params_64->dilation_height = 1;
+  params_64->dilation_width = 1;
+  params_64->conv_extra.kernel_tm = NULL;
+  params_64->conv_extra.conv_mode = CSINN_DIRECT;
+  params_64->pad_top = 0;
+  params_64->pad_left = 0;
+  params_64->pad_down = 0;
+  params_64->pad_right = 0;
+  params_64->base.name = "conv2d_111_fuse_multiply_112_fuse_add_113";
+  csinn_conv2d_init(output_63, output_64, kernel_64, bias_64, params_64);
+  struct csinn_tensor *output_65 = csinn_alloc_tensor(sess);
+  output_65->name = "output_65";
+  output_65->dtype = CSINN_DTYPE_UINT8;
+  output_65->layout = CSINN_LAYOUT_NCHW;
+  output_65->dim[0] = 1;
+  output_65->dim[1] = 1024;
+  output_65->dim[2] = 10;
+  output_65->dim[3] = 10;
+  output_65->dim_count = 4;
+  output_65->qinfo = (struct csinn_quant_info *)(params_base + 3242248);
+  output_65->quant_channel = 1;
+  struct csinn_relu_params *params_65 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_65->base.name = "relu_114";
+  csinn_relu_init(output_64, output_65, params_65);
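+  // Second head: 1x1 conv, 1024 -> 24 channels on the 10x10 map, followed by the same
+  // transpose / flatten / constant-multiply pattern (10 * 10 * 24 = 2400 values).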
+  struct csinn_tensor *output_66 = csinn_alloc_tensor(sess);
+  output_66->name = "output_66";
+  output_66->dtype = CSINN_DTYPE_UINT8;
+  output_66->layout = CSINN_LAYOUT_NCHW;
+  output_66->dim[0] = 1;
+  output_66->dim[1] = 24;
+  output_66->dim[2] = 10;
+  output_66->dim[3] = 10;
+  output_66->dim_count = 4;
+  output_66->qinfo = (struct csinn_quant_info *)(params_base + 3242272);
+  output_66->quant_channel = 1;
+  struct csinn_tensor *kernel_66 = csinn_alloc_tensor(sess);
+  kernel_66->name = "kernel_66";
+  kernel_66->data = params_base + 3242320;
+  kernel_66->is_const = 1;
+  kernel_66->dtype = CSINN_DTYPE_UINT8;
+  kernel_66->layout = CSINN_LAYOUT_OIHW;
+  kernel_66->dim[0] = 24;
+  kernel_66->dim[1] = 1024;
+  kernel_66->dim[2] = 1;
+  kernel_66->dim[3] = 1;
+  kernel_66->dim_count = 4;
+  kernel_66->qinfo = (struct csinn_quant_info *)(params_base + 3242296);
+  kernel_66->quant_channel = 1;
+  struct csinn_tensor *bias_66 = csinn_alloc_tensor(sess);
+  bias_66->name = "bias_66";
+  bias_66->data = params_base + 3266920;
+  bias_66->is_const = 1;
+  bias_66->dtype = CSINN_DTYPE_INT32;
+  bias_66->layout = CSINN_LAYOUT_O;
+  bias_66->dim[0] = 24;
+  bias_66->dim_count = 1;
+  bias_66->qinfo = (struct csinn_quant_info *)(params_base + 3266896);
+  bias_66->quant_channel = 1;
+  struct csinn_conv2d_params *params_66 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_66->group = 1;
+  params_66->stride_height = 1;
+  params_66->stride_width = 1;
+  params_66->dilation_height = 1;
+  params_66->dilation_width = 1;
+  params_66->conv_extra.kernel_tm = NULL;
+  params_66->conv_extra.conv_mode = CSINN_DIRECT;
+  params_66->pad_top = 0;
+  params_66->pad_left = 0;
+  params_66->pad_down = 0;
+  params_66->pad_right = 0;
+  params_66->base.name = "conv2d_115_fuse_bias_add_116";
+  csinn_conv2d_init(output_65, output_66, kernel_66, bias_66, params_66);
+  int32_t *permute_67 = malloc(4 * sizeof(int32_t));
+  permute_67[0] = 0;
+  permute_67[1] = 2;
+  permute_67[2] = 3;
+  permute_67[3] = 1;
+  struct csinn_tensor *output_67 = csinn_alloc_tensor(sess);
+  output_67->name = "output_67";
+  output_67->dtype = CSINN_DTYPE_UINT8;
+  output_67->layout = CSINN_LAYOUT_NCHW;
+  output_67->dim[0] = 1;
+  output_67->dim[1] = 10;
+  output_67->dim[2] = 10;
+  output_67->dim[3] = 24;
+  output_67->dim_count = 4;
+  output_67->qinfo = (struct csinn_quant_info *)(params_base + 3267016);
+  output_67->quant_channel = 1;
+  struct csinn_transpose_params *params_67 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_67->permute = permute_67;
+  params_67->permute_num = 4;
+  params_67->base.name = "transpose_117";
+  csinn_transpose_init(output_66, output_67, params_67);
+  int32_t *shape_68 = malloc(2 * sizeof(int32_t));
+  shape_68[0] = 1;
+  shape_68[1] = 2400;
+  struct csinn_tensor *output_68 = csinn_alloc_tensor(sess);
+  output_68->name = "output_68";
+  output_68->dtype = CSINN_DTYPE_UINT8;
+  output_68->layout = CSINN_LAYOUT_NC;
+  output_68->dim[0] = 1;
+  output_68->dim[1] = 2400;
+  output_68->dim_count = 2;
+  output_68->qinfo = (struct csinn_quant_info *)(params_base + 3267040);
+  output_68->quant_channel = 1;
+  struct csinn_reshape_params *params_68 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_68->shape = shape_68;
+  params_68->shape_num = 2;
+  params_68->base.name = "batch_flatten_118";
+  csinn_reshape_init(output_67, output_68, params_68);
+  struct csinn_tensor *output_70 = csinn_alloc_tensor(sess);
+  output_70->name = "output_70";
+  output_70->dtype = CSINN_DTYPE_UINT8;
+  output_70->layout = CSINN_LAYOUT_NC;
+  output_70->dim[0] = 1;
+  output_70->dim[1] = 2400;
+  output_70->dim_count = 2;
+  output_70->qinfo = (struct csinn_quant_info *)(params_base + 3267064);
+  output_70->quant_channel = 1;
+  struct csinn_tensor *rhs_70 = csinn_alloc_tensor(sess);
+  rhs_70->name = "rhs_70";
+  rhs_70->data = params_base + 3267112;
+  rhs_70->is_const = 1;
+  rhs_70->dtype = CSINN_DTYPE_UINT8;
+  rhs_70->layout = CSINN_LAYOUT_OI;
+  rhs_70->dim[0] = 1;
+  rhs_70->dim[1] = 2400;
+  rhs_70->dim_count = 2;
+  rhs_70->qinfo = (struct csinn_quant_info *)(params_base + 3267088);
+  rhs_70->quant_channel = 1;
+  struct csinn_diso_params *params_70 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_70->base.name = "mul_168";
+  csinn_mul_init(output_68, rhs_70, output_70, params_70);
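+  // Extra-feature branch from output_65 (10x10x1024): 1x1 conv down to 256, then a
+  // 3x3 stride-2 conv (below) to 5x5x512.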
+  struct csinn_tensor *output_72 = csinn_alloc_tensor(sess);
+  output_72->name = "output_72";
+  output_72->dtype = CSINN_DTYPE_UINT8;
+  output_72->layout = CSINN_LAYOUT_NCHW;
+  output_72->dim[0] = 1;
+  output_72->dim[1] = 256;
+  output_72->dim[2] = 10;
+  output_72->dim[3] = 10;
+  output_72->dim_count = 4;
+  output_72->qinfo = (struct csinn_quant_info *)(params_base + 3269512);
+  output_72->quant_channel = 1;
+  struct csinn_tensor *kernel_72 = csinn_alloc_tensor(sess);
+  kernel_72->name = "kernel_72";
+  kernel_72->data = params_base + 3269560;
+  kernel_72->is_const = 1;
+  kernel_72->dtype = CSINN_DTYPE_UINT8;
+  kernel_72->layout = CSINN_LAYOUT_OIHW;
+  kernel_72->dim[0] = 256;
+  kernel_72->dim[1] = 1024;
+  kernel_72->dim[2] = 1;
+  kernel_72->dim[3] = 1;
+  kernel_72->dim_count = 4;
+  kernel_72->qinfo = (struct csinn_quant_info *)(params_base + 3269536);
+  kernel_72->quant_channel = 1;
+  struct csinn_tensor *bias_72 = csinn_alloc_tensor(sess);
+  bias_72->name = "bias_72";
+  bias_72->data = params_base + 3531728;
+  bias_72->is_const = 1;
+  bias_72->dtype = CSINN_DTYPE_INT32;
+  bias_72->layout = CSINN_LAYOUT_O;
+  bias_72->dim[0] = 256;
+  bias_72->dim_count = 1;
+  bias_72->qinfo = (struct csinn_quant_info *)(params_base + 3531704);
+  bias_72->quant_channel = 1;
+  struct csinn_conv2d_params *params_72 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_72->group = 1;
+  params_72->stride_height = 1;
+  params_72->stride_width = 1;
+  params_72->dilation_height = 1;
+  params_72->dilation_width = 1;
+  params_72->conv_extra.kernel_tm = NULL;
+  params_72->conv_extra.conv_mode = CSINN_DIRECT;
+  params_72->pad_top = 0;
+  params_72->pad_left = 0;
+  params_72->pad_down = 0;
+  params_72->pad_right = 0;
+  params_72->base.name = "conv2d_119_fuse_multiply_120_fuse_add_121";
+  csinn_conv2d_init(output_65, output_72, kernel_72, bias_72, params_72);
+  struct csinn_tensor *output_73 = csinn_alloc_tensor(sess);
+  output_73->name = "output_73";
+  output_73->dtype = CSINN_DTYPE_UINT8;
+  output_73->layout = CSINN_LAYOUT_NCHW;
+  output_73->dim[0] = 1;
+  output_73->dim[1] = 256;
+  output_73->dim[2] = 10;
+  output_73->dim[3] = 10;
+  output_73->dim_count = 4;
+  output_73->qinfo = (struct csinn_quant_info *)(params_base + 3532752);
+  output_73->quant_channel = 1;
+  struct csinn_relu_params *params_73 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_73->base.name = "relu_122";
+  csinn_relu_init(output_72, output_73, params_73);
+  struct csinn_tensor *output_74 = csinn_alloc_tensor(sess);
+  output_74->name = "output_74";
+  output_74->dtype = CSINN_DTYPE_UINT8;
+  output_74->layout = CSINN_LAYOUT_NCHW;
+  output_74->dim[0] = 1;
+  output_74->dim[1] = 512;
+  output_74->dim[2] = 5;
+  output_74->dim[3] = 5;
+  output_74->dim_count = 4;
+  output_74->qinfo = (struct csinn_quant_info *)(params_base + 3532776);
+  output_74->quant_channel = 1;
+  struct csinn_tensor *kernel_74 = csinn_alloc_tensor(sess);
+  kernel_74->name = "kernel_74";
+  kernel_74->data = params_base + 3532824;
+  kernel_74->is_const = 1;
+  kernel_74->dtype = CSINN_DTYPE_UINT8;
+  kernel_74->layout = CSINN_LAYOUT_OIHW;
+  kernel_74->dim[0] = 512;
+  kernel_74->dim[1] = 256;
+  kernel_74->dim[2] = 3;
+  kernel_74->dim[3] = 3;
+  kernel_74->dim_count = 4;
+  kernel_74->qinfo = (struct csinn_quant_info *)(params_base + 3532800);
+  kernel_74->quant_channel = 1;
+  struct csinn_tensor *bias_74 = csinn_alloc_tensor(sess);
+  bias_74->name = "bias_74";
+  bias_74->data = params_base + 4712496;
+  bias_74->is_const = 1;
+  bias_74->dtype = CSINN_DTYPE_INT32;
+  bias_74->layout = CSINN_LAYOUT_O;
+  bias_74->dim[0] = 512;
+  bias_74->dim_count = 1;
+  bias_74->qinfo = (struct csinn_quant_info *)(params_base + 4712472);
+  bias_74->quant_channel = 1;
+  struct csinn_conv2d_params *params_74 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_74->group = 1;
+  params_74->stride_height = 2;
+  params_74->stride_width = 2;
+  params_74->dilation_height = 1;
+  params_74->dilation_width = 1;
+  params_74->conv_extra.kernel_tm = NULL;
+  params_74->conv_extra.conv_mode = CSINN_DIRECT;
+  params_74->pad_top = 1;
+  params_74->pad_left = 1;
+  params_74->pad_down = 1;
+  params_74->pad_right = 1;
+  params_74->base.name = "conv2d_123_fuse_multiply_124_fuse_add_125";
+  csinn_conv2d_init(output_73, output_74, kernel_74, bias_74, params_74);
+  struct csinn_tensor *output_75 = csinn_alloc_tensor(sess);
+  output_75->name = "output_75";
+  output_75->dtype = CSINN_DTYPE_UINT8;
+  output_75->layout = CSINN_LAYOUT_NCHW;
+  output_75->dim[0] = 1;
+  output_75->dim[1] = 512;
+  output_75->dim[2] = 5;
+  output_75->dim[3] = 5;
+  output_75->dim_count = 4;
+  output_75->qinfo = (struct csinn_quant_info *)(params_base + 4714544);
+  output_75->quant_channel = 1;
+  struct csinn_relu_params *params_75 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_75->base.name = "relu_126";
+  csinn_relu_init(output_74, output_75, params_75);
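+  // Third head: 1x1 conv, 512 -> 24 channels on the 5x5 map (flattened to 600 below).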
+  struct csinn_tensor *output_76 = csinn_alloc_tensor(sess);
+  output_76->name = "output_76";
+  output_76->dtype = CSINN_DTYPE_UINT8;
+  output_76->layout = CSINN_LAYOUT_NCHW;
+  output_76->dim[0] = 1;
+  output_76->dim[1] = 24;
+  output_76->dim[2] = 5;
+  output_76->dim[3] = 5;
+  output_76->dim_count = 4;
+  output_76->qinfo = (struct csinn_quant_info *)(params_base + 4714568);
+  output_76->quant_channel = 1;
+  struct csinn_tensor *kernel_76 = csinn_alloc_tensor(sess);
+  kernel_76->name = "kernel_76";
+  kernel_76->data = params_base + 4714616;
+  kernel_76->is_const = 1;
+  kernel_76->dtype = CSINN_DTYPE_UINT8;
+  kernel_76->layout = CSINN_LAYOUT_OIHW;
+  kernel_76->dim[0] = 24;
+  kernel_76->dim[1] = 512;
+  kernel_76->dim[2] = 1;
+  kernel_76->dim[3] = 1;
+  kernel_76->dim_count = 4;
+  kernel_76->qinfo = (struct csinn_quant_info *)(params_base + 4714592);
+  kernel_76->quant_channel = 1;
+  struct csinn_tensor *bias_76 = csinn_alloc_tensor(sess);
+  bias_76->name = "bias_76";
+  bias_76->data = params_base + 4726928;
+  bias_76->is_const = 1;
+  bias_76->dtype = CSINN_DTYPE_INT32;
+  bias_76->layout = CSINN_LAYOUT_O;
+  bias_76->dim[0] = 24;
+  bias_76->dim_count = 1;
+  bias_76->qinfo = (struct csinn_quant_info *)(params_base + 4726904);
+  bias_76->quant_channel = 1;
+  struct csinn_conv2d_params *params_76 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_76->group = 1;
+  params_76->stride_height = 1;
+  params_76->stride_width = 1;
+  params_76->dilation_height = 1;
+  params_76->dilation_width = 1;
+  params_76->conv_extra.kernel_tm = NULL;
+  params_76->conv_extra.conv_mode = CSINN_DIRECT;
+  params_76->pad_top = 0;
+  params_76->pad_left = 0;
+  params_76->pad_down = 0;
+  params_76->pad_right = 0;
+  params_76->base.name = "conv2d_127_fuse_bias_add_128";
+  csinn_conv2d_init(output_75, output_76, kernel_76, bias_76, params_76);
+  int32_t *permute_77 = malloc(4 * sizeof(int32_t));
+  permute_77[0] = 0;
+  permute_77[1] = 2;
+  permute_77[2] = 3;
+  permute_77[3] = 1;
+  struct csinn_tensor *output_77 = csinn_alloc_tensor(sess);
+  output_77->name = "output_77";
+  output_77->dtype = CSINN_DTYPE_UINT8;
+  output_77->layout = CSINN_LAYOUT_NCHW;
+  output_77->dim[0] = 1;
+  output_77->dim[1] = 5;
+  output_77->dim[2] = 5;
+  output_77->dim[3] = 24;
+  output_77->dim_count = 4;
+  output_77->qinfo = (struct csinn_quant_info *)(params_base + 4727024);
+  output_77->quant_channel = 1;
+  struct csinn_transpose_params *params_77 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_77->permute = permute_77;
+  params_77->permute_num = 4;
+  params_77->base.name = "transpose_129";
+  csinn_transpose_init(output_76, output_77, params_77);
+  int32_t *shape_78 = malloc(2 * sizeof(int32_t));
+  shape_78[0] = 1;
+  shape_78[1] = 600;
+  struct csinn_tensor *output_78 = csinn_alloc_tensor(sess);
+  output_78->name = "output_78";
+  output_78->dtype = CSINN_DTYPE_UINT8;
+  output_78->layout = CSINN_LAYOUT_NC;
+  output_78->dim[0] = 1;
+  output_78->dim[1] = 600;
+  output_78->dim_count = 2;
+  output_78->qinfo = (struct csinn_quant_info *)(params_base + 4727048);
+  output_78->quant_channel = 1;
+  struct csinn_reshape_params *params_78 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_78->shape = shape_78;
+  params_78->shape_num = 2;
+  params_78->base.name = "batch_flatten_130";
+  csinn_reshape_init(output_77, output_78, params_78);
+  struct csinn_tensor *output_80 = csinn_alloc_tensor(sess);
+  output_80->name = "output_80";
+  output_80->dtype = CSINN_DTYPE_UINT8;
+  output_80->layout = CSINN_LAYOUT_NC;
+  output_80->dim[0] = 1;
+  output_80->dim[1] = 600;
+  output_80->dim_count = 2;
+  output_80->qinfo = (struct csinn_quant_info *)(params_base + 4727072);
+  output_80->quant_channel = 1;
+  struct csinn_tensor *rhs_80 = csinn_alloc_tensor(sess);
+  rhs_80->name = "rhs_80";
+  rhs_80->data = params_base + 4727120;
+  rhs_80->is_const = 1;
+  rhs_80->dtype = CSINN_DTYPE_UINT8;
+  rhs_80->layout = CSINN_LAYOUT_OI;
+  rhs_80->dim[0] = 1;
+  rhs_80->dim[1] = 600;
+  rhs_80->dim_count = 2;
+  rhs_80->qinfo = (struct csinn_quant_info *)(params_base + 4727096);
+  rhs_80->quant_channel = 1;
+  struct csinn_diso_params *params_80 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_80->base.name = "mul_169";
+  csinn_mul_init(output_78, rhs_80, output_80, params_80);
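+  // Next branch from output_75 (5x5x512): 1x1 conv to 128, then a 3x3 stride-2 conv
+  // down to 3x3x256.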
+  struct csinn_tensor *output_82 = csinn_alloc_tensor(sess);
+  output_82->name = "output_82";
+  output_82->dtype = CSINN_DTYPE_UINT8;
+  output_82->layout = CSINN_LAYOUT_NCHW;
+  output_82->dim[0] = 1;
+  output_82->dim[1] = 128;
+  output_82->dim[2] = 5;
+  output_82->dim[3] = 5;
+  output_82->dim_count = 4;
+  output_82->qinfo = (struct csinn_quant_info *)(params_base + 4727720);
+  output_82->quant_channel = 1;
+  struct csinn_tensor *kernel_82 = csinn_alloc_tensor(sess);
+  kernel_82->name = "kernel_82";
+  kernel_82->data = params_base + 4727768;
+  kernel_82->is_const = 1;
+  kernel_82->dtype = CSINN_DTYPE_UINT8;
+  kernel_82->layout = CSINN_LAYOUT_OIHW;
+  kernel_82->dim[0] = 128;
+  kernel_82->dim[1] = 512;
+  kernel_82->dim[2] = 1;
+  kernel_82->dim[3] = 1;
+  kernel_82->dim_count = 4;
+  kernel_82->qinfo = (struct csinn_quant_info *)(params_base + 4727744);
+  kernel_82->quant_channel = 1;
+  struct csinn_tensor *bias_82 = csinn_alloc_tensor(sess);
+  bias_82->name = "bias_82";
+  bias_82->data = params_base + 4793328;
+  bias_82->is_const = 1;
+  bias_82->dtype = CSINN_DTYPE_INT32;
+  bias_82->layout = CSINN_LAYOUT_O;
+  bias_82->dim[0] = 128;
+  bias_82->dim_count = 1;
+  bias_82->qinfo = (struct csinn_quant_info *)(params_base + 4793304);
+  bias_82->quant_channel = 1;
+  struct csinn_conv2d_params *params_82 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_82->group = 1;
+  params_82->stride_height = 1;
+  params_82->stride_width = 1;
+  params_82->dilation_height = 1;
+  params_82->dilation_width = 1;
+  params_82->conv_extra.kernel_tm = NULL;
+  params_82->conv_extra.conv_mode = CSINN_DIRECT;
+  params_82->pad_top = 0;
+  params_82->pad_left = 0;
+  params_82->pad_down = 0;
+  params_82->pad_right = 0;
+  params_82->base.name = "conv2d_131_fuse_multiply_132_fuse_add_133";
+  csinn_conv2d_init(output_75, output_82, kernel_82, bias_82, params_82);
+  struct csinn_tensor *output_83 = csinn_alloc_tensor(sess);
+  output_83->name = "output_83";
+  output_83->dtype = CSINN_DTYPE_UINT8;
+  output_83->layout = CSINN_LAYOUT_NCHW;
+  output_83->dim[0] = 1;
+  output_83->dim[1] = 128;
+  output_83->dim[2] = 5;
+  output_83->dim[3] = 5;
+  output_83->dim_count = 4;
+  output_83->qinfo = (struct csinn_quant_info *)(params_base + 4793840);
+  output_83->quant_channel = 1;
+  struct csinn_relu_params *params_83 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_83->base.name = "relu_134";
+  csinn_relu_init(output_82, output_83, params_83);
+  struct csinn_tensor *output_84 = csinn_alloc_tensor(sess);
+  output_84->name = "output_84";
+  output_84->dtype = CSINN_DTYPE_UINT8;
+  output_84->layout = CSINN_LAYOUT_NCHW;
+  output_84->dim[0] = 1;
+  output_84->dim[1] = 256;
+  output_84->dim[2] = 3;
+  output_84->dim[3] = 3;
+  output_84->dim_count = 4;
+  output_84->qinfo = (struct csinn_quant_info *)(params_base + 4793864);
+  output_84->quant_channel = 1;
+  struct csinn_tensor *kernel_84 = csinn_alloc_tensor(sess);
+  kernel_84->name = "kernel_84";
+  kernel_84->data = params_base + 4793912;
+  kernel_84->is_const = 1;
+  kernel_84->dtype = CSINN_DTYPE_UINT8;
+  kernel_84->layout = CSINN_LAYOUT_OIHW;
+  kernel_84->dim[0] = 256;
+  kernel_84->dim[1] = 128;
+  kernel_84->dim[2] = 3;
+  kernel_84->dim[3] = 3;
+  kernel_84->dim_count = 4;
+  kernel_84->qinfo = (struct csinn_quant_info *)(params_base + 4793888);
+  kernel_84->quant_channel = 1;
+  struct csinn_tensor *bias_84 = csinn_alloc_tensor(sess);
+  bias_84->name = "bias_84";
+  bias_84->data = params_base + 5088848;
+  bias_84->is_const = 1;
+  bias_84->dtype = CSINN_DTYPE_INT32;
+  bias_84->layout = CSINN_LAYOUT_O;
+  bias_84->dim[0] = 256;
+  bias_84->dim_count = 1;
+  bias_84->qinfo = (struct csinn_quant_info *)(params_base + 5088824);
+  bias_84->quant_channel = 1;
+  struct csinn_conv2d_params *params_84 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_84->group = 1;
+  params_84->stride_height = 2;
+  params_84->stride_width = 2;
+  params_84->dilation_height = 1;
+  params_84->dilation_width = 1;
+  params_84->conv_extra.kernel_tm = NULL;
+  params_84->conv_extra.conv_mode = CSINN_DIRECT;
+  params_84->pad_top = 1;
+  params_84->pad_left = 1;
+  params_84->pad_down = 1;
+  params_84->pad_right = 1;
+  params_84->base.name = "conv2d_135_fuse_multiply_136_fuse_add_137";
+  csinn_conv2d_init(output_83, output_84, kernel_84, bias_84, params_84);
+  struct csinn_tensor *output_85 = csinn_alloc_tensor(sess);
+  output_85->name = "output_85";
+  output_85->dtype = CSINN_DTYPE_UINT8;
+  output_85->layout = CSINN_LAYOUT_NCHW;
+  output_85->dim[0] = 1;
+  output_85->dim[1] = 256;
+  output_85->dim[2] = 3;
+  output_85->dim[3] = 3;
+  output_85->dim_count = 4;
+  output_85->qinfo = (struct csinn_quant_info *)(params_base + 5089872);
+  output_85->quant_channel = 1;
+  struct csinn_relu_params *params_85 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_85->base.name = "relu_138";
+  csinn_relu_init(output_84, output_85, params_85);
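+  // Fourth head: 1x1 conv, 256 -> 24 channels on the 3x3 map (flattened to 216 below).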
+  struct csinn_tensor *output_86 = csinn_alloc_tensor(sess);
+  output_86->name = "output_86";
+  output_86->dtype = CSINN_DTYPE_UINT8;
+  output_86->layout = CSINN_LAYOUT_NCHW;
+  output_86->dim[0] = 1;
+  output_86->dim[1] = 24;
+  output_86->dim[2] = 3;
+  output_86->dim[3] = 3;
+  output_86->dim_count = 4;
+  output_86->qinfo = (struct csinn_quant_info *)(params_base + 5089896);
+  output_86->quant_channel = 1;
+  struct csinn_tensor *kernel_86 = csinn_alloc_tensor(sess);
+  kernel_86->name = "kernel_86";
+  kernel_86->data = params_base + 5089944;
+  kernel_86->is_const = 1;
+  kernel_86->dtype = CSINN_DTYPE_UINT8;
+  kernel_86->layout = CSINN_LAYOUT_OIHW;
+  kernel_86->dim[0] = 24;
+  kernel_86->dim[1] = 256;
+  kernel_86->dim[2] = 1;
+  kernel_86->dim[3] = 1;
+  kernel_86->dim_count = 4;
+  kernel_86->qinfo = (struct csinn_quant_info *)(params_base + 5089920);
+  kernel_86->quant_channel = 1;
+  struct csinn_tensor *bias_86 = csinn_alloc_tensor(sess);
+  bias_86->name = "bias_86";
+  bias_86->data = params_base + 5096112;
+  bias_86->is_const = 1;
+  bias_86->dtype = CSINN_DTYPE_INT32;
+  bias_86->layout = CSINN_LAYOUT_O;
+  bias_86->dim[0] = 24;
+  bias_86->dim_count = 1;
+  bias_86->qinfo = (struct csinn_quant_info *)(params_base + 5096088);
+  bias_86->quant_channel = 1;
+  struct csinn_conv2d_params *params_86 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_86->group = 1;
+  params_86->stride_height = 1;
+  params_86->stride_width = 1;
+  params_86->dilation_height = 1;
+  params_86->dilation_width = 1;
+  params_86->conv_extra.kernel_tm = NULL;
+  params_86->conv_extra.conv_mode = CSINN_DIRECT;
+  params_86->pad_top = 0;
+  params_86->pad_left = 0;
+  params_86->pad_down = 0;
+  params_86->pad_right = 0;
+  params_86->base.name = "conv2d_139_fuse_bias_add_140";
+  csinn_conv2d_init(output_85, output_86, kernel_86, bias_86, params_86);
+  int32_t *permute_87 = malloc(4 * sizeof(int32_t));
+  permute_87[0] = 0;
+  permute_87[1] = 2;
+  permute_87[2] = 3;
+  permute_87[3] = 1;
+  struct csinn_tensor *output_87 = csinn_alloc_tensor(sess);
+  output_87->name = "output_87";
+  output_87->dtype = CSINN_DTYPE_UINT8;
+  output_87->layout = CSINN_LAYOUT_NCHW;
+  output_87->dim[0] = 1;
+  output_87->dim[1] = 3;
+  output_87->dim[2] = 3;
+  output_87->dim[3] = 24;
+  output_87->dim_count = 4;
+  output_87->qinfo = (struct csinn_quant_info *)(params_base + 5096208);
+  output_87->quant_channel = 1;
+  struct csinn_transpose_params *params_87 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_87->permute = permute_87;
+  params_87->permute_num = 4;
+  params_87->base.name = "transpose_141";
+  csinn_transpose_init(output_86, output_87, params_87);
+  int32_t *shape_88 = malloc(2 * sizeof(int32_t));
+  shape_88[0] = 1;
+  shape_88[1] = 216;
+  struct csinn_tensor *output_88 = csinn_alloc_tensor(sess);
+  output_88->name = "output_88";
+  output_88->dtype = CSINN_DTYPE_UINT8;
+  output_88->layout = CSINN_LAYOUT_NC;
+  output_88->dim[0] = 1;
+  output_88->dim[1] = 216;
+  output_88->dim_count = 2;
+  output_88->qinfo = (struct csinn_quant_info *)(params_base + 5096232);
+  output_88->quant_channel = 1;
+  struct csinn_reshape_params *params_88 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_88->shape = shape_88;
+  params_88->shape_num = 2;
+  params_88->base.name = "batch_flatten_142";
+  csinn_reshape_init(output_87, output_88, params_88);
+  struct csinn_tensor *output_90 = csinn_alloc_tensor(sess);
+  output_90->name = "output_90";
+  output_90->dtype = CSINN_DTYPE_UINT8;
+  output_90->layout = CSINN_LAYOUT_NC;
+  output_90->dim[0] = 1;
+  output_90->dim[1] = 216;
+  output_90->dim_count = 2;
+  output_90->qinfo = (struct csinn_quant_info *)(params_base + 5096256);
+  output_90->quant_channel = 1;
+  struct csinn_tensor *rhs_90 = csinn_alloc_tensor(sess);
+  rhs_90->name = "rhs_90";
+  rhs_90->data = params_base + 5096304;
+  rhs_90->is_const = 1;
+  rhs_90->dtype = CSINN_DTYPE_UINT8;
+  rhs_90->layout = CSINN_LAYOUT_OI;
+  rhs_90->dim[0] = 1;
+  rhs_90->dim[1] = 216;
+  rhs_90->dim_count = 2;
+  rhs_90->qinfo = (struct csinn_quant_info *)(params_base + 5096280);
+  rhs_90->quant_channel = 1;
+  struct csinn_diso_params *params_90 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_90->base.name = "mul_170";
+  csinn_mul_init(output_88, rhs_90, output_90, params_90);
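+  // Final branch from output_85 (3x3x256): 1x1 conv to 128, then a 3x3 stride-2 conv
+  // down to 2x2x256.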
+  struct csinn_tensor *output_92 = csinn_alloc_tensor(sess);
+  output_92->name = "output_92";
+  output_92->dtype = CSINN_DTYPE_UINT8;
+  output_92->layout = CSINN_LAYOUT_NCHW;
+  output_92->dim[0] = 1;
+  output_92->dim[1] = 128;
+  output_92->dim[2] = 3;
+  output_92->dim[3] = 3;
+  output_92->dim_count = 4;
+  output_92->qinfo = (struct csinn_quant_info *)(params_base + 5096520);
+  output_92->quant_channel = 1;
+  struct csinn_tensor *kernel_92 = csinn_alloc_tensor(sess);
+  kernel_92->name = "kernel_92";
+  kernel_92->data = params_base + 5096568;
+  kernel_92->is_const = 1;
+  kernel_92->dtype = CSINN_DTYPE_UINT8;
+  kernel_92->layout = CSINN_LAYOUT_OIHW;
+  kernel_92->dim[0] = 128;
+  kernel_92->dim[1] = 256;
+  kernel_92->dim[2] = 1;
+  kernel_92->dim[3] = 1;
+  kernel_92->dim_count = 4;
+  kernel_92->qinfo = (struct csinn_quant_info *)(params_base + 5096544);
+  kernel_92->quant_channel = 1;
+  struct csinn_tensor *bias_92 = csinn_alloc_tensor(sess);
+  bias_92->name = "bias_92";
+  bias_92->data = params_base + 5129360;
+  bias_92->is_const = 1;
+  bias_92->dtype = CSINN_DTYPE_INT32;
+  bias_92->layout = CSINN_LAYOUT_O;
+  bias_92->dim[0] = 128;
+  bias_92->dim_count = 1;
+  bias_92->qinfo = (struct csinn_quant_info *)(params_base + 5129336);
+  bias_92->quant_channel = 1;
+  struct csinn_conv2d_params *params_92 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_92->group = 1;
+  params_92->stride_height = 1;
+  params_92->stride_width = 1;
+  params_92->dilation_height = 1;
+  params_92->dilation_width = 1;
+  params_92->conv_extra.kernel_tm = NULL;
+  params_92->conv_extra.conv_mode = CSINN_DIRECT;
+  params_92->pad_top = 0;
+  params_92->pad_left = 0;
+  params_92->pad_down = 0;
+  params_92->pad_right = 0;
+  params_92->base.name = "conv2d_143_fuse_multiply_144_fuse_add_145";
+  csinn_conv2d_init(output_85, output_92, kernel_92, bias_92, params_92);
+  struct csinn_tensor *output_93 = csinn_alloc_tensor(sess);
+  output_93->name = "output_93";
+  output_93->dtype = CSINN_DTYPE_UINT8;
+  output_93->layout = CSINN_LAYOUT_NCHW;
+  output_93->dim[0] = 1;
+  output_93->dim[1] = 128;
+  output_93->dim[2] = 3;
+  output_93->dim[3] = 3;
+  output_93->dim_count = 4;
+  output_93->qinfo = (struct csinn_quant_info *)(params_base + 5129872);
+  output_93->quant_channel = 1;
+  struct csinn_relu_params *params_93 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_93->base.name = "relu_146";
+  csinn_relu_init(output_92, output_93, params_93);
+  struct csinn_tensor *output_94 = csinn_alloc_tensor(sess);
+  output_94->name = "output_94";
+  output_94->dtype = CSINN_DTYPE_UINT8;
+  output_94->layout = CSINN_LAYOUT_NCHW;
+  output_94->dim[0] = 1;
+  output_94->dim[1] = 256;
+  output_94->dim[2] = 2;
+  output_94->dim[3] = 2;
+  output_94->dim_count = 4;
+  output_94->qinfo = (struct csinn_quant_info *)(params_base + 5129896);
+  output_94->quant_channel = 1;
+  struct csinn_tensor *kernel_94 = csinn_alloc_tensor(sess);
+  kernel_94->name = "kernel_94";
+  kernel_94->data = params_base + 5129944;
+  kernel_94->is_const = 1;
+  kernel_94->dtype = CSINN_DTYPE_UINT8;
+  kernel_94->layout = CSINN_LAYOUT_OIHW;
+  kernel_94->dim[0] = 256;
+  kernel_94->dim[1] = 128;
+  kernel_94->dim[2] = 3;
+  kernel_94->dim[3] = 3;
+  kernel_94->dim_count = 4;
+  kernel_94->qinfo = (struct csinn_quant_info *)(params_base + 5129920);
+  kernel_94->quant_channel = 1;
+  struct csinn_tensor *bias_94 = csinn_alloc_tensor(sess);
+  bias_94->name = "bias_94";
+  bias_94->data = params_base + 5424880;
+  bias_94->is_const = 1;
+  bias_94->dtype = CSINN_DTYPE_INT32;
+  bias_94->layout = CSINN_LAYOUT_O;
+  bias_94->dim[0] = 256;
+  bias_94->dim_count = 1;
+  bias_94->qinfo = (struct csinn_quant_info *)(params_base + 5424856);
+  bias_94->quant_channel = 1;
+  struct csinn_conv2d_params *params_94 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_94->group = 1;
+  params_94->stride_height = 2;
+  params_94->stride_width = 2;
+  params_94->dilation_height = 1;
+  params_94->dilation_width = 1;
+  params_94->conv_extra.kernel_tm = NULL;
+  params_94->conv_extra.conv_mode = CSINN_DIRECT;
+  params_94->pad_top = 1;
+  params_94->pad_left = 1;
+  params_94->pad_down = 1;
+  params_94->pad_right = 1;
+  params_94->base.name = "conv2d_147_fuse_multiply_148_fuse_add_149";
+  csinn_conv2d_init(output_93, output_94, kernel_94, bias_94, params_94);
+  struct csinn_tensor *output_95 = csinn_alloc_tensor(sess);
+  output_95->name = "output_95";
+  output_95->dtype = CSINN_DTYPE_UINT8;
+  output_95->layout = CSINN_LAYOUT_NCHW;
+  output_95->dim[0] = 1;
+  output_95->dim[1] = 256;
+  output_95->dim[2] = 2;
+  output_95->dim[3] = 2;
+  output_95->dim_count = 4;
+  output_95->qinfo = (struct csinn_quant_info *)(params_base + 5425904);
+  output_95->quant_channel = 1;
+  struct csinn_relu_params *params_95 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_95->base.name = "relu_150";
+  csinn_relu_init(output_94, output_95, params_95);
+  struct csinn_tensor *output_96 = csinn_alloc_tensor(sess);
+  output_96->name = "output_96";
+  output_96->dtype = CSINN_DTYPE_UINT8;
+  output_96->layout = CSINN_LAYOUT_NCHW;
+  output_96->dim[0] = 1;
+  output_96->dim[1] = 24;
+  output_96->dim[2] = 2;
+  output_96->dim[3] = 2;
+  output_96->dim_count = 4;
+  output_96->qinfo = (struct csinn_quant_info *)(params_base + 5425928);
+  output_96->quant_channel = 1;
+  struct csinn_tensor *kernel_96 = csinn_alloc_tensor(sess);
+  kernel_96->name = "kernel_96";
+  kernel_96->data = params_base + 5425976;
+  kernel_96->is_const = 1;
+  kernel_96->dtype = CSINN_DTYPE_UINT8;
+  kernel_96->layout = CSINN_LAYOUT_OIHW;
+  kernel_96->dim[0] = 24;
+  kernel_96->dim[1] = 256;
+  kernel_96->dim[2] = 1;
+  kernel_96->dim[3] = 1;
+  kernel_96->dim_count = 4;
+  kernel_96->qinfo = (struct csinn_quant_info *)(params_base + 5425952);
+  kernel_96->quant_channel = 1;
+  struct csinn_tensor *bias_96 = csinn_alloc_tensor(sess);
+  bias_96->name = "bias_96";
+  bias_96->data = params_base + 5432144;
+  bias_96->is_const = 1;
+  bias_96->dtype = CSINN_DTYPE_INT32;
+  bias_96->layout = CSINN_LAYOUT_O;
+  bias_96->dim[0] = 24;
+  bias_96->dim_count = 1;
+  bias_96->qinfo = (struct csinn_quant_info *)(params_base + 5432120);
+  bias_96->quant_channel = 1;
+  struct csinn_conv2d_params *params_96 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_96->group = 1;
+  params_96->stride_height = 1;
+  params_96->stride_width = 1;
+  params_96->dilation_height = 1;
+  params_96->dilation_width = 1;
+  params_96->conv_extra.kernel_tm = NULL;
+  params_96->conv_extra.conv_mode = CSINN_DIRECT;
+  params_96->pad_top = 0;
+  params_96->pad_left = 0;
+  params_96->pad_down = 0;
+  params_96->pad_right = 0;
+  params_96->base.name = "conv2d_151_fuse_bias_add_152";
+  csinn_conv2d_init(output_95, output_96, kernel_96, bias_96, params_96);
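+  /* Detection-head tail: permute NCHW -> NHWC, flatten to [1, N], then scale
+   * elementwise by a constant (mul) before concatenation. */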
+  int32_t *permute_97 = malloc(4 * 4);
+  permute_97[0] = 0;
+  permute_97[1] = 2;
+  permute_97[2] = 3;
+  permute_97[3] = 1;
+  struct csinn_tensor *output_97 = csinn_alloc_tensor(sess);
+  output_97->name = "output_97";
+  output_97->dtype = CSINN_DTYPE_UINT8;
+  output_97->layout = CSINN_LAYOUT_NCHW;
+  output_97->dim[0] = 1;
+  output_97->dim[1] = 2;
+  output_97->dim[2] = 2;
+  output_97->dim[3] = 24;
+  output_97->dim_count = 4;
+  output_97->qinfo = (struct csinn_quant_info *)(params_base + 5432240);
+  output_97->quant_channel = 1;
+  struct csinn_transpose_params *params_97 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_97->permute = permute_97;
+  params_97->permute_num = 4;
+  params_97->base.name = "transpose_153";
+  csinn_transpose_init(output_96, output_97, params_97);
+  int32_t *shape_98 = malloc(2 * 4);
+  shape_98[0] = 1;
+  shape_98[1] = 96;
+  struct csinn_tensor *output_98 = csinn_alloc_tensor(sess);
+  output_98->name = "output_98";
+  output_98->dtype = CSINN_DTYPE_UINT8;
+  output_98->layout = CSINN_LAYOUT_NC;
+  output_98->dim[0] = 1;
+  output_98->dim[1] = 96;
+  output_98->dim_count = 2;
+  output_98->qinfo = (struct csinn_quant_info *)(params_base + 5432264);
+  output_98->quant_channel = 1;
+  struct csinn_reshape_params *params_98 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_98->shape = shape_98;
+  params_98->shape_num = 2;
+  params_98->base.name = "batch_flatten_154";
+  csinn_reshape_init(output_97, output_98, params_98);
+  struct csinn_tensor *output_100 = csinn_alloc_tensor(sess);
+  output_100->name = "output_100";
+  output_100->dtype = CSINN_DTYPE_UINT8;
+  output_100->layout = CSINN_LAYOUT_NC;
+  output_100->dim[0] = 1;
+  output_100->dim[1] = 96;
+  output_100->dim_count = 2;
+  output_100->qinfo = (struct csinn_quant_info *)(params_base + 5432288);
+  output_100->quant_channel = 1;
+  struct csinn_tensor *rhs_100 = csinn_alloc_tensor(sess);
+  rhs_100->name = "rhs_100";
+  rhs_100->data = params_base + 5432336;
+  rhs_100->is_const = 1;
+  rhs_100->dtype = CSINN_DTYPE_UINT8;
+  rhs_100->layout = CSINN_LAYOUT_OI;
+  rhs_100->dim[0] = 1;
+  rhs_100->dim[1] = 96;
+  rhs_100->dim_count = 2;
+  rhs_100->qinfo = (struct csinn_quant_info *)(params_base + 5432312);
+  rhs_100->quant_channel = 1;
+  struct csinn_diso_params *params_100 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_100->base.name = "mul_171";
+  csinn_mul_init(output_98, rhs_100, output_100, params_100);
+  struct csinn_tensor *output_102 = csinn_alloc_tensor(sess);
+  output_102->name = "output_102";
+  output_102->dtype = CSINN_DTYPE_UINT8;
+  output_102->layout = CSINN_LAYOUT_NCHW;
+  output_102->dim[0] = 1;
+  output_102->dim[1] = 64;
+  output_102->dim[2] = 2;
+  output_102->dim[3] = 2;
+  output_102->dim_count = 4;
+  output_102->qinfo = (struct csinn_quant_info *)(params_base + 5432432);
+  output_102->quant_channel = 1;
+  struct csinn_tensor *kernel_102 = csinn_alloc_tensor(sess);
+  kernel_102->name = "kernel_102";
+  kernel_102->data = params_base + 5432480;
+  kernel_102->is_const = 1;
+  kernel_102->dtype = CSINN_DTYPE_UINT8;
+  kernel_102->layout = CSINN_LAYOUT_OIHW;
+  kernel_102->dim[0] = 64;
+  kernel_102->dim[1] = 256;
+  kernel_102->dim[2] = 1;
+  kernel_102->dim[3] = 1;
+  kernel_102->dim_count = 4;
+  kernel_102->qinfo = (struct csinn_quant_info *)(params_base + 5432456);
+  kernel_102->quant_channel = 1;
+  struct csinn_tensor *bias_102 = csinn_alloc_tensor(sess);
+  bias_102->name = "bias_102";
+  bias_102->data = params_base + 5448888;
+  bias_102->is_const = 1;
+  bias_102->dtype = CSINN_DTYPE_INT32;
+  bias_102->layout = CSINN_LAYOUT_O;
+  bias_102->dim[0] = 64;
+  bias_102->dim_count = 1;
+  bias_102->qinfo = (struct csinn_quant_info *)(params_base + 5448864);
+  bias_102->quant_channel = 1;
+  struct csinn_conv2d_params *params_102 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_102->group = 1;
+  params_102->stride_height = 1;
+  params_102->stride_width = 1;
+  params_102->dilation_height = 1;
+  params_102->dilation_width = 1;
+  params_102->conv_extra.kernel_tm = NULL;
+  params_102->conv_extra.conv_mode = CSINN_DIRECT;
+  params_102->pad_top = 0;
+  params_102->pad_left = 0;
+  params_102->pad_down = 0;
+  params_102->pad_right = 0;
+  params_102->base.name = "conv2d_155_fuse_multiply_156_fuse_add_157";
+  csinn_conv2d_init(output_95, output_102, kernel_102, bias_102, params_102);
+  struct csinn_tensor *output_103 = csinn_alloc_tensor(sess);
+  output_103->name = "output_103";
+  output_103->dtype = CSINN_DTYPE_UINT8;
+  output_103->layout = CSINN_LAYOUT_NCHW;
+  output_103->dim[0] = 1;
+  output_103->dim[1] = 64;
+  output_103->dim[2] = 2;
+  output_103->dim[3] = 2;
+  output_103->dim_count = 4;
+  output_103->qinfo = (struct csinn_quant_info *)(params_base + 5449144);
+  output_103->quant_channel = 1;
+  struct csinn_relu_params *params_103 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_103->base.name = "relu_158";
+  csinn_relu_init(output_102, output_103, params_103);
+  struct csinn_tensor *output_104 = csinn_alloc_tensor(sess);
+  output_104->name = "output_104";
+  output_104->dtype = CSINN_DTYPE_UINT8;
+  output_104->layout = CSINN_LAYOUT_NCHW;
+  output_104->dim[0] = 1;
+  output_104->dim[1] = 128;
+  output_104->dim[2] = 1;
+  output_104->dim[3] = 1;
+  output_104->dim_count = 4;
+  output_104->qinfo = (struct csinn_quant_info *)(params_base + 5449168);
+  output_104->quant_channel = 1;
+  struct csinn_tensor *kernel_104 = csinn_alloc_tensor(sess);
+  kernel_104->name = "kernel_104";
+  kernel_104->data = params_base + 5449216;
+  kernel_104->is_const = 1;
+  kernel_104->dtype = CSINN_DTYPE_UINT8;
+  kernel_104->layout = CSINN_LAYOUT_OIHW;
+  kernel_104->dim[0] = 128;
+  kernel_104->dim[1] = 64;
+  kernel_104->dim[2] = 3;
+  kernel_104->dim[3] = 3;
+  kernel_104->dim_count = 4;
+  kernel_104->qinfo = (struct csinn_quant_info *)(params_base + 5449192);
+  kernel_104->quant_channel = 1;
+  struct csinn_tensor *bias_104 = csinn_alloc_tensor(sess);
+  bias_104->name = "bias_104";
+  bias_104->data = params_base + 5522968;
+  bias_104->is_const = 1;
+  bias_104->dtype = CSINN_DTYPE_INT32;
+  bias_104->layout = CSINN_LAYOUT_O;
+  bias_104->dim[0] = 128;
+  bias_104->dim_count = 1;
+  bias_104->qinfo = (struct csinn_quant_info *)(params_base + 5522944);
+  bias_104->quant_channel = 1;
+  struct csinn_conv2d_params *params_104 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_104->group = 1;
+  params_104->stride_height = 2;
+  params_104->stride_width = 2;
+  params_104->dilation_height = 1;
+  params_104->dilation_width = 1;
+  params_104->conv_extra.kernel_tm = NULL;
+  params_104->conv_extra.conv_mode = CSINN_DIRECT;
+  params_104->pad_top = 1;
+  params_104->pad_left = 1;
+  params_104->pad_down = 1;
+  params_104->pad_right = 1;
+  params_104->base.name = "conv2d_159_fuse_multiply_160_fuse_add_161";
+  csinn_conv2d_init(output_103, output_104, kernel_104, bias_104, params_104);
+  struct csinn_tensor *output_105 = csinn_alloc_tensor(sess);
+  output_105->name = "output_105";
+  output_105->dtype = CSINN_DTYPE_UINT8;
+  output_105->layout = CSINN_LAYOUT_NCHW;
+  output_105->dim[0] = 1;
+  output_105->dim[1] = 128;
+  output_105->dim[2] = 1;
+  output_105->dim[3] = 1;
+  output_105->dim_count = 4;
+  output_105->qinfo = (struct csinn_quant_info *)(params_base + 5523480);
+  output_105->quant_channel = 1;
+  struct csinn_relu_params *params_105 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_105->base.name = "relu_162";
+  csinn_relu_init(output_104, output_105, params_105);
+  struct csinn_tensor *output_106 = csinn_alloc_tensor(sess);
+  output_106->name = "output_106";
+  output_106->dtype = CSINN_DTYPE_UINT8;
+  output_106->layout = CSINN_LAYOUT_NCHW;
+  output_106->dim[0] = 1;
+  output_106->dim[1] = 24;
+  output_106->dim[2] = 1;
+  output_106->dim[3] = 1;
+  output_106->dim_count = 4;
+  output_106->qinfo = (struct csinn_quant_info *)(params_base + 5523504);
+  output_106->quant_channel = 1;
+  struct csinn_tensor *kernel_106 = csinn_alloc_tensor(sess);
+  kernel_106->name = "kernel_106";
+  kernel_106->data = params_base + 5523552;
+  kernel_106->is_const = 1;
+  kernel_106->dtype = CSINN_DTYPE_UINT8;
+  kernel_106->layout = CSINN_LAYOUT_OIHW;
+  kernel_106->dim[0] = 24;
+  kernel_106->dim[1] = 128;
+  kernel_106->dim[2] = 1;
+  kernel_106->dim[3] = 1;
+  kernel_106->dim_count = 4;
+  kernel_106->qinfo = (struct csinn_quant_info *)(params_base + 5523528);
+  kernel_106->quant_channel = 1;
+  struct csinn_tensor *bias_106 = csinn_alloc_tensor(sess);
+  bias_106->name = "bias_106";
+  bias_106->data = params_base + 5526648;
+  bias_106->is_const = 1;
+  bias_106->dtype = CSINN_DTYPE_INT32;
+  bias_106->layout = CSINN_LAYOUT_O;
+  bias_106->dim[0] = 24;
+  bias_106->dim_count = 1;
+  bias_106->qinfo = (struct csinn_quant_info *)(params_base + 5526624);
+  bias_106->quant_channel = 1;
+  struct csinn_conv2d_params *params_106 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_106->group = 1;
+  params_106->stride_height = 1;
+  params_106->stride_width = 1;
+  params_106->dilation_height = 1;
+  params_106->dilation_width = 1;
+  params_106->conv_extra.kernel_tm = NULL;
+  params_106->conv_extra.conv_mode = CSINN_DIRECT;
+  params_106->pad_top = 0;
+  params_106->pad_left = 0;
+  params_106->pad_down = 0;
+  params_106->pad_right = 0;
+  params_106->base.name = "conv2d_163_fuse_bias_add_164";
+  csinn_conv2d_init(output_105, output_106, kernel_106, bias_106, params_106);
+  int32_t *permute_107 = malloc(4 * 4);
+  permute_107[0] = 0;
+  permute_107[1] = 2;
+  permute_107[2] = 3;
+  permute_107[3] = 1;
+  struct csinn_tensor *output_107 = csinn_alloc_tensor(sess);
+  output_107->name = "output_107";
+  output_107->dtype = CSINN_DTYPE_UINT8;
+  output_107->layout = CSINN_LAYOUT_NCHW;
+  output_107->dim[0] = 1;
+  output_107->dim[1] = 1;
+  output_107->dim[2] = 1;
+  output_107->dim[3] = 24;
+  output_107->dim_count = 4;
+  output_107->qinfo = (struct csinn_quant_info *)(params_base + 5526744);
+  output_107->quant_channel = 1;
+  struct csinn_transpose_params *params_107 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_107->permute = permute_107;
+  params_107->permute_num = 4;
+  params_107->base.name = "transpose_165";
+  csinn_transpose_init(output_106, output_107, params_107);
+  int32_t *shape_108 = malloc(2 * 4);
+  shape_108[0] = 1;
+  shape_108[1] = 24;
+  struct csinn_tensor *output_108 = csinn_alloc_tensor(sess);
+  output_108->name = "output_108";
+  output_108->dtype = CSINN_DTYPE_UINT8;
+  output_108->layout = CSINN_LAYOUT_NC;
+  output_108->dim[0] = 1;
+  output_108->dim[1] = 24;
+  output_108->dim_count = 2;
+  output_108->qinfo = (struct csinn_quant_info *)(params_base + 5526768);
+  output_108->quant_channel = 1;
+  struct csinn_reshape_params *params_108 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_108->shape = shape_108;
+  params_108->shape_num = 2;
+  params_108->base.name = "batch_flatten_166";
+  csinn_reshape_init(output_107, output_108, params_108);
+  struct csinn_tensor *output_110 = csinn_alloc_tensor(sess);
+  output_110->name = "output_110";
+  output_110->dtype = CSINN_DTYPE_UINT8;
+  output_110->layout = CSINN_LAYOUT_NC;
+  output_110->dim[0] = 1;
+  output_110->dim[1] = 24;
+  output_110->dim_count = 2;
+  output_110->qinfo = (struct csinn_quant_info *)(params_base + 5526792);
+  output_110->quant_channel = 1;
+  struct csinn_tensor *rhs_110 = csinn_alloc_tensor(sess);
+  rhs_110->name = "rhs_110";
+  rhs_110->data = params_base + 5526840;
+  rhs_110->is_const = 1;
+  rhs_110->dtype = CSINN_DTYPE_UINT8;
+  rhs_110->layout = CSINN_LAYOUT_OI;
+  rhs_110->dim[0] = 1;
+  rhs_110->dim[1] = 24;
+  rhs_110->dim_count = 2;
+  rhs_110->qinfo = (struct csinn_quant_info *)(params_base + 5526816);
+  rhs_110->quant_channel = 1;
+  struct csinn_diso_params *params_110 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_110->base.name = "mul_172";
+  csinn_mul_init(output_108, rhs_110, output_110, params_110);
+  struct csinn_tensor *input_112[6];
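+  /* The six entries of input_112 are assigned later, immediately before the
+   * csinn_concat() call in the execution sequence below. */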
+  struct csinn_tensor *output_112 = csinn_alloc_tensor(sess);
+  output_112->name = "concatenate_167_112";
+  output_112->dtype = CSINN_DTYPE_UINT8;
+  output_112->layout = CSINN_LAYOUT_NC;
+  output_112->dim[0] = 1;
+  output_112->dim[1] = 7668;
+  output_112->dim_count = 2;
+  output_112->qinfo = (struct csinn_quant_info *)(params_base + 5526864);
+  output_112->quant_channel = 1;
+  struct csinn_concat_params *params_112 = csinn_alloc_params(sizeof(struct csinn_concat_params), sess);
+  params_112->inputs_count = 6;
+  params_112->axis = 1;
+  params_112->base.name = "concatenate_167";
+  csinn_concat_init(input_112, output_112, params_112);
+  struct csinn_tensor *output_113 = csinn_alloc_tensor(sess);
+  output_113->name = "output_113";
+  output_113->dtype = CSINN_DTYPE_UINT8;
+  output_113->layout = CSINN_LAYOUT_NCHW;
+  output_113->dim[0] = 1;
+  output_113->dim[1] = 63;
+  output_113->dim[2] = 19;
+  output_113->dim[3] = 19;
+  output_113->dim_count = 4;
+  output_113->qinfo = (struct csinn_quant_info *)(params_base + 5526888);
+  output_113->quant_channel = 1;
+  struct csinn_tensor *kernel_113 = csinn_alloc_tensor(sess);
+  kernel_113->name = "kernel_113";
+  kernel_113->data = params_base + 5526936;
+  kernel_113->is_const = 1;
+  kernel_113->dtype = CSINN_DTYPE_UINT8;
+  kernel_113->layout = CSINN_LAYOUT_OIHW;
+  kernel_113->dim[0] = 63;
+  kernel_113->dim[1] = 512;
+  kernel_113->dim[2] = 1;
+  kernel_113->dim[3] = 1;
+  kernel_113->dim_count = 4;
+  kernel_113->qinfo = (struct csinn_quant_info *)(params_base + 5526912);
+  kernel_113->quant_channel = 1;
+  struct csinn_tensor *bias_113 = csinn_alloc_tensor(sess);
+  bias_113->name = "bias_113";
+  bias_113->data = params_base + 5559216;
+  bias_113->is_const = 1;
+  bias_113->dtype = CSINN_DTYPE_INT32;
+  bias_113->layout = CSINN_LAYOUT_O;
+  bias_113->dim[0] = 63;
+  bias_113->dim_count = 1;
+  bias_113->qinfo = (struct csinn_quant_info *)(params_base + 5559192);
+  bias_113->quant_channel = 1;
+  struct csinn_conv2d_params *params_113 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_113->group = 1;
+  params_113->stride_height = 1;
+  params_113->stride_width = 1;
+  params_113->dilation_height = 1;
+  params_113->dilation_width = 1;
+  params_113->conv_extra.kernel_tm = NULL;
+  params_113->conv_extra.conv_mode = CSINN_DIRECT;
+  params_113->pad_top = 0;
+  params_113->pad_left = 0;
+  params_113->pad_down = 0;
+  params_113->pad_right = 0;
+  params_113->base.name = "conv2d_168_fuse_bias_add_169";
+  csinn_conv2d_init(output_51, output_113, kernel_113, bias_113, params_113);
+  int32_t *permute_114 = malloc(4 * 4);
+  permute_114[0] = 0;
+  permute_114[1] = 2;
+  permute_114[2] = 3;
+  permute_114[3] = 1;
+  struct csinn_tensor *output_114 = csinn_alloc_tensor(sess);
+  output_114->name = "output_114";
+  output_114->dtype = CSINN_DTYPE_UINT8;
+  output_114->layout = CSINN_LAYOUT_NCHW;
+  output_114->dim[0] = 1;
+  output_114->dim[1] = 19;
+  output_114->dim[2] = 19;
+  output_114->dim[3] = 63;
+  output_114->dim_count = 4;
+  output_114->qinfo = (struct csinn_quant_info *)(params_base + 5559468);
+  output_114->quant_channel = 1;
+  struct csinn_transpose_params *params_114 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_114->permute = permute_114;
+  params_114->permute_num = 4;
+  params_114->base.name = "transpose_170";
+  csinn_transpose_init(output_113, output_114, params_114);
+  int32_t *shape_115 = malloc(2 * 4);
+  shape_115[0] = 1;
+  shape_115[1] = 22743;
+  struct csinn_tensor *output_115 = csinn_alloc_tensor(sess);
+  output_115->name = "output_115";
+  output_115->dtype = CSINN_DTYPE_UINT8;
+  output_115->layout = CSINN_LAYOUT_NC;
+  output_115->dim[0] = 1;
+  output_115->dim[1] = 22743;
+  output_115->dim_count = 2;
+  output_115->qinfo = (struct csinn_quant_info *)(params_base + 5559492);
+  output_115->quant_channel = 1;
+  struct csinn_reshape_params *params_115 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_115->shape = shape_115;
+  params_115->shape_num = 2;
+  params_115->base.name = "batch_flatten_171";
+  csinn_reshape_init(output_114, output_115, params_115);
+  struct csinn_tensor *output_117 = csinn_alloc_tensor(sess);
+  output_117->name = "output_117";
+  output_117->dtype = CSINN_DTYPE_UINT8;
+  output_117->layout = CSINN_LAYOUT_NC;
+  output_117->dim[0] = 1;
+  output_117->dim[1] = 22743;
+  output_117->dim_count = 2;
+  output_117->qinfo = (struct csinn_quant_info *)(params_base + 5559516);
+  output_117->quant_channel = 1;
+  struct csinn_tensor *rhs_117 = csinn_alloc_tensor(sess);
+  rhs_117->name = "rhs_117";
+  rhs_117->data = params_base + 5559564;
+  rhs_117->is_const = 1;
+  rhs_117->dtype = CSINN_DTYPE_UINT8;
+  rhs_117->layout = CSINN_LAYOUT_OI;
+  rhs_117->dim[0] = 1;
+  rhs_117->dim[1] = 22743;
+  rhs_117->dim_count = 2;
+  rhs_117->qinfo = (struct csinn_quant_info *)(params_base + 5559540);
+  rhs_117->quant_channel = 1;
+  struct csinn_diso_params *params_117 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_117->base.name = "mul_173";
+  csinn_mul_init(output_115, rhs_117, output_117, params_117);
+  struct csinn_tensor *output_119 = csinn_alloc_tensor(sess);
+  output_119->name = "output_119";
+  output_119->dtype = CSINN_DTYPE_UINT8;
+  output_119->layout = CSINN_LAYOUT_NCHW;
+  output_119->dim[0] = 1;
+  output_119->dim[1] = 126;
+  output_119->dim[2] = 10;
+  output_119->dim[3] = 10;
+  output_119->dim_count = 4;
+  output_119->qinfo = (struct csinn_quant_info *)(params_base + 5582307);
+  output_119->quant_channel = 1;
+  struct csinn_tensor *kernel_119 = csinn_alloc_tensor(sess);
+  kernel_119->name = "kernel_119";
+  kernel_119->data = params_base + 5582355;
+  kernel_119->is_const = 1;
+  kernel_119->dtype = CSINN_DTYPE_UINT8;
+  kernel_119->layout = CSINN_LAYOUT_OIHW;
+  kernel_119->dim[0] = 126;
+  kernel_119->dim[1] = 1024;
+  kernel_119->dim[2] = 1;
+  kernel_119->dim[3] = 1;
+  kernel_119->dim_count = 4;
+  kernel_119->qinfo = (struct csinn_quant_info *)(params_base + 5582331);
+  kernel_119->quant_channel = 1;
+  struct csinn_tensor *bias_119 = csinn_alloc_tensor(sess);
+  bias_119->name = "bias_119";
+  bias_119->data = params_base + 5711403;
+  bias_119->is_const = 1;
+  bias_119->dtype = CSINN_DTYPE_INT32;
+  bias_119->layout = CSINN_LAYOUT_O;
+  bias_119->dim[0] = 126;
+  bias_119->dim_count = 1;
+  bias_119->qinfo = (struct csinn_quant_info *)(params_base + 5711379);
+  bias_119->quant_channel = 1;
+  struct csinn_conv2d_params *params_119 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_119->group = 1;
+  params_119->stride_height = 1;
+  params_119->stride_width = 1;
+  params_119->dilation_height = 1;
+  params_119->dilation_width = 1;
+  params_119->conv_extra.kernel_tm = NULL;
+  params_119->conv_extra.conv_mode = CSINN_DIRECT;
+  params_119->pad_top = 0;
+  params_119->pad_left = 0;
+  params_119->pad_down = 0;
+  params_119->pad_right = 0;
+  params_119->base.name = "conv2d_172_fuse_bias_add_173";
+  csinn_conv2d_init(output_65, output_119, kernel_119, bias_119, params_119);
+  int32_t *permute_120 = malloc(4 * 4);
+  permute_120[0] = 0;
+  permute_120[1] = 2;
+  permute_120[2] = 3;
+  permute_120[3] = 1;
+  struct csinn_tensor *output_120 = csinn_alloc_tensor(sess);
+  output_120->name = "output_120";
+  output_120->dtype = CSINN_DTYPE_UINT8;
+  output_120->layout = CSINN_LAYOUT_NCHW;
+  output_120->dim[0] = 1;
+  output_120->dim[1] = 10;
+  output_120->dim[2] = 10;
+  output_120->dim[3] = 126;
+  output_120->dim_count = 4;
+  output_120->qinfo = (struct csinn_quant_info *)(params_base + 5711907);
+  output_120->quant_channel = 1;
+  struct csinn_transpose_params *params_120 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_120->permute = permute_120;
+  params_120->permute_num = 4;
+  params_120->base.name = "transpose_174";
+  csinn_transpose_init(output_119, output_120, params_120);
+  int32_t *shape_121 = malloc(2 * 4);
+  shape_121[0] = 1;
+  shape_121[1] = 12600;
+  struct csinn_tensor *output_121 = csinn_alloc_tensor(sess);
+  output_121->name = "output_121";
+  output_121->dtype = CSINN_DTYPE_UINT8;
+  output_121->layout = CSINN_LAYOUT_NC;
+  output_121->dim[0] = 1;
+  output_121->dim[1] = 12600;
+  output_121->dim_count = 2;
+  output_121->qinfo = (struct csinn_quant_info *)(params_base + 5711931);
+  output_121->quant_channel = 1;
+  struct csinn_reshape_params *params_121 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_121->shape = shape_121;
+  params_121->shape_num = 2;
+  params_121->base.name = "batch_flatten_175";
+  csinn_reshape_init(output_120, output_121, params_121);
+  struct csinn_tensor *output_123 = csinn_alloc_tensor(sess);
+  output_123->name = "output_123";
+  output_123->dtype = CSINN_DTYPE_UINT8;
+  output_123->layout = CSINN_LAYOUT_NC;
+  output_123->dim[0] = 1;
+  output_123->dim[1] = 12600;
+  output_123->dim_count = 2;
+  output_123->qinfo = (struct csinn_quant_info *)(params_base + 5711955);
+  output_123->quant_channel = 1;
+  struct csinn_tensor *rhs_123 = csinn_alloc_tensor(sess);
+  rhs_123->name = "rhs_123";
+  rhs_123->data = params_base + 5712003;
+  rhs_123->is_const = 1;
+  rhs_123->dtype = CSINN_DTYPE_UINT8;
+  rhs_123->layout = CSINN_LAYOUT_OI;
+  rhs_123->dim[0] = 1;
+  rhs_123->dim[1] = 12600;
+  rhs_123->dim_count = 2;
+  rhs_123->qinfo = (struct csinn_quant_info *)(params_base + 5711979);
+  rhs_123->quant_channel = 1;
+  struct csinn_diso_params *params_123 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_123->base.name = "mul_174";
+  csinn_mul_init(output_121, rhs_123, output_123, params_123);
+  struct csinn_tensor *output_125 = csinn_alloc_tensor(sess);
+  output_125->name = "output_125";
+  output_125->dtype = CSINN_DTYPE_UINT8;
+  output_125->layout = CSINN_LAYOUT_NCHW;
+  output_125->dim[0] = 1;
+  output_125->dim[1] = 126;
+  output_125->dim[2] = 5;
+  output_125->dim[3] = 5;
+  output_125->dim_count = 4;
+  output_125->qinfo = (struct csinn_quant_info *)(params_base + 5724603);
+  output_125->quant_channel = 1;
+  struct csinn_tensor *kernel_125 = csinn_alloc_tensor(sess);
+  kernel_125->name = "kernel_125";
+  kernel_125->data = params_base + 5724651;
+  kernel_125->is_const = 1;
+  kernel_125->dtype = CSINN_DTYPE_UINT8;
+  kernel_125->layout = CSINN_LAYOUT_OIHW;
+  kernel_125->dim[0] = 126;
+  kernel_125->dim[1] = 512;
+  kernel_125->dim[2] = 1;
+  kernel_125->dim[3] = 1;
+  kernel_125->dim_count = 4;
+  kernel_125->qinfo = (struct csinn_quant_info *)(params_base + 5724627);
+  kernel_125->quant_channel = 1;
+  struct csinn_tensor *bias_125 = csinn_alloc_tensor(sess);
+  bias_125->name = "bias_125";
+  bias_125->data = params_base + 5789187;
+  bias_125->is_const = 1;
+  bias_125->dtype = CSINN_DTYPE_INT32;
+  bias_125->layout = CSINN_LAYOUT_O;
+  bias_125->dim[0] = 126;
+  bias_125->dim_count = 1;
+  bias_125->qinfo = (struct csinn_quant_info *)(params_base + 5789163);
+  bias_125->quant_channel = 1;
+  struct csinn_conv2d_params *params_125 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_125->group = 1;
+  params_125->stride_height = 1;
+  params_125->stride_width = 1;
+  params_125->dilation_height = 1;
+  params_125->dilation_width = 1;
+  params_125->conv_extra.kernel_tm = NULL;
+  params_125->conv_extra.conv_mode = CSINN_DIRECT;
+  params_125->pad_top = 0;
+  params_125->pad_left = 0;
+  params_125->pad_down = 0;
+  params_125->pad_right = 0;
+  params_125->base.name = "conv2d_176_fuse_bias_add_177";
+  csinn_conv2d_init(output_75, output_125, kernel_125, bias_125, params_125);
+  int32_t *permute_126 = malloc(4 * 4);
+  permute_126[0] = 0;
+  permute_126[1] = 2;
+  permute_126[2] = 3;
+  permute_126[3] = 1;
+  struct csinn_tensor *output_126 = csinn_alloc_tensor(sess);
+  output_126->name = "output_126";
+  output_126->dtype = CSINN_DTYPE_UINT8;
+  output_126->layout = CSINN_LAYOUT_NCHW;
+  output_126->dim[0] = 1;
+  output_126->dim[1] = 5;
+  output_126->dim[2] = 5;
+  output_126->dim[3] = 126;
+  output_126->dim_count = 4;
+  output_126->qinfo = (struct csinn_quant_info *)(params_base + 5789691);
+  output_126->quant_channel = 1;
+  struct csinn_transpose_params *params_126 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_126->permute = permute_126;
+  params_126->permute_num = 4;
+  params_126->base.name = "transpose_178";
+  csinn_transpose_init(output_125, output_126, params_126);
+  int32_t *shape_127 = malloc(2 * 4);
+  shape_127[0] = 1;
+  shape_127[1] = 3150;
+  struct csinn_tensor *output_127 = csinn_alloc_tensor(sess);
+  output_127->name = "output_127";
+  output_127->dtype = CSINN_DTYPE_UINT8;
+  output_127->layout = CSINN_LAYOUT_NC;
+  output_127->dim[0] = 1;
+  output_127->dim[1] = 3150;
+  output_127->dim_count = 2;
+  output_127->qinfo = (struct csinn_quant_info *)(params_base + 5789715);
+  output_127->quant_channel = 1;
+  struct csinn_reshape_params *params_127 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_127->shape = shape_127;
+  params_127->shape_num = 2;
+  params_127->base.name = "batch_flatten_179";
+  csinn_reshape_init(output_126, output_127, params_127);
+  struct csinn_tensor *output_129 = csinn_alloc_tensor(sess);
+  output_129->name = "output_129";
+  output_129->dtype = CSINN_DTYPE_UINT8;
+  output_129->layout = CSINN_LAYOUT_NC;
+  output_129->dim[0] = 1;
+  output_129->dim[1] = 3150;
+  output_129->dim_count = 2;
+  output_129->qinfo = (struct csinn_quant_info *)(params_base + 5789739);
+  output_129->quant_channel = 1;
+  struct csinn_tensor *rhs_129 = csinn_alloc_tensor(sess);
+  rhs_129->name = "rhs_129";
+  rhs_129->data = params_base + 5789787;
+  rhs_129->is_const = 1;
+  rhs_129->dtype = CSINN_DTYPE_UINT8;
+  rhs_129->layout = CSINN_LAYOUT_OI;
+  rhs_129->dim[0] = 1;
+  rhs_129->dim[1] = 3150;
+  rhs_129->dim_count = 2;
+  rhs_129->qinfo = (struct csinn_quant_info *)(params_base + 5789763);
+  rhs_129->quant_channel = 1;
+  struct csinn_diso_params *params_129 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_129->base.name = "mul_175";
+  csinn_mul_init(output_127, rhs_129, output_129, params_129);
+  struct csinn_tensor *output_131 = csinn_alloc_tensor(sess);
+  output_131->name = "output_131";
+  output_131->dtype = CSINN_DTYPE_UINT8;
+  output_131->layout = CSINN_LAYOUT_NCHW;
+  output_131->dim[0] = 1;
+  output_131->dim[1] = 126;
+  output_131->dim[2] = 3;
+  output_131->dim[3] = 3;
+  output_131->dim_count = 4;
+  output_131->qinfo = (struct csinn_quant_info *)(params_base + 5792937);
+  output_131->quant_channel = 1;
+  struct csinn_tensor *kernel_131 = csinn_alloc_tensor(sess);
+  kernel_131->name = "kernel_131";
+  kernel_131->data = params_base + 5792985;
+  kernel_131->is_const = 1;
+  kernel_131->dtype = CSINN_DTYPE_UINT8;
+  kernel_131->layout = CSINN_LAYOUT_OIHW;
+  kernel_131->dim[0] = 126;
+  kernel_131->dim[1] = 256;
+  kernel_131->dim[2] = 1;
+  kernel_131->dim[3] = 1;
+  kernel_131->dim_count = 4;
+  kernel_131->qinfo = (struct csinn_quant_info *)(params_base + 5792961);
+  kernel_131->quant_channel = 1;
+  struct csinn_tensor *bias_131 = csinn_alloc_tensor(sess);
+  bias_131->name = "bias_131";
+  bias_131->data = params_base + 5825265;
+  bias_131->is_const = 1;
+  bias_131->dtype = CSINN_DTYPE_INT32;
+  bias_131->layout = CSINN_LAYOUT_O;
+  bias_131->dim[0] = 126;
+  bias_131->dim_count = 1;
+  bias_131->qinfo = (struct csinn_quant_info *)(params_base + 5825241);
+  bias_131->quant_channel = 1;
+  struct csinn_conv2d_params *params_131 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_131->group = 1;
+  params_131->stride_height = 1;
+  params_131->stride_width = 1;
+  params_131->dilation_height = 1;
+  params_131->dilation_width = 1;
+  params_131->conv_extra.kernel_tm = NULL;
+  params_131->conv_extra.conv_mode = CSINN_DIRECT;
+  params_131->pad_top = 0;
+  params_131->pad_left = 0;
+  params_131->pad_down = 0;
+  params_131->pad_right = 0;
+  params_131->base.name = "conv2d_180_fuse_bias_add_181";
+  csinn_conv2d_init(output_85, output_131, kernel_131, bias_131, params_131);
+  int32_t *permute_132 = malloc(4 * 4);
+  permute_132[0] = 0;
+  permute_132[1] = 2;
+  permute_132[2] = 3;
+  permute_132[3] = 1;
+  struct csinn_tensor *output_132 = csinn_alloc_tensor(sess);
+  output_132->name = "output_132";
+  output_132->dtype = CSINN_DTYPE_UINT8;
+  output_132->layout = CSINN_LAYOUT_NCHW;
+  output_132->dim[0] = 1;
+  output_132->dim[1] = 3;
+  output_132->dim[2] = 3;
+  output_132->dim[3] = 126;
+  output_132->dim_count = 4;
+  output_132->qinfo = (struct csinn_quant_info *)(params_base + 5825769);
+  output_132->quant_channel = 1;
+  struct csinn_transpose_params *params_132 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_132->permute = permute_132;
+  params_132->permute_num = 4;
+  params_132->base.name = "transpose_182";
+  csinn_transpose_init(output_131, output_132, params_132);
+  int32_t *shape_133 = malloc(2 * 4);
+  shape_133[0] = 1;
+  shape_133[1] = 1134;
+  struct csinn_tensor *output_133 = csinn_alloc_tensor(sess);
+  output_133->name = "output_133";
+  output_133->dtype = CSINN_DTYPE_UINT8;
+  output_133->layout = CSINN_LAYOUT_NC;
+  output_133->dim[0] = 1;
+  output_133->dim[1] = 1134;
+  output_133->dim_count = 2;
+  output_133->qinfo = (struct csinn_quant_info *)(params_base + 5825793);
+  output_133->quant_channel = 1;
+  struct csinn_reshape_params *params_133 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_133->shape = shape_133;
+  params_133->shape_num = 2;
+  params_133->base.name = "batch_flatten_183";
+  csinn_reshape_init(output_132, output_133, params_133);
+  struct csinn_tensor *output_135 = csinn_alloc_tensor(sess);
+  output_135->name = "output_135";
+  output_135->dtype = CSINN_DTYPE_UINT8;
+  output_135->layout = CSINN_LAYOUT_NC;
+  output_135->dim[0] = 1;
+  output_135->dim[1] = 1134;
+  output_135->dim_count = 2;
+  output_135->qinfo = (struct csinn_quant_info *)(params_base + 5825817);
+  output_135->quant_channel = 1;
+  struct csinn_tensor *rhs_135 = csinn_alloc_tensor(sess);
+  rhs_135->name = "rhs_135";
+  rhs_135->data = params_base + 5825865;
+  rhs_135->is_const = 1;
+  rhs_135->dtype = CSINN_DTYPE_UINT8;
+  rhs_135->layout = CSINN_LAYOUT_OI;
+  rhs_135->dim[0] = 1;
+  rhs_135->dim[1] = 1134;
+  rhs_135->dim_count = 2;
+  rhs_135->qinfo = (struct csinn_quant_info *)(params_base + 5825841);
+  rhs_135->quant_channel = 1;
+  struct csinn_diso_params *params_135 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_135->base.name = "mul_176";
+  csinn_mul_init(output_133, rhs_135, output_135, params_135);
+  struct csinn_tensor *output_137 = csinn_alloc_tensor(sess);
+  output_137->name = "output_137";
+  output_137->dtype = CSINN_DTYPE_UINT8;
+  output_137->layout = CSINN_LAYOUT_NCHW;
+  output_137->dim[0] = 1;
+  output_137->dim[1] = 126;
+  output_137->dim[2] = 2;
+  output_137->dim[3] = 2;
+  output_137->dim_count = 4;
+  output_137->qinfo = (struct csinn_quant_info *)(params_base + 5826999);
+  output_137->quant_channel = 1;
+  struct csinn_tensor *kernel_137 = csinn_alloc_tensor(sess);
+  kernel_137->name = "kernel_137";
+  kernel_137->data = params_base + 5827047;
+  kernel_137->is_const = 1;
+  kernel_137->dtype = CSINN_DTYPE_UINT8;
+  kernel_137->layout = CSINN_LAYOUT_OIHW;
+  kernel_137->dim[0] = 126;
+  kernel_137->dim[1] = 256;
+  kernel_137->dim[2] = 1;
+  kernel_137->dim[3] = 1;
+  kernel_137->dim_count = 4;
+  kernel_137->qinfo = (struct csinn_quant_info *)(params_base + 5827023);
+  kernel_137->quant_channel = 1;
+  struct csinn_tensor *bias_137 = csinn_alloc_tensor(sess);
+  bias_137->name = "bias_137";
+  bias_137->data = params_base + 5859327;
+  bias_137->is_const = 1;
+  bias_137->dtype = CSINN_DTYPE_INT32;
+  bias_137->layout = CSINN_LAYOUT_O;
+  bias_137->dim[0] = 126;
+  bias_137->dim_count = 1;
+  bias_137->qinfo = (struct csinn_quant_info *)(params_base + 5859303);
+  bias_137->quant_channel = 1;
+  struct csinn_conv2d_params *params_137 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_137->group = 1;
+  params_137->stride_height = 1;
+  params_137->stride_width = 1;
+  params_137->dilation_height = 1;
+  params_137->dilation_width = 1;
+  params_137->conv_extra.kernel_tm = NULL;
+  params_137->conv_extra.conv_mode = CSINN_DIRECT;
+  params_137->pad_top = 0;
+  params_137->pad_left = 0;
+  params_137->pad_down = 0;
+  params_137->pad_right = 0;
+  params_137->base.name = "conv2d_184_fuse_bias_add_185";
+  csinn_conv2d_init(output_95, output_137, kernel_137, bias_137, params_137);
+  int32_t *permute_138 = malloc(4 * 4);
+  permute_138[0] = 0;
+  permute_138[1] = 2;
+  permute_138[2] = 3;
+  permute_138[3] = 1;
+  struct csinn_tensor *output_138 = csinn_alloc_tensor(sess);
+  output_138->name = "output_138";
+  output_138->dtype = CSINN_DTYPE_UINT8;
+  output_138->layout = CSINN_LAYOUT_NCHW;
+  output_138->dim[0] = 1;
+  output_138->dim[1] = 2;
+  output_138->dim[2] = 2;
+  output_138->dim[3] = 126;
+  output_138->dim_count = 4;
+  output_138->qinfo = (struct csinn_quant_info *)(params_base + 5859831);
+  output_138->quant_channel = 1;
+  struct csinn_transpose_params *params_138 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_138->permute = permute_138;
+  params_138->permute_num = 4;
+  params_138->base.name = "transpose_186";
+  csinn_transpose_init(output_137, output_138, params_138);
+  int32_t *shape_139 = malloc(2 * 4);
+  shape_139[0] = 1;
+  shape_139[1] = 504;
+  struct csinn_tensor *output_139 = csinn_alloc_tensor(sess);
+  output_139->name = "output_139";
+  output_139->dtype = CSINN_DTYPE_UINT8;
+  output_139->layout = CSINN_LAYOUT_NC;
+  output_139->dim[0] = 1;
+  output_139->dim[1] = 504;
+  output_139->dim_count = 2;
+  output_139->qinfo = (struct csinn_quant_info *)(params_base + 5859855);
+  output_139->quant_channel = 1;
+  struct csinn_reshape_params *params_139 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_139->shape = shape_139;
+  params_139->shape_num = 2;
+  params_139->base.name = "batch_flatten_187";
+  csinn_reshape_init(output_138, output_139, params_139);
+  struct csinn_tensor *output_141 = csinn_alloc_tensor(sess);
+  output_141->name = "output_141";
+  output_141->dtype = CSINN_DTYPE_UINT8;
+  output_141->layout = CSINN_LAYOUT_NC;
+  output_141->dim[0] = 1;
+  output_141->dim[1] = 504;
+  output_141->dim_count = 2;
+  output_141->qinfo = (struct csinn_quant_info *)(params_base + 5859879);
+  output_141->quant_channel = 1;
+  struct csinn_tensor *rhs_141 = csinn_alloc_tensor(sess);
+  rhs_141->name = "rhs_141";
+  rhs_141->data = params_base + 5859927;
+  rhs_141->is_const = 1;
+  rhs_141->dtype = CSINN_DTYPE_UINT8;
+  rhs_141->layout = CSINN_LAYOUT_OI;
+  rhs_141->dim[0] = 1;
+  rhs_141->dim[1] = 504;
+  rhs_141->dim_count = 2;
+  rhs_141->qinfo = (struct csinn_quant_info *)(params_base + 5859903);
+  rhs_141->quant_channel = 1;
+  struct csinn_diso_params *params_141 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_141->base.name = "mul_177";
+  csinn_mul_init(output_139, rhs_141, output_141, params_141);
+  struct csinn_tensor *output_143 = csinn_alloc_tensor(sess);
+  output_143->name = "output_143";
+  output_143->dtype = CSINN_DTYPE_UINT8;
+  output_143->layout = CSINN_LAYOUT_NCHW;
+  output_143->dim[0] = 1;
+  output_143->dim[1] = 126;
+  output_143->dim[2] = 1;
+  output_143->dim[3] = 1;
+  output_143->dim_count = 4;
+  output_143->qinfo = (struct csinn_quant_info *)(params_base + 5860431);
+  output_143->quant_channel = 1;
+  struct csinn_tensor *kernel_143 = csinn_alloc_tensor(sess);
+  kernel_143->name = "kernel_143";
+  kernel_143->data = params_base + 5860479;
+  kernel_143->is_const = 1;
+  kernel_143->dtype = CSINN_DTYPE_UINT8;
+  kernel_143->layout = CSINN_LAYOUT_OIHW;
+  kernel_143->dim[0] = 126;
+  kernel_143->dim[1] = 128;
+  kernel_143->dim[2] = 1;
+  kernel_143->dim[3] = 1;
+  kernel_143->dim_count = 4;
+  kernel_143->qinfo = (struct csinn_quant_info *)(params_base + 5860455);
+  kernel_143->quant_channel = 1;
+  struct csinn_tensor *bias_143 = csinn_alloc_tensor(sess);
+  bias_143->name = "bias_143";
+  bias_143->data = params_base + 5876631;
+  bias_143->is_const = 1;
+  bias_143->dtype = CSINN_DTYPE_INT32;
+  bias_143->layout = CSINN_LAYOUT_O;
+  bias_143->dim[0] = 126;
+  bias_143->dim_count = 1;
+  bias_143->qinfo = (struct csinn_quant_info *)(params_base + 5876607);
+  bias_143->quant_channel = 1;
+  struct csinn_conv2d_params *params_143 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_143->group = 1;
+  params_143->stride_height = 1;
+  params_143->stride_width = 1;
+  params_143->dilation_height = 1;
+  params_143->dilation_width = 1;
+  params_143->conv_extra.kernel_tm = NULL;
+  params_143->conv_extra.conv_mode = CSINN_DIRECT;
+  params_143->pad_top = 0;
+  params_143->pad_left = 0;
+  params_143->pad_down = 0;
+  params_143->pad_right = 0;
+  params_143->base.name = "conv2d_188_fuse_bias_add_189";
+  csinn_conv2d_init(output_105, output_143, kernel_143, bias_143, params_143);
+  int32_t *permute_144 = malloc(4 * 4);
+  permute_144[0] = 0;
+  permute_144[1] = 2;
+  permute_144[2] = 3;
+  permute_144[3] = 1;
+  struct csinn_tensor *output_144 = csinn_alloc_tensor(sess);
+  output_144->name = "output_144";
+  output_144->dtype = CSINN_DTYPE_UINT8;
+  output_144->layout = CSINN_LAYOUT_NCHW;
+  output_144->dim[0] = 1;
+  output_144->dim[1] = 1;
+  output_144->dim[2] = 1;
+  output_144->dim[3] = 126;
+  output_144->dim_count = 4;
+  output_144->qinfo = (struct csinn_quant_info *)(params_base + 5877135);
+  output_144->quant_channel = 1;
+  struct csinn_transpose_params *params_144 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_144->permute = permute_144;
+  params_144->permute_num = 4;
+  params_144->base.name = "transpose_190";
+  csinn_transpose_init(output_143, output_144, params_144);
+  int32_t *shape_145 = malloc(2 * 4);
+  shape_145[0] = 1;
+  shape_145[1] = 126;
+  struct csinn_tensor *output_145 = csinn_alloc_tensor(sess);
+  output_145->name = "output_145";
+  output_145->dtype = CSINN_DTYPE_UINT8;
+  output_145->layout = CSINN_LAYOUT_NC;
+  output_145->dim[0] = 1;
+  output_145->dim[1] = 126;
+  output_145->dim_count = 2;
+  output_145->qinfo = (struct csinn_quant_info *)(params_base + 5877159);
+  output_145->quant_channel = 1;
+  struct csinn_reshape_params *params_145 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_145->shape = shape_145;
+  params_145->shape_num = 2;
+  params_145->base.name = "batch_flatten_191";
+  csinn_reshape_init(output_144, output_145, params_145);
+  struct csinn_tensor *output_147 = csinn_alloc_tensor(sess);
+  output_147->name = "output_147";
+  output_147->dtype = CSINN_DTYPE_UINT8;
+  output_147->layout = CSINN_LAYOUT_NC;
+  output_147->dim[0] = 1;
+  output_147->dim[1] = 126;
+  output_147->dim_count = 2;
+  output_147->qinfo = (struct csinn_quant_info *)(params_base + 5877183);
+  output_147->quant_channel = 1;
+  struct csinn_tensor *rhs_147 = csinn_alloc_tensor(sess);
+  rhs_147->name = "rhs_147";
+  rhs_147->data = params_base + 5877231;
+  rhs_147->is_const = 1;
+  rhs_147->dtype = CSINN_DTYPE_UINT8;
+  rhs_147->layout = CSINN_LAYOUT_OI;
+  rhs_147->dim[0] = 1;
+  rhs_147->dim[1] = 126;
+  rhs_147->dim_count = 2;
+  rhs_147->qinfo = (struct csinn_quant_info *)(params_base + 5877207);
+  rhs_147->quant_channel = 1;
+  struct csinn_diso_params *params_147 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_147->base.name = "mul_178";
+  csinn_mul_init(output_145, rhs_147, output_147, params_147);
+  struct csinn_tensor *input_149[6];
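+  /* As with input_112, the entries of input_149 are assigned later, right
+   * before the corresponding csinn_concat() call. */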
+  struct csinn_tensor *output_149 = csinn_alloc_tensor(sess);
+  output_149->name = "output_149";
+  output_149->dtype = CSINN_DTYPE_UINT8;
+  output_149->layout = CSINN_LAYOUT_NC;
+  output_149->dim[0] = 1;
+  output_149->dim[1] = 40257;
+  output_149->dim_count = 2;
+  output_149->qinfo = (struct csinn_quant_info *)(params_base + 5877357);
+  output_149->quant_channel = 1;
+  struct csinn_concat_params *params_149 = csinn_alloc_params(sizeof(struct csinn_concat_params), sess);
+  params_149->inputs_count = 6;
+  params_149->axis = 1;
+  params_149->base.name = "concatenate_192";
+  csinn_concat_init(input_149, output_149, params_149);
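+  /* 40257 concatenated class scores = 1917 anchors x 21 classes; the reshape
+   * to [1, 1917, 21] lets softmax_194 normalize along axis 2, the class
+   * axis. */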
+  int32_t *shape_150 = malloc(3 * 4);
+  shape_150[0] = 1;
+  shape_150[1] = 1917;
+  shape_150[2] = 21;
+  struct csinn_tensor *output_150 = csinn_alloc_tensor(sess);
+  output_150->name = "output_150";
+  output_150->dtype = CSINN_DTYPE_UINT8;
+  output_150->layout = CSINN_LAYOUT_NCW;
+  output_150->dim[0] = 1;
+  output_150->dim[1] = 1917;
+  output_150->dim[2] = 21;
+  output_150->dim_count = 3;
+  output_150->qinfo = (struct csinn_quant_info *)(params_base + 5877381);
+  output_150->quant_channel = 1;
+  struct csinn_reshape_params *params_150 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_150->shape = shape_150;
+  params_150->shape_num = 3;
+  params_150->base.name = "reshape_193";
+  csinn_reshape_init(output_149, output_150, params_150);
+  struct csinn_tensor *output_151 = csinn_alloc_tensor(sess);
+  output_151->name = "output_151";
+  output_151->dtype = CSINN_DTYPE_UINT8;
+  output_151->layout = CSINN_LAYOUT_NCW;
+  output_151->dim[0] = 1;
+  output_151->dim[1] = 1917;
+  output_151->dim[2] = 21;
+  output_151->dim_count = 3;
+  output_151->qinfo = (struct csinn_quant_info *)(params_base + 5877405);
+  output_151->quant_channel = 1;
+  struct csinn_softmax_params *params_151 = csinn_alloc_params(sizeof(struct csinn_softmax_params), sess);
+  params_151->axis = 2;
+  params_151->base.name = "softmax_194";
+  csinn_softmax_init(output_150, output_151, params_151);
+  int32_t *shape_152 = malloc(2 * 4);
+  shape_152[0] = 1;
+  shape_152[1] = 40257;
+  struct csinn_tensor *output_152 = csinn_alloc_tensor(sess);
+  output_152->name = "batch_flatten_195_152";
+  output_152->dtype = CSINN_DTYPE_UINT8;
+  output_152->layout = CSINN_LAYOUT_NC;
+  output_152->dim[0] = 1;
+  output_152->dim[1] = 40257;
+  output_152->dim_count = 2;
+  output_152->qinfo = (struct csinn_quant_info *)(params_base + 5877429);
+  output_152->quant_channel = 1;
+  struct csinn_reshape_params *params_152 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_152->shape = shape_152;
+  params_152->shape_num = 2;
+  params_152->base.name = "batch_flatten_195";
+  csinn_reshape_init(output_151, output_152, params_152);
+  data->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
+  csinn_set_tensor_entry(data, sess);
+  csinn_set_input(0, data, sess);
+
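+  /* Execution sequence for the graph constructed above: backbone conv2d/relu
+   * pairs, six detection-head branches (conv2d -> transpose -> reshape ->
+   * mul), two concats, and a final reshape/softmax/reshape over the class
+   * scores. */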
+  csinn_mul(data, rhs_1, output_1, params_1);
+  csinn_add(output_1, rhs_4, output_4, params_4);
+  csinn_conv2d(output_4, output_6, kernel_6, bias_6, params_6);
+  csinn_relu(output_6, output_7, params_7);
+  csinn_conv2d(output_7, output_8, kernel_8, bias_8, params_8);
+  csinn_relu(output_8, output_9, params_9);
+  csinn_conv2d(output_9, output_10, kernel_10, bias_10, params_10);
+  csinn_relu(output_10, output_11, params_11);
+  csinn_conv2d(output_11, output_12, kernel_12, bias_12, params_12);
+  csinn_relu(output_12, output_13, params_13);
+  csinn_conv2d(output_13, output_14, kernel_14, bias_14, params_14);
+  csinn_relu(output_14, output_15, params_15);
+  csinn_conv2d(output_15, output_16, kernel_16, bias_16, params_16);
+  csinn_relu(output_16, output_17, params_17);
+  csinn_conv2d(output_17, output_18, kernel_18, bias_18, params_18);
+  csinn_relu(output_18, output_19, params_19);
+  csinn_conv2d(output_19, output_20, kernel_20, bias_20, params_20);
+  csinn_relu(output_20, output_21, params_21);
+  csinn_conv2d(output_21, output_22, kernel_22, bias_22, params_22);
+  csinn_relu(output_22, output_23, params_23);
+  csinn_conv2d(output_23, output_24, kernel_24, bias_24, params_24);
+  csinn_relu(output_24, output_25, params_25);
+  csinn_conv2d(output_25, output_26, kernel_26, bias_26, params_26);
+  csinn_relu(output_26, output_27, params_27);
+  csinn_conv2d(output_27, output_28, kernel_28, bias_28, params_28);
+  csinn_relu(output_28, output_29, params_29);
+  csinn_conv2d(output_29, output_30, kernel_30, bias_30, params_30);
+  csinn_relu(output_30, output_31, params_31);
+  csinn_conv2d(output_31, output_32, kernel_32, bias_32, params_32);
+  csinn_relu(output_32, output_33, params_33);
+  csinn_conv2d(output_33, output_34, kernel_34, bias_34, params_34);
+  csinn_relu(output_34, output_35, params_35);
+  csinn_conv2d(output_35, output_36, kernel_36, bias_36, params_36);
+  csinn_relu(output_36, output_37, params_37);
+  csinn_conv2d(output_37, output_38, kernel_38, bias_38, params_38);
+  csinn_relu(output_38, output_39, params_39);
+  csinn_conv2d(output_39, output_40, kernel_40, bias_40, params_40);
+  csinn_relu(output_40, output_41, params_41);
+  csinn_conv2d(output_41, output_42, kernel_42, bias_42, params_42);
+  csinn_relu(output_42, output_43, params_43);
+  csinn_conv2d(output_43, output_44, kernel_44, bias_44, params_44);
+  csinn_relu(output_44, output_45, params_45);
+  csinn_conv2d(output_45, output_46, kernel_46, bias_46, params_46);
+  csinn_relu(output_46, output_47, params_47);
+  csinn_conv2d(output_47, output_48, kernel_48, bias_48, params_48);
+  csinn_relu(output_48, output_49, params_49);
+  csinn_conv2d(output_49, output_50, kernel_50, bias_50, params_50);
+  csinn_relu(output_50, output_51, params_51);
+  csinn_conv2d(output_51, output_52, kernel_52, bias_52, params_52);
+  csinn_transpose(output_52, output_53, params_53);
+  csinn_reshape(output_53, output_54, params_54);
+  csinn_mul(output_54, rhs_56, output_56, params_56);
+  csinn_conv2d(output_51, output_58, kernel_58, bias_58, params_58);
+  csinn_relu(output_58, output_59, params_59);
+  csinn_conv2d(output_59, output_60, kernel_60, bias_60, params_60);
+  csinn_relu(output_60, output_61, params_61);
+  csinn_conv2d(output_61, output_62, kernel_62, bias_62, params_62);
+  csinn_relu(output_62, output_63, params_63);
+  csinn_conv2d(output_63, output_64, kernel_64, bias_64, params_64);
+  csinn_relu(output_64, output_65, params_65);
+  csinn_conv2d(output_65, output_66, kernel_66, bias_66, params_66);
+  csinn_transpose(output_66, output_67, params_67);
+  csinn_reshape(output_67, output_68, params_68);
+  csinn_mul(output_68, rhs_70, output_70, params_70);
+  csinn_conv2d(output_65, output_72, kernel_72, bias_72, params_72);
+  csinn_relu(output_72, output_73, params_73);
+  csinn_conv2d(output_73, output_74, kernel_74, bias_74, params_74);
+  csinn_relu(output_74, output_75, params_75);
+  csinn_conv2d(output_75, output_76, kernel_76, bias_76, params_76);
+  csinn_transpose(output_76, output_77, params_77);
+  csinn_reshape(output_77, output_78, params_78);
+  csinn_mul(output_78, rhs_80, output_80, params_80);
+  csinn_conv2d(output_75, output_82, kernel_82, bias_82, params_82);
+  csinn_relu(output_82, output_83, params_83);
+  csinn_conv2d(output_83, output_84, kernel_84, bias_84, params_84);
+  csinn_relu(output_84, output_85, params_85);
+  csinn_conv2d(output_85, output_86, kernel_86, bias_86, params_86);
+  csinn_transpose(output_86, output_87, params_87);
+  csinn_reshape(output_87, output_88, params_88);
+  csinn_mul(output_88, rhs_90, output_90, params_90);
+  csinn_conv2d(output_85, output_92, kernel_92, bias_92, params_92);
+  csinn_relu(output_92, output_93, params_93);
+  csinn_conv2d(output_93, output_94, kernel_94, bias_94, params_94);
+  csinn_relu(output_94, output_95, params_95);
+  csinn_conv2d(output_95, output_96, kernel_96, bias_96, params_96);
+  csinn_transpose(output_96, output_97, params_97);
+  csinn_reshape(output_97, output_98, params_98);
+  csinn_mul(output_98, rhs_100, output_100, params_100);
+  csinn_conv2d(output_95, output_102, kernel_102, bias_102, params_102);
+  csinn_relu(output_102, output_103, params_103);
+  csinn_conv2d(output_103, output_104, kernel_104, bias_104, params_104);
+  csinn_relu(output_104, output_105, params_105);
+  csinn_conv2d(output_105, output_106, kernel_106, bias_106, params_106);
+  csinn_transpose(output_106, output_107, params_107);
+  csinn_reshape(output_107, output_108, params_108);
+  csinn_mul(output_108, rhs_110, output_110, params_110);
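+  /* Gather the six flattened head outputs feeding concatenate_167. */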
+  input_112[0] = output_56;
+  input_112[1] = output_70;
+  input_112[2] = output_80;
+  input_112[3] = output_90;
+  input_112[4] = output_100;
+  input_112[5] = output_110;
+  csinn_concat(input_112, output_112, params_112);
+  csinn_conv2d(output_51, output_113, kernel_113, bias_113, params_113);
+  csinn_transpose(output_113, output_114, params_114);
+  csinn_reshape(output_114, output_115, params_115);
+  csinn_mul(output_115, rhs_117, output_117, params_117);
+  csinn_conv2d(output_65, output_119, kernel_119, bias_119, params_119);
+  csinn_transpose(output_119, output_120, params_120);
+  csinn_reshape(output_120, output_121, params_121);
+  csinn_mul(output_121, rhs_123, output_123, params_123);
+  csinn_conv2d(output_75, output_125, kernel_125, bias_125, params_125);
+  csinn_transpose(output_125, output_126, params_126);
+  csinn_reshape(output_126, output_127, params_127);
+  csinn_mul(output_127, rhs_129, output_129, params_129);
+  csinn_conv2d(output_85, output_131, kernel_131, bias_131, params_131);
+  csinn_transpose(output_131, output_132, params_132);
+  csinn_reshape(output_132, output_133, params_133);
+  csinn_mul(output_133, rhs_135, output_135, params_135);
+  csinn_conv2d(output_95, output_137, kernel_137, bias_137, params_137);
+  csinn_transpose(output_137, output_138, params_138);
+  csinn_reshape(output_138, output_139, params_139);
+  csinn_mul(output_139, rhs_141, output_141, params_141);
+  csinn_conv2d(output_105, output_143, kernel_143, bias_143, params_143);
+  csinn_transpose(output_143, output_144, params_144);
+  csinn_reshape(output_144, output_145, params_145);
+  csinn_mul(output_145, rhs_147, output_147, params_147);
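+  /* Gather the six flattened head outputs feeding concatenate_192. */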
+  input_149[0] = output_117;
+  input_149[1] = output_123;
+  input_149[2] = output_129;
+  input_149[3] = output_135;
+  input_149[4] = output_141;
+  input_149[5] = output_147;
+  csinn_concat(input_149, output_149, params_149);
+  csinn_reshape(output_149, output_150, params_150);
+  csinn_softmax(output_150, output_151, params_151);
+  csinn_reshape(output_151, output_152, params_152);
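+  /* Two session outputs: output_112 with 7668 values (1917 x 4) and
+   * output_152 with 40257 softmax scores (1917 x 21). */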
+  csinn_set_output(0, output_112, sess);
+  csinn_set_output(1, output_152, sess);
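+  /* Assumption from the call name: the 300x300 input is read with a
+   * 304-pixel line stride, i.e. each row is padded from 300 to 304. */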
+  shl_pnna_set_input_strides(sess, 1, 304, 304);
+
+  csinn_session_setup(sess);
+  return sess;
+}
+void csinn_run(void *data0, void *sess) {
+  struct csinn_tensor input_tensor;
+  input_tensor.data = data0;
+  csinn_update_input(0, &input_tensor, sess);
+  csinn_session_run(sess);
+}
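+
+/*
+ * Usage sketch (hypothetical driver code; error handling and buffer
+ * management omitted):
+ *
+ *   char *params_base = ...;          // model.params mapped into memory
+ *   void *sess = csinn_(params_base); // build and set up the session
+ *   csinn_run(input_buf, sess);       // input_buf: UINT8 1x3x300x300 data
+ *
+ * Note that csinn_run() hands csinn_update_input() a stack tensor with only
+ * the .data field set, so only that field is assumed to be read.
+ */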
+
+struct csinn_session *csinn_import_binary_model(char *bm_addr) {
+  struct shl_binary_model_section_info *sinfo = (struct shl_binary_model_section_info *)(bm_addr + 4096);
+  struct csinn_session *bm_sess = (struct csinn_session *)(bm_addr + sinfo->sections->info_offset * 4096);
+  struct csinn_session *sess = csinn_alloc_session();
+  shl_bm_session_load(sess, bm_sess);
+  sess->model.bm_addr = bm_addr + sinfo->sections->graph_offset * 4096;
+  sess->model.bm_size = sinfo->sections->graph_size;
+  shl_pnna_set_input_strides(sess, 1, 304, 304);
+  csinn_load_binary_model(sess);
+  return sess;
+}
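+
+/*
+ * Binary-model layout assumed by the code above: a
+ * shl_binary_model_section_info header at byte offset 4096, with section
+ * offsets and sizes expressed in 4096-byte pages (hence the "* 4096" when
+ * locating the session and graph sections).
+ */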

BIN
test/face_detect/model.params


+ 3803 - 0
test/face_detect/model_fd.c

@@ -0,0 +1,3803 @@
+/* auto-generated by HHB_VERSION 2.0.21 */
+
+#include <stdlib.h> /* assumed needed for malloc in the generated graph code */
+#include <shl_pnna.h>
+
+void *csinn_(char *params_base) {
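+  /* Same construction pattern as the preceding generated model: an
+   * asymmetric-UINT8 session on the PNNA backend (CSINN_LIGHT), one input
+   * and two outputs. */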
+  struct csinn_session *sess = csinn_alloc_session();
+  sess->base_quant_type = CSINN_QUANT_UINT8_ASYM;
+  sess->model.priority = 0;
+  sess->base_api = CSINN_LIGHT;
+  sess->base_dtype = CSINN_DTYPE_UINT8;
+  csinn_session_init(sess);
+  csinn_set_input_number(1, sess);
+  csinn_set_output_number(2, sess);
+
+  struct csinn_tensor *data = csinn_alloc_tensor(sess);
+  data->name = "data@@multiply_1_0";
+  data->dtype = CSINN_DTYPE_UINT8;
+  data->layout = CSINN_LAYOUT_NCHW;
+  data->dim[0] = 1;
+  data->dim[1] = 3;
+  data->dim[2] = 300;
+  data->dim[3] = 300;
+  data->dim_count = 4;
+  data->qinfo = (struct csinn_quant_info *)(params_base + 0);
+  data->quant_channel = 1;
+  struct csinn_tensor *output_1 = csinn_alloc_tensor(sess);
+  output_1->name = "output_1";
+  output_1->dtype = CSINN_DTYPE_UINT8;
+  output_1->layout = CSINN_LAYOUT_NCHW;
+  output_1->dim[0] = 1;
+  output_1->dim[1] = 3;
+  output_1->dim[2] = 300;
+  output_1->dim[3] = 300;
+  output_1->dim_count = 4;
+  output_1->qinfo = (struct csinn_quant_info *)(params_base + 24);
+  output_1->quant_channel = 1;
+  struct csinn_tensor *rhs_1 = csinn_alloc_tensor(sess);
+  rhs_1->name = "rhs_1";
+  rhs_1->data = params_base + 72;
+  rhs_1->is_const = 1;
+  rhs_1->dtype = CSINN_DTYPE_UINT8;
+  rhs_1->layout = CSINN_LAYOUT_OIHW;
+  rhs_1->dim[0] = 1;
+  rhs_1->dim[1] = 1;
+  rhs_1->dim[2] = 1;
+  rhs_1->dim[3] = 1;
+  rhs_1->dim_count = 4;
+  rhs_1->qinfo = (struct csinn_quant_info *)(params_base + 48);
+  rhs_1->quant_channel = 1;
+  struct csinn_diso_params *params_1 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_1->base.name = "multiply_1";
+  csinn_mul_init(data, rhs_1, output_1, params_1);
+  struct csinn_tensor *output_4 = csinn_alloc_tensor(sess);
+  output_4->name = "output_4";
+  output_4->dtype = CSINN_DTYPE_UINT8;
+  output_4->layout = CSINN_LAYOUT_NCHW;
+  output_4->dim[0] = 1;
+  output_4->dim[1] = 3;
+  output_4->dim[2] = 300;
+  output_4->dim[3] = 300;
+  output_4->dim_count = 4;
+  output_4->qinfo = (struct csinn_quant_info *)(params_base + 73);
+  output_4->quant_channel = 1;
+  struct csinn_tensor *rhs_4 = csinn_alloc_tensor(sess);
+  rhs_4->name = "rhs_4";
+  rhs_4->data = params_base + 121;
+  rhs_4->is_const = 1;
+  rhs_4->dtype = CSINN_DTYPE_UINT8;
+  rhs_4->layout = CSINN_LAYOUT_OIHW;
+  rhs_4->dim[0] = 1;
+  rhs_4->dim[1] = 3;
+  rhs_4->dim[2] = 1;
+  rhs_4->dim[3] = 1;
+  rhs_4->dim_count = 4;
+  rhs_4->qinfo = (struct csinn_quant_info *)(params_base + 97);
+  rhs_4->quant_channel = 1;
+  struct csinn_diso_params *params_4 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_4->base.name = "add_2";
+  csinn_add_init(output_1, rhs_4, output_4, params_4);
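+  /* multiply_1 scales the input by a 1x1x1x1 scalar and add_2 adds a
+     1x3x1x1 per-channel constant -- consistent with mean/std input
+     normalization folded into the graph as constant tensors. */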
+  struct csinn_tensor *output_6 = csinn_alloc_tensor(sess);
+  output_6->name = "output_6";
+  output_6->dtype = CSINN_DTYPE_UINT8;
+  output_6->layout = CSINN_LAYOUT_NCHW;
+  output_6->dim[0] = 1;
+  output_6->dim[1] = 32;
+  output_6->dim[2] = 150;
+  output_6->dim[3] = 150;
+  output_6->dim_count = 4;
+  output_6->qinfo = (struct csinn_quant_info *)(params_base + 124);
+  output_6->quant_channel = 1;
+  struct csinn_tensor *kernel_6 = csinn_alloc_tensor(sess);
+  kernel_6->name = "kernel_6";
+  kernel_6->data = params_base + 172;
+  kernel_6->is_const = 1;
+  kernel_6->dtype = CSINN_DTYPE_UINT8;
+  kernel_6->layout = CSINN_LAYOUT_OIHW;
+  kernel_6->dim[0] = 32;
+  kernel_6->dim[1] = 3;
+  kernel_6->dim[2] = 3;
+  kernel_6->dim[3] = 3;
+  kernel_6->dim_count = 4;
+  kernel_6->qinfo = (struct csinn_quant_info *)(params_base + 148);
+  kernel_6->quant_channel = 1;
+  struct csinn_tensor *bias_6 = csinn_alloc_tensor(sess);
+  bias_6->name = "bias_6";
+  bias_6->data = params_base + 1060;
+  bias_6->is_const = 1;
+  bias_6->dtype = CSINN_DTYPE_INT32;
+  bias_6->layout = CSINN_LAYOUT_O;
+  bias_6->dim[0] = 32;
+  bias_6->dim_count = 1;
+  bias_6->qinfo = (struct csinn_quant_info *)(params_base + 1036);
+  bias_6->quant_channel = 1;
+  struct csinn_conv2d_params *params_6 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_6->group = 1;
+  params_6->stride_height = 2;
+  params_6->stride_width = 2;
+  params_6->dilation_height = 1;
+  params_6->dilation_width = 1;
+  params_6->conv_extra.kernel_tm = NULL;
+  params_6->conv_extra.conv_mode = CSINN_DIRECT;
+  params_6->pad_top = 1;
+  params_6->pad_left = 1;
+  params_6->pad_down = 1;
+  params_6->pad_right = 1;
+  params_6->base.name = "conv2d_3_fuse_multiply_4_fuse_add_5";
+  csinn_conv2d_init(output_4, output_6, kernel_6, bias_6, params_6);
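+  /* Stem convolution: 3x3, stride 2, 3 -> 32 channels, 300x300 -> 150x150. */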
+  struct csinn_tensor *output_7 = csinn_alloc_tensor(sess);
+  output_7->name = "output_7";
+  output_7->dtype = CSINN_DTYPE_UINT8;
+  output_7->layout = CSINN_LAYOUT_NCHW;
+  output_7->dim[0] = 1;
+  output_7->dim[1] = 32;
+  output_7->dim[2] = 150;
+  output_7->dim[3] = 150;
+  output_7->dim_count = 4;
+  output_7->qinfo = (struct csinn_quant_info *)(params_base + 1188);
+  output_7->quant_channel = 1;
+  struct csinn_relu_params *params_7 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_7->base.name = "relu_6";
+  csinn_relu_init(output_6, output_7, params_7);
+  struct csinn_tensor *output_8 = csinn_alloc_tensor(sess);
+  output_8->name = "output_8";
+  output_8->dtype = CSINN_DTYPE_UINT8;
+  output_8->layout = CSINN_LAYOUT_NCHW;
+  output_8->dim[0] = 1;
+  output_8->dim[1] = 32;
+  output_8->dim[2] = 150;
+  output_8->dim[3] = 150;
+  output_8->dim_count = 4;
+  output_8->qinfo = (struct csinn_quant_info *)(params_base + 1212);
+  output_8->quant_channel = 1;
+  struct csinn_tensor *kernel_8 = csinn_alloc_tensor(sess);
+  kernel_8->name = "kernel_8";
+  kernel_8->data = params_base + 1260;
+  kernel_8->is_const = 1;
+  kernel_8->dtype = CSINN_DTYPE_UINT8;
+  kernel_8->layout = CSINN_LAYOUT_O1HW;
+  kernel_8->dim[0] = 32;
+  kernel_8->dim[1] = 1;
+  kernel_8->dim[2] = 3;
+  kernel_8->dim[3] = 3;
+  kernel_8->dim_count = 4;
+  kernel_8->qinfo = (struct csinn_quant_info *)(params_base + 1236);
+  kernel_8->quant_channel = 1;
+  struct csinn_tensor *bias_8 = csinn_alloc_tensor(sess);
+  bias_8->name = "bias_8";
+  bias_8->data = params_base + 1572;
+  bias_8->is_const = 1;
+  bias_8->dtype = CSINN_DTYPE_INT32;
+  bias_8->layout = CSINN_LAYOUT_O;
+  bias_8->dim[0] = 32;
+  bias_8->dim_count = 1;
+  bias_8->qinfo = (struct csinn_quant_info *)(params_base + 1548);
+  bias_8->quant_channel = 1;
+  struct csinn_conv2d_params *params_8 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_8->group = 32;
+  params_8->stride_height = 1;
+  params_8->stride_width = 1;
+  params_8->dilation_height = 1;
+  params_8->dilation_width = 1;
+  params_8->conv_extra.kernel_tm = NULL;
+  params_8->conv_extra.conv_mode = CSINN_DIRECT;
+  params_8->pad_top = 1;
+  params_8->pad_left = 1;
+  params_8->pad_down = 1;
+  params_8->pad_right = 1;
+  params_8->base.name = "conv2d_7_fuse_multiply_8_fuse_add_9";
+  csinn_conv2d_init(output_7, output_8, kernel_8, bias_8, params_8);
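+  /* group (32) equals the channel count, so conv2d_7 is a depthwise 3x3;
+     paired with the 1x1 pointwise conv2d_11 that follows, it forms a
+     MobileNet-style depthwise-separable block, repeated throughout the
+     backbone. */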
+  struct csinn_tensor *output_9 = csinn_alloc_tensor(sess);
+  output_9->name = "output_9";
+  output_9->dtype = CSINN_DTYPE_UINT8;
+  output_9->layout = CSINN_LAYOUT_NCHW;
+  output_9->dim[0] = 1;
+  output_9->dim[1] = 32;
+  output_9->dim[2] = 150;
+  output_9->dim[3] = 150;
+  output_9->dim_count = 4;
+  output_9->qinfo = (struct csinn_quant_info *)(params_base + 1700);
+  output_9->quant_channel = 1;
+  struct csinn_relu_params *params_9 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_9->base.name = "relu_10";
+  csinn_relu_init(output_8, output_9, params_9);
+  struct csinn_tensor *output_10 = csinn_alloc_tensor(sess);
+  output_10->name = "output_10";
+  output_10->dtype = CSINN_DTYPE_UINT8;
+  output_10->layout = CSINN_LAYOUT_NCHW;
+  output_10->dim[0] = 1;
+  output_10->dim[1] = 64;
+  output_10->dim[2] = 150;
+  output_10->dim[3] = 150;
+  output_10->dim_count = 4;
+  output_10->qinfo = (struct csinn_quant_info *)(params_base + 1724);
+  output_10->quant_channel = 1;
+  struct csinn_tensor *kernel_10 = csinn_alloc_tensor(sess);
+  kernel_10->name = "kernel_10";
+  kernel_10->data = params_base + 1772;
+  kernel_10->is_const = 1;
+  kernel_10->dtype = CSINN_DTYPE_UINT8;
+  kernel_10->layout = CSINN_LAYOUT_OIHW;
+  kernel_10->dim[0] = 64;
+  kernel_10->dim[1] = 32;
+  kernel_10->dim[2] = 1;
+  kernel_10->dim[3] = 1;
+  kernel_10->dim_count = 4;
+  kernel_10->qinfo = (struct csinn_quant_info *)(params_base + 1748);
+  kernel_10->quant_channel = 1;
+  struct csinn_tensor *bias_10 = csinn_alloc_tensor(sess);
+  bias_10->name = "bias_10";
+  bias_10->data = params_base + 3844;
+  bias_10->is_const = 1;
+  bias_10->dtype = CSINN_DTYPE_INT32;
+  bias_10->layout = CSINN_LAYOUT_O;
+  bias_10->dim[0] = 64;
+  bias_10->dim_count = 1;
+  bias_10->qinfo = (struct csinn_quant_info *)(params_base + 3820);
+  bias_10->quant_channel = 1;
+  struct csinn_conv2d_params *params_10 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_10->group = 1;
+  params_10->stride_height = 1;
+  params_10->stride_width = 1;
+  params_10->dilation_height = 1;
+  params_10->dilation_width = 1;
+  params_10->conv_extra.kernel_tm = NULL;
+  params_10->conv_extra.conv_mode = CSINN_DIRECT;
+  params_10->pad_top = 0;
+  params_10->pad_left = 0;
+  params_10->pad_down = 0;
+  params_10->pad_right = 0;
+  params_10->base.name = "conv2d_11_fuse_multiply_12_fuse_add_13";
+  csinn_conv2d_init(output_9, output_10, kernel_10, bias_10, params_10);
+  struct csinn_tensor *output_11 = csinn_alloc_tensor(sess);
+  output_11->name = "output_11";
+  output_11->dtype = CSINN_DTYPE_UINT8;
+  output_11->layout = CSINN_LAYOUT_NCHW;
+  output_11->dim[0] = 1;
+  output_11->dim[1] = 64;
+  output_11->dim[2] = 150;
+  output_11->dim[3] = 150;
+  output_11->dim_count = 4;
+  output_11->qinfo = (struct csinn_quant_info *)(params_base + 4100);
+  output_11->quant_channel = 1;
+  struct csinn_relu_params *params_11 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_11->base.name = "relu_14";
+  csinn_relu_init(output_10, output_11, params_11);
+  struct csinn_tensor *output_12 = csinn_alloc_tensor(sess);
+  output_12->name = "output_12";
+  output_12->dtype = CSINN_DTYPE_UINT8;
+  output_12->layout = CSINN_LAYOUT_NCHW;
+  output_12->dim[0] = 1;
+  output_12->dim[1] = 64;
+  output_12->dim[2] = 75;
+  output_12->dim[3] = 75;
+  output_12->dim_count = 4;
+  output_12->qinfo = (struct csinn_quant_info *)(params_base + 4124);
+  output_12->quant_channel = 1;
+  struct csinn_tensor *kernel_12 = csinn_alloc_tensor(sess);
+  kernel_12->name = "kernel_12";
+  kernel_12->data = params_base + 4172;
+  kernel_12->is_const = 1;
+  kernel_12->dtype = CSINN_DTYPE_UINT8;
+  kernel_12->layout = CSINN_LAYOUT_O1HW;
+  kernel_12->dim[0] = 64;
+  kernel_12->dim[1] = 1;
+  kernel_12->dim[2] = 3;
+  kernel_12->dim[3] = 3;
+  kernel_12->dim_count = 4;
+  kernel_12->qinfo = (struct csinn_quant_info *)(params_base + 4148);
+  kernel_12->quant_channel = 1;
+  struct csinn_tensor *bias_12 = csinn_alloc_tensor(sess);
+  bias_12->name = "bias_12";
+  bias_12->data = params_base + 4772;
+  bias_12->is_const = 1;
+  bias_12->dtype = CSINN_DTYPE_INT32;
+  bias_12->layout = CSINN_LAYOUT_O;
+  bias_12->dim[0] = 64;
+  bias_12->dim_count = 1;
+  bias_12->qinfo = (struct csinn_quant_info *)(params_base + 4748);
+  bias_12->quant_channel = 1;
+  struct csinn_conv2d_params *params_12 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_12->group = 64;
+  params_12->stride_height = 2;
+  params_12->stride_width = 2;
+  params_12->dilation_height = 1;
+  params_12->dilation_width = 1;
+  params_12->conv_extra.kernel_tm = NULL;
+  params_12->conv_extra.conv_mode = CSINN_DIRECT;
+  params_12->pad_top = 1;
+  params_12->pad_left = 1;
+  params_12->pad_down = 1;
+  params_12->pad_right = 1;
+  params_12->base.name = "conv2d_15_fuse_multiply_16_fuse_add_17";
+  csinn_conv2d_init(output_11, output_12, kernel_12, bias_12, params_12);
+  struct csinn_tensor *output_13 = csinn_alloc_tensor(sess);
+  output_13->name = "output_13";
+  output_13->dtype = CSINN_DTYPE_UINT8;
+  output_13->layout = CSINN_LAYOUT_NCHW;
+  output_13->dim[0] = 1;
+  output_13->dim[1] = 64;
+  output_13->dim[2] = 75;
+  output_13->dim[3] = 75;
+  output_13->dim_count = 4;
+  output_13->qinfo = (struct csinn_quant_info *)(params_base + 5028);
+  output_13->quant_channel = 1;
+  struct csinn_relu_params *params_13 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_13->base.name = "relu_18";
+  csinn_relu_init(output_12, output_13, params_13);
+  struct csinn_tensor *output_14 = csinn_alloc_tensor(sess);
+  output_14->name = "output_14";
+  output_14->dtype = CSINN_DTYPE_UINT8;
+  output_14->layout = CSINN_LAYOUT_NCHW;
+  output_14->dim[0] = 1;
+  output_14->dim[1] = 128;
+  output_14->dim[2] = 75;
+  output_14->dim[3] = 75;
+  output_14->dim_count = 4;
+  output_14->qinfo = (struct csinn_quant_info *)(params_base + 5052);
+  output_14->quant_channel = 1;
+  struct csinn_tensor *kernel_14 = csinn_alloc_tensor(sess);
+  kernel_14->name = "kernel_14";
+  kernel_14->data = params_base + 5100;
+  kernel_14->is_const = 1;
+  kernel_14->dtype = CSINN_DTYPE_UINT8;
+  kernel_14->layout = CSINN_LAYOUT_OIHW;
+  kernel_14->dim[0] = 128;
+  kernel_14->dim[1] = 64;
+  kernel_14->dim[2] = 1;
+  kernel_14->dim[3] = 1;
+  kernel_14->dim_count = 4;
+  kernel_14->qinfo = (struct csinn_quant_info *)(params_base + 5076);
+  kernel_14->quant_channel = 1;
+  struct csinn_tensor *bias_14 = csinn_alloc_tensor(sess);
+  bias_14->name = "bias_14";
+  bias_14->data = params_base + 13316;
+  bias_14->is_const = 1;
+  bias_14->dtype = CSINN_DTYPE_INT32;
+  bias_14->layout = CSINN_LAYOUT_O;
+  bias_14->dim[0] = 128;
+  bias_14->dim_count = 1;
+  bias_14->qinfo = (struct csinn_quant_info *)(params_base + 13292);
+  bias_14->quant_channel = 1;
+  struct csinn_conv2d_params *params_14 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_14->group = 1;
+  params_14->stride_height = 1;
+  params_14->stride_width = 1;
+  params_14->dilation_height = 1;
+  params_14->dilation_width = 1;
+  params_14->conv_extra.kernel_tm = NULL;
+  params_14->conv_extra.conv_mode = CSINN_DIRECT;
+  params_14->pad_top = 0;
+  params_14->pad_left = 0;
+  params_14->pad_down = 0;
+  params_14->pad_right = 0;
+  params_14->base.name = "conv2d_19_fuse_multiply_20_fuse_add_21";
+  csinn_conv2d_init(output_13, output_14, kernel_14, bias_14, params_14);
+  struct csinn_tensor *output_15 = csinn_alloc_tensor(sess);
+  output_15->name = "output_15";
+  output_15->dtype = CSINN_DTYPE_UINT8;
+  output_15->layout = CSINN_LAYOUT_NCHW;
+  output_15->dim[0] = 1;
+  output_15->dim[1] = 128;
+  output_15->dim[2] = 75;
+  output_15->dim[3] = 75;
+  output_15->dim_count = 4;
+  output_15->qinfo = (struct csinn_quant_info *)(params_base + 13828);
+  output_15->quant_channel = 1;
+  struct csinn_relu_params *params_15 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_15->base.name = "relu_22";
+  csinn_relu_init(output_14, output_15, params_15);
+  struct csinn_tensor *output_16 = csinn_alloc_tensor(sess);
+  output_16->name = "output_16";
+  output_16->dtype = CSINN_DTYPE_UINT8;
+  output_16->layout = CSINN_LAYOUT_NCHW;
+  output_16->dim[0] = 1;
+  output_16->dim[1] = 128;
+  output_16->dim[2] = 75;
+  output_16->dim[3] = 75;
+  output_16->dim_count = 4;
+  output_16->qinfo = (struct csinn_quant_info *)(params_base + 13852);
+  output_16->quant_channel = 1;
+  struct csinn_tensor *kernel_16 = csinn_alloc_tensor(sess);
+  kernel_16->name = "kernel_16";
+  kernel_16->data = params_base + 13900;
+  kernel_16->is_const = 1;
+  kernel_16->dtype = CSINN_DTYPE_UINT8;
+  kernel_16->layout = CSINN_LAYOUT_O1HW;
+  kernel_16->dim[0] = 128;
+  kernel_16->dim[1] = 1;
+  kernel_16->dim[2] = 3;
+  kernel_16->dim[3] = 3;
+  kernel_16->dim_count = 4;
+  kernel_16->qinfo = (struct csinn_quant_info *)(params_base + 13876);
+  kernel_16->quant_channel = 1;
+  struct csinn_tensor *bias_16 = csinn_alloc_tensor(sess);
+  bias_16->name = "bias_16";
+  bias_16->data = params_base + 15076;
+  bias_16->is_const = 1;
+  bias_16->dtype = CSINN_DTYPE_INT32;
+  bias_16->layout = CSINN_LAYOUT_O;
+  bias_16->dim[0] = 128;
+  bias_16->dim_count = 1;
+  bias_16->qinfo = (struct csinn_quant_info *)(params_base + 15052);
+  bias_16->quant_channel = 1;
+  struct csinn_conv2d_params *params_16 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_16->group = 128;
+  params_16->stride_height = 1;
+  params_16->stride_width = 1;
+  params_16->dilation_height = 1;
+  params_16->dilation_width = 1;
+  params_16->conv_extra.kernel_tm = NULL;
+  params_16->conv_extra.conv_mode = CSINN_DIRECT;
+  params_16->pad_top = 1;
+  params_16->pad_left = 1;
+  params_16->pad_down = 1;
+  params_16->pad_right = 1;
+  params_16->base.name = "conv2d_23_fuse_multiply_24_fuse_add_25";
+  csinn_conv2d_init(output_15, output_16, kernel_16, bias_16, params_16);
+  struct csinn_tensor *output_17 = csinn_alloc_tensor(sess);
+  output_17->name = "output_17";
+  output_17->dtype = CSINN_DTYPE_UINT8;
+  output_17->layout = CSINN_LAYOUT_NCHW;
+  output_17->dim[0] = 1;
+  output_17->dim[1] = 128;
+  output_17->dim[2] = 75;
+  output_17->dim[3] = 75;
+  output_17->dim_count = 4;
+  output_17->qinfo = (struct csinn_quant_info *)(params_base + 15588);
+  output_17->quant_channel = 1;
+  struct csinn_relu_params *params_17 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_17->base.name = "relu_26";
+  csinn_relu_init(output_16, output_17, params_17);
+  struct csinn_tensor *output_18 = csinn_alloc_tensor(sess);
+  output_18->name = "output_18";
+  output_18->dtype = CSINN_DTYPE_UINT8;
+  output_18->layout = CSINN_LAYOUT_NCHW;
+  output_18->dim[0] = 1;
+  output_18->dim[1] = 128;
+  output_18->dim[2] = 75;
+  output_18->dim[3] = 75;
+  output_18->dim_count = 4;
+  output_18->qinfo = (struct csinn_quant_info *)(params_base + 15612);
+  output_18->quant_channel = 1;
+  struct csinn_tensor *kernel_18 = csinn_alloc_tensor(sess);
+  kernel_18->name = "kernel_18";
+  kernel_18->data = params_base + 15660;
+  kernel_18->is_const = 1;
+  kernel_18->dtype = CSINN_DTYPE_UINT8;
+  kernel_18->layout = CSINN_LAYOUT_OIHW;
+  kernel_18->dim[0] = 128;
+  kernel_18->dim[1] = 128;
+  kernel_18->dim[2] = 1;
+  kernel_18->dim[3] = 1;
+  kernel_18->dim_count = 4;
+  kernel_18->qinfo = (struct csinn_quant_info *)(params_base + 15636);
+  kernel_18->quant_channel = 1;
+  struct csinn_tensor *bias_18 = csinn_alloc_tensor(sess);
+  bias_18->name = "bias_18";
+  bias_18->data = params_base + 32068;
+  bias_18->is_const = 1;
+  bias_18->dtype = CSINN_DTYPE_INT32;
+  bias_18->layout = CSINN_LAYOUT_O;
+  bias_18->dim[0] = 128;
+  bias_18->dim_count = 1;
+  bias_18->qinfo = (struct csinn_quant_info *)(params_base + 32044);
+  bias_18->quant_channel = 1;
+  struct csinn_conv2d_params *params_18 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_18->group = 1;
+  params_18->stride_height = 1;
+  params_18->stride_width = 1;
+  params_18->dilation_height = 1;
+  params_18->dilation_width = 1;
+  params_18->conv_extra.kernel_tm = NULL;
+  params_18->conv_extra.conv_mode = CSINN_DIRECT;
+  params_18->pad_top = 0;
+  params_18->pad_left = 0;
+  params_18->pad_down = 0;
+  params_18->pad_right = 0;
+  params_18->base.name = "conv2d_27_fuse_multiply_28_fuse_add_29";
+  csinn_conv2d_init(output_17, output_18, kernel_18, bias_18, params_18);
+  struct csinn_tensor *output_19 = csinn_alloc_tensor(sess);
+  output_19->name = "output_19";
+  output_19->dtype = CSINN_DTYPE_UINT8;
+  output_19->layout = CSINN_LAYOUT_NCHW;
+  output_19->dim[0] = 1;
+  output_19->dim[1] = 128;
+  output_19->dim[2] = 75;
+  output_19->dim[3] = 75;
+  output_19->dim_count = 4;
+  output_19->qinfo = (struct csinn_quant_info *)(params_base + 32580);
+  output_19->quant_channel = 1;
+  struct csinn_relu_params *params_19 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_19->base.name = "relu_30";
+  csinn_relu_init(output_18, output_19, params_19);
+  struct csinn_tensor *output_20 = csinn_alloc_tensor(sess);
+  output_20->name = "output_20";
+  output_20->dtype = CSINN_DTYPE_UINT8;
+  output_20->layout = CSINN_LAYOUT_NCHW;
+  output_20->dim[0] = 1;
+  output_20->dim[1] = 128;
+  output_20->dim[2] = 38;
+  output_20->dim[3] = 38;
+  output_20->dim_count = 4;
+  output_20->qinfo = (struct csinn_quant_info *)(params_base + 32604);
+  output_20->quant_channel = 1;
+  struct csinn_tensor *kernel_20 = csinn_alloc_tensor(sess);
+  kernel_20->name = "kernel_20";
+  kernel_20->data = params_base + 32652;
+  kernel_20->is_const = 1;
+  kernel_20->dtype = CSINN_DTYPE_UINT8;
+  kernel_20->layout = CSINN_LAYOUT_O1HW;
+  kernel_20->dim[0] = 128;
+  kernel_20->dim[1] = 1;
+  kernel_20->dim[2] = 3;
+  kernel_20->dim[3] = 3;
+  kernel_20->dim_count = 4;
+  kernel_20->qinfo = (struct csinn_quant_info *)(params_base + 32628);
+  kernel_20->quant_channel = 1;
+  struct csinn_tensor *bias_20 = csinn_alloc_tensor(sess);
+  bias_20->name = "bias_20";
+  bias_20->data = params_base + 33828;
+  bias_20->is_const = 1;
+  bias_20->dtype = CSINN_DTYPE_INT32;
+  bias_20->layout = CSINN_LAYOUT_O;
+  bias_20->dim[0] = 128;
+  bias_20->dim_count = 1;
+  bias_20->qinfo = (struct csinn_quant_info *)(params_base + 33804);
+  bias_20->quant_channel = 1;
+  struct csinn_conv2d_params *params_20 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_20->group = 128;
+  params_20->stride_height = 2;
+  params_20->stride_width = 2;
+  params_20->dilation_height = 1;
+  params_20->dilation_width = 1;
+  params_20->conv_extra.kernel_tm = NULL;
+  params_20->conv_extra.conv_mode = CSINN_DIRECT;
+  params_20->pad_top = 1;
+  params_20->pad_left = 1;
+  params_20->pad_down = 1;
+  params_20->pad_right = 1;
+  params_20->base.name = "conv2d_31_fuse_multiply_32_fuse_add_33";
+  csinn_conv2d_init(output_19, output_20, kernel_20, bias_20, params_20);
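+  /* Depthwise stride-2 downsampling: 75x75 -> 38x38
+     ((75 + 2*1 - 3)/2 + 1 = 38). */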
+  struct csinn_tensor *output_21 = csinn_alloc_tensor(sess);
+  output_21->name = "output_21";
+  output_21->dtype = CSINN_DTYPE_UINT8;
+  output_21->layout = CSINN_LAYOUT_NCHW;
+  output_21->dim[0] = 1;
+  output_21->dim[1] = 128;
+  output_21->dim[2] = 38;
+  output_21->dim[3] = 38;
+  output_21->dim_count = 4;
+  output_21->qinfo = (struct csinn_quant_info *)(params_base + 34340);
+  output_21->quant_channel = 1;
+  struct csinn_relu_params *params_21 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_21->base.name = "relu_34";
+  csinn_relu_init(output_20, output_21, params_21);
+  struct csinn_tensor *output_22 = csinn_alloc_tensor(sess);
+  output_22->name = "output_22";
+  output_22->dtype = CSINN_DTYPE_UINT8;
+  output_22->layout = CSINN_LAYOUT_NCHW;
+  output_22->dim[0] = 1;
+  output_22->dim[1] = 256;
+  output_22->dim[2] = 38;
+  output_22->dim[3] = 38;
+  output_22->dim_count = 4;
+  output_22->qinfo = (struct csinn_quant_info *)(params_base + 34364);
+  output_22->quant_channel = 1;
+  struct csinn_tensor *kernel_22 = csinn_alloc_tensor(sess);
+  kernel_22->name = "kernel_22";
+  kernel_22->data = params_base + 34412;
+  kernel_22->is_const = 1;
+  kernel_22->dtype = CSINN_DTYPE_UINT8;
+  kernel_22->layout = CSINN_LAYOUT_OIHW;
+  kernel_22->dim[0] = 256;
+  kernel_22->dim[1] = 128;
+  kernel_22->dim[2] = 1;
+  kernel_22->dim[3] = 1;
+  kernel_22->dim_count = 4;
+  kernel_22->qinfo = (struct csinn_quant_info *)(params_base + 34388);
+  kernel_22->quant_channel = 1;
+  struct csinn_tensor *bias_22 = csinn_alloc_tensor(sess);
+  bias_22->name = "bias_22";
+  bias_22->data = params_base + 67204;
+  bias_22->is_const = 1;
+  bias_22->dtype = CSINN_DTYPE_INT32;
+  bias_22->layout = CSINN_LAYOUT_O;
+  bias_22->dim[0] = 256;
+  bias_22->dim_count = 1;
+  bias_22->qinfo = (struct csinn_quant_info *)(params_base + 67180);
+  bias_22->quant_channel = 1;
+  struct csinn_conv2d_params *params_22 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_22->group = 1;
+  params_22->stride_height = 1;
+  params_22->stride_width = 1;
+  params_22->dilation_height = 1;
+  params_22->dilation_width = 1;
+  params_22->conv_extra.kernel_tm = NULL;
+  params_22->conv_extra.conv_mode = CSINN_DIRECT;
+  params_22->pad_top = 0;
+  params_22->pad_left = 0;
+  params_22->pad_down = 0;
+  params_22->pad_right = 0;
+  params_22->base.name = "conv2d_35_fuse_multiply_36_fuse_add_37";
+  csinn_conv2d_init(output_21, output_22, kernel_22, bias_22, params_22);
+  struct csinn_tensor *output_23 = csinn_alloc_tensor(sess);
+  output_23->name = "output_23";
+  output_23->dtype = CSINN_DTYPE_UINT8;
+  output_23->layout = CSINN_LAYOUT_NCHW;
+  output_23->dim[0] = 1;
+  output_23->dim[1] = 256;
+  output_23->dim[2] = 38;
+  output_23->dim[3] = 38;
+  output_23->dim_count = 4;
+  output_23->qinfo = (struct csinn_quant_info *)(params_base + 68228);
+  output_23->quant_channel = 1;
+  struct csinn_relu_params *params_23 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_23->base.name = "relu_38";
+  csinn_relu_init(output_22, output_23, params_23);
+  struct csinn_tensor *output_24 = csinn_alloc_tensor(sess);
+  output_24->name = "output_24";
+  output_24->dtype = CSINN_DTYPE_UINT8;
+  output_24->layout = CSINN_LAYOUT_NCHW;
+  output_24->dim[0] = 1;
+  output_24->dim[1] = 256;
+  output_24->dim[2] = 38;
+  output_24->dim[3] = 38;
+  output_24->dim_count = 4;
+  output_24->qinfo = (struct csinn_quant_info *)(params_base + 68252);
+  output_24->quant_channel = 1;
+  struct csinn_tensor *kernel_24 = csinn_alloc_tensor(sess);
+  kernel_24->name = "kernel_24";
+  kernel_24->data = params_base + 68300;
+  kernel_24->is_const = 1;
+  kernel_24->dtype = CSINN_DTYPE_UINT8;
+  kernel_24->layout = CSINN_LAYOUT_O1HW;
+  kernel_24->dim[0] = 256;
+  kernel_24->dim[1] = 1;
+  kernel_24->dim[2] = 3;
+  kernel_24->dim[3] = 3;
+  kernel_24->dim_count = 4;
+  kernel_24->qinfo = (struct csinn_quant_info *)(params_base + 68276);
+  kernel_24->quant_channel = 1;
+  struct csinn_tensor *bias_24 = csinn_alloc_tensor(sess);
+  bias_24->name = "bias_24";
+  bias_24->data = params_base + 70628;
+  bias_24->is_const = 1;
+  bias_24->dtype = CSINN_DTYPE_INT32;
+  bias_24->layout = CSINN_LAYOUT_O;
+  bias_24->dim[0] = 256;
+  bias_24->dim_count = 1;
+  bias_24->qinfo = (struct csinn_quant_info *)(params_base + 70604);
+  bias_24->quant_channel = 1;
+  struct csinn_conv2d_params *params_24 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_24->group = 256;
+  params_24->stride_height = 1;
+  params_24->stride_width = 1;
+  params_24->dilation_height = 1;
+  params_24->dilation_width = 1;
+  params_24->conv_extra.kernel_tm = NULL;
+  params_24->conv_extra.conv_mode = CSINN_DIRECT;
+  params_24->pad_top = 1;
+  params_24->pad_left = 1;
+  params_24->pad_down = 1;
+  params_24->pad_right = 1;
+  params_24->base.name = "conv2d_39_fuse_multiply_40_fuse_add_41";
+  csinn_conv2d_init(output_23, output_24, kernel_24, bias_24, params_24);
+  struct csinn_tensor *output_25 = csinn_alloc_tensor(sess);
+  output_25->name = "output_25";
+  output_25->dtype = CSINN_DTYPE_UINT8;
+  output_25->layout = CSINN_LAYOUT_NCHW;
+  output_25->dim[0] = 1;
+  output_25->dim[1] = 256;
+  output_25->dim[2] = 38;
+  output_25->dim[3] = 38;
+  output_25->dim_count = 4;
+  output_25->qinfo = (struct csinn_quant_info *)(params_base + 71652);
+  output_25->quant_channel = 1;
+  struct csinn_relu_params *params_25 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_25->base.name = "relu_42";
+  csinn_relu_init(output_24, output_25, params_25);
+  struct csinn_tensor *output_26 = csinn_alloc_tensor(sess);
+  output_26->name = "output_26";
+  output_26->dtype = CSINN_DTYPE_UINT8;
+  output_26->layout = CSINN_LAYOUT_NCHW;
+  output_26->dim[0] = 1;
+  output_26->dim[1] = 256;
+  output_26->dim[2] = 38;
+  output_26->dim[3] = 38;
+  output_26->dim_count = 4;
+  output_26->qinfo = (struct csinn_quant_info *)(params_base + 71676);
+  output_26->quant_channel = 1;
+  struct csinn_tensor *kernel_26 = csinn_alloc_tensor(sess);
+  kernel_26->name = "kernel_26";
+  kernel_26->data = params_base + 71724;
+  kernel_26->is_const = 1;
+  kernel_26->dtype = CSINN_DTYPE_UINT8;
+  kernel_26->layout = CSINN_LAYOUT_OIHW;
+  kernel_26->dim[0] = 256;
+  kernel_26->dim[1] = 256;
+  kernel_26->dim[2] = 1;
+  kernel_26->dim[3] = 1;
+  kernel_26->dim_count = 4;
+  kernel_26->qinfo = (struct csinn_quant_info *)(params_base + 71700);
+  kernel_26->quant_channel = 1;
+  struct csinn_tensor *bias_26 = csinn_alloc_tensor(sess);
+  bias_26->name = "bias_26";
+  bias_26->data = params_base + 137284;
+  bias_26->is_const = 1;
+  bias_26->dtype = CSINN_DTYPE_INT32;
+  bias_26->layout = CSINN_LAYOUT_O;
+  bias_26->dim[0] = 256;
+  bias_26->dim_count = 1;
+  bias_26->qinfo = (struct csinn_quant_info *)(params_base + 137260);
+  bias_26->quant_channel = 1;
+  struct csinn_conv2d_params *params_26 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_26->group = 1;
+  params_26->stride_height = 1;
+  params_26->stride_width = 1;
+  params_26->dilation_height = 1;
+  params_26->dilation_width = 1;
+  params_26->conv_extra.kernel_tm = NULL;
+  params_26->conv_extra.conv_mode = CSINN_DIRECT;
+  params_26->pad_top = 0;
+  params_26->pad_left = 0;
+  params_26->pad_down = 0;
+  params_26->pad_right = 0;
+  params_26->base.name = "conv2d_43_fuse_multiply_44_fuse_add_45";
+  csinn_conv2d_init(output_25, output_26, kernel_26, bias_26, params_26);
+  struct csinn_tensor *output_27 = csinn_alloc_tensor(sess);
+  output_27->name = "output_27";
+  output_27->dtype = CSINN_DTYPE_UINT8;
+  output_27->layout = CSINN_LAYOUT_NCHW;
+  output_27->dim[0] = 1;
+  output_27->dim[1] = 256;
+  output_27->dim[2] = 38;
+  output_27->dim[3] = 38;
+  output_27->dim_count = 4;
+  output_27->qinfo = (struct csinn_quant_info *)(params_base + 138308);
+  output_27->quant_channel = 1;
+  struct csinn_relu_params *params_27 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_27->base.name = "relu_46";
+  csinn_relu_init(output_26, output_27, params_27);
+  struct csinn_tensor *output_28 = csinn_alloc_tensor(sess);
+  output_28->name = "output_28";
+  output_28->dtype = CSINN_DTYPE_UINT8;
+  output_28->layout = CSINN_LAYOUT_NCHW;
+  output_28->dim[0] = 1;
+  output_28->dim[1] = 256;
+  output_28->dim[2] = 19;
+  output_28->dim[3] = 19;
+  output_28->dim_count = 4;
+  output_28->qinfo = (struct csinn_quant_info *)(params_base + 138332);
+  output_28->quant_channel = 1;
+  struct csinn_tensor *kernel_28 = csinn_alloc_tensor(sess);
+  kernel_28->name = "kernel_28";
+  kernel_28->data = params_base + 138380;
+  kernel_28->is_const = 1;
+  kernel_28->dtype = CSINN_DTYPE_UINT8;
+  kernel_28->layout = CSINN_LAYOUT_O1HW;
+  kernel_28->dim[0] = 256;
+  kernel_28->dim[1] = 1;
+  kernel_28->dim[2] = 3;
+  kernel_28->dim[3] = 3;
+  kernel_28->dim_count = 4;
+  kernel_28->qinfo = (struct csinn_quant_info *)(params_base + 138356);
+  kernel_28->quant_channel = 1;
+  struct csinn_tensor *bias_28 = csinn_alloc_tensor(sess);
+  bias_28->name = "bias_28";
+  bias_28->data = params_base + 140708;
+  bias_28->is_const = 1;
+  bias_28->dtype = CSINN_DTYPE_INT32;
+  bias_28->layout = CSINN_LAYOUT_O;
+  bias_28->dim[0] = 256;
+  bias_28->dim_count = 1;
+  bias_28->qinfo = (struct csinn_quant_info *)(params_base + 140684);
+  bias_28->quant_channel = 1;
+  struct csinn_conv2d_params *params_28 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_28->group = 256;
+  params_28->stride_height = 2;
+  params_28->stride_width = 2;
+  params_28->dilation_height = 1;
+  params_28->dilation_width = 1;
+  params_28->conv_extra.kernel_tm = NULL;
+  params_28->conv_extra.conv_mode = CSINN_DIRECT;
+  params_28->pad_top = 1;
+  params_28->pad_left = 1;
+  params_28->pad_down = 1;
+  params_28->pad_right = 1;
+  params_28->base.name = "conv2d_47_fuse_multiply_48_fuse_add_49";
+  csinn_conv2d_init(output_27, output_28, kernel_28, bias_28, params_28);
+  struct csinn_tensor *output_29 = csinn_alloc_tensor(sess);
+  output_29->name = "output_29";
+  output_29->dtype = CSINN_DTYPE_UINT8;
+  output_29->layout = CSINN_LAYOUT_NCHW;
+  output_29->dim[0] = 1;
+  output_29->dim[1] = 256;
+  output_29->dim[2] = 19;
+  output_29->dim[3] = 19;
+  output_29->dim_count = 4;
+  output_29->qinfo = (struct csinn_quant_info *)(params_base + 141732);
+  output_29->quant_channel = 1;
+  struct csinn_relu_params *params_29 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_29->base.name = "relu_50";
+  csinn_relu_init(output_28, output_29, params_29);
+  struct csinn_tensor *output_30 = csinn_alloc_tensor(sess);
+  output_30->name = "output_30";
+  output_30->dtype = CSINN_DTYPE_UINT8;
+  output_30->layout = CSINN_LAYOUT_NCHW;
+  output_30->dim[0] = 1;
+  output_30->dim[1] = 512;
+  output_30->dim[2] = 19;
+  output_30->dim[3] = 19;
+  output_30->dim_count = 4;
+  output_30->qinfo = (struct csinn_quant_info *)(params_base + 141756);
+  output_30->quant_channel = 1;
+  struct csinn_tensor *kernel_30 = csinn_alloc_tensor(sess);
+  kernel_30->name = "kernel_30";
+  kernel_30->data = params_base + 141804;
+  kernel_30->is_const = 1;
+  kernel_30->dtype = CSINN_DTYPE_UINT8;
+  kernel_30->layout = CSINN_LAYOUT_OIHW;
+  kernel_30->dim[0] = 512;
+  kernel_30->dim[1] = 256;
+  kernel_30->dim[2] = 1;
+  kernel_30->dim[3] = 1;
+  kernel_30->dim_count = 4;
+  kernel_30->qinfo = (struct csinn_quant_info *)(params_base + 141780);
+  kernel_30->quant_channel = 1;
+  struct csinn_tensor *bias_30 = csinn_alloc_tensor(sess);
+  bias_30->name = "bias_30";
+  bias_30->data = params_base + 272900;
+  bias_30->is_const = 1;
+  bias_30->dtype = CSINN_DTYPE_INT32;
+  bias_30->layout = CSINN_LAYOUT_O;
+  bias_30->dim[0] = 512;
+  bias_30->dim_count = 1;
+  bias_30->qinfo = (struct csinn_quant_info *)(params_base + 272876);
+  bias_30->quant_channel = 1;
+  struct csinn_conv2d_params *params_30 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_30->group = 1;
+  params_30->stride_height = 1;
+  params_30->stride_width = 1;
+  params_30->dilation_height = 1;
+  params_30->dilation_width = 1;
+  params_30->conv_extra.kernel_tm = NULL;
+  params_30->conv_extra.conv_mode = CSINN_DIRECT;
+  params_30->pad_top = 0;
+  params_30->pad_left = 0;
+  params_30->pad_down = 0;
+  params_30->pad_right = 0;
+  params_30->base.name = "conv2d_51_fuse_multiply_52_fuse_add_53";
+  csinn_conv2d_init(output_29, output_30, kernel_30, bias_30, params_30);
+  struct csinn_tensor *output_31 = csinn_alloc_tensor(sess);
+  output_31->name = "output_31";
+  output_31->dtype = CSINN_DTYPE_UINT8;
+  output_31->layout = CSINN_LAYOUT_NCHW;
+  output_31->dim[0] = 1;
+  output_31->dim[1] = 512;
+  output_31->dim[2] = 19;
+  output_31->dim[3] = 19;
+  output_31->dim_count = 4;
+  output_31->qinfo = (struct csinn_quant_info *)(params_base + 274948);
+  output_31->quant_channel = 1;
+  struct csinn_relu_params *params_31 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_31->base.name = "relu_54";
+  csinn_relu_init(output_30, output_31, params_31);
+  struct csinn_tensor *output_32 = csinn_alloc_tensor(sess);
+  output_32->name = "output_32";
+  output_32->dtype = CSINN_DTYPE_UINT8;
+  output_32->layout = CSINN_LAYOUT_NCHW;
+  output_32->dim[0] = 1;
+  output_32->dim[1] = 512;
+  output_32->dim[2] = 19;
+  output_32->dim[3] = 19;
+  output_32->dim_count = 4;
+  output_32->qinfo = (struct csinn_quant_info *)(params_base + 274972);
+  output_32->quant_channel = 1;
+  struct csinn_tensor *kernel_32 = csinn_alloc_tensor(sess);
+  kernel_32->name = "kernel_32";
+  kernel_32->data = params_base + 275020;
+  kernel_32->is_const = 1;
+  kernel_32->dtype = CSINN_DTYPE_UINT8;
+  kernel_32->layout = CSINN_LAYOUT_O1HW;
+  kernel_32->dim[0] = 512;
+  kernel_32->dim[1] = 1;
+  kernel_32->dim[2] = 3;
+  kernel_32->dim[3] = 3;
+  kernel_32->dim_count = 4;
+  kernel_32->qinfo = (struct csinn_quant_info *)(params_base + 274996);
+  kernel_32->quant_channel = 1;
+  struct csinn_tensor *bias_32 = csinn_alloc_tensor(sess);
+  bias_32->name = "bias_32";
+  bias_32->data = params_base + 279652;
+  bias_32->is_const = 1;
+  bias_32->dtype = CSINN_DTYPE_INT32;
+  bias_32->layout = CSINN_LAYOUT_O;
+  bias_32->dim[0] = 512;
+  bias_32->dim_count = 1;
+  bias_32->qinfo = (struct csinn_quant_info *)(params_base + 279628);
+  bias_32->quant_channel = 1;
+  struct csinn_conv2d_params *params_32 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_32->group = 512;
+  params_32->stride_height = 1;
+  params_32->stride_width = 1;
+  params_32->dilation_height = 1;
+  params_32->dilation_width = 1;
+  params_32->conv_extra.kernel_tm = NULL;
+  params_32->conv_extra.conv_mode = CSINN_DIRECT;
+  params_32->pad_top = 1;
+  params_32->pad_left = 1;
+  params_32->pad_down = 1;
+  params_32->pad_right = 1;
+  params_32->base.name = "conv2d_55_fuse_multiply_56_fuse_add_57";
+  csinn_conv2d_init(output_31, output_32, kernel_32, bias_32, params_32);
+  struct csinn_tensor *output_33 = csinn_alloc_tensor(sess);
+  output_33->name = "output_33";
+  output_33->dtype = CSINN_DTYPE_UINT8;
+  output_33->layout = CSINN_LAYOUT_NCHW;
+  output_33->dim[0] = 1;
+  output_33->dim[1] = 512;
+  output_33->dim[2] = 19;
+  output_33->dim[3] = 19;
+  output_33->dim_count = 4;
+  output_33->qinfo = (struct csinn_quant_info *)(params_base + 281700);
+  output_33->quant_channel = 1;
+  struct csinn_relu_params *params_33 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_33->base.name = "relu_58";
+  csinn_relu_init(output_32, output_33, params_33);
+  struct csinn_tensor *output_34 = csinn_alloc_tensor(sess);
+  output_34->name = "output_34";
+  output_34->dtype = CSINN_DTYPE_UINT8;
+  output_34->layout = CSINN_LAYOUT_NCHW;
+  output_34->dim[0] = 1;
+  output_34->dim[1] = 512;
+  output_34->dim[2] = 19;
+  output_34->dim[3] = 19;
+  output_34->dim_count = 4;
+  output_34->qinfo = (struct csinn_quant_info *)(params_base + 281724);
+  output_34->quant_channel = 1;
+  struct csinn_tensor *kernel_34 = csinn_alloc_tensor(sess);
+  kernel_34->name = "kernel_34";
+  kernel_34->data = params_base + 281772;
+  kernel_34->is_const = 1;
+  kernel_34->dtype = CSINN_DTYPE_UINT8;
+  kernel_34->layout = CSINN_LAYOUT_OIHW;
+  kernel_34->dim[0] = 512;
+  kernel_34->dim[1] = 512;
+  kernel_34->dim[2] = 1;
+  kernel_34->dim[3] = 1;
+  kernel_34->dim_count = 4;
+  kernel_34->qinfo = (struct csinn_quant_info *)(params_base + 281748);
+  kernel_34->quant_channel = 1;
+  struct csinn_tensor *bias_34 = csinn_alloc_tensor(sess);
+  bias_34->name = "bias_34";
+  bias_34->data = params_base + 543940;
+  bias_34->is_const = 1;
+  bias_34->dtype = CSINN_DTYPE_INT32;
+  bias_34->layout = CSINN_LAYOUT_O;
+  bias_34->dim[0] = 512;
+  bias_34->dim_count = 1;
+  bias_34->qinfo = (struct csinn_quant_info *)(params_base + 543916);
+  bias_34->quant_channel = 1;
+  struct csinn_conv2d_params *params_34 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_34->group = 1;
+  params_34->stride_height = 1;
+  params_34->stride_width = 1;
+  params_34->dilation_height = 1;
+  params_34->dilation_width = 1;
+  params_34->conv_extra.kernel_tm = NULL;
+  params_34->conv_extra.conv_mode = CSINN_DIRECT;
+  params_34->pad_top = 0;
+  params_34->pad_left = 0;
+  params_34->pad_down = 0;
+  params_34->pad_right = 0;
+  params_34->base.name = "conv2d_59_fuse_multiply_60_fuse_add_61";
+  csinn_conv2d_init(output_33, output_34, kernel_34, bias_34, params_34);
+  struct csinn_tensor *output_35 = csinn_alloc_tensor(sess);
+  output_35->name = "output_35";
+  output_35->dtype = CSINN_DTYPE_UINT8;
+  output_35->layout = CSINN_LAYOUT_NCHW;
+  output_35->dim[0] = 1;
+  output_35->dim[1] = 512;
+  output_35->dim[2] = 19;
+  output_35->dim[3] = 19;
+  output_35->dim_count = 4;
+  output_35->qinfo = (struct csinn_quant_info *)(params_base + 545988);
+  output_35->quant_channel = 1;
+  struct csinn_relu_params *params_35 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_35->base.name = "relu_62";
+  csinn_relu_init(output_34, output_35, params_35);
+  struct csinn_tensor *output_36 = csinn_alloc_tensor(sess);
+  output_36->name = "output_36";
+  output_36->dtype = CSINN_DTYPE_UINT8;
+  output_36->layout = CSINN_LAYOUT_NCHW;
+  output_36->dim[0] = 1;
+  output_36->dim[1] = 512;
+  output_36->dim[2] = 19;
+  output_36->dim[3] = 19;
+  output_36->dim_count = 4;
+  output_36->qinfo = (struct csinn_quant_info *)(params_base + 546012);
+  output_36->quant_channel = 1;
+  struct csinn_tensor *kernel_36 = csinn_alloc_tensor(sess);
+  kernel_36->name = "kernel_36";
+  kernel_36->data = params_base + 546060;
+  kernel_36->is_const = 1;
+  kernel_36->dtype = CSINN_DTYPE_UINT8;
+  kernel_36->layout = CSINN_LAYOUT_O1HW;
+  kernel_36->dim[0] = 512;
+  kernel_36->dim[1] = 1;
+  kernel_36->dim[2] = 3;
+  kernel_36->dim[3] = 3;
+  kernel_36->dim_count = 4;
+  kernel_36->qinfo = (struct csinn_quant_info *)(params_base + 546036);
+  kernel_36->quant_channel = 1;
+  struct csinn_tensor *bias_36 = csinn_alloc_tensor(sess);
+  bias_36->name = "bias_36";
+  bias_36->data = params_base + 550692;
+  bias_36->is_const = 1;
+  bias_36->dtype = CSINN_DTYPE_INT32;
+  bias_36->layout = CSINN_LAYOUT_O;
+  bias_36->dim[0] = 512;
+  bias_36->dim_count = 1;
+  bias_36->qinfo = (struct csinn_quant_info *)(params_base + 550668);
+  bias_36->quant_channel = 1;
+  struct csinn_conv2d_params *params_36 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_36->group = 512;
+  params_36->stride_height = 1;
+  params_36->stride_width = 1;
+  params_36->dilation_height = 1;
+  params_36->dilation_width = 1;
+  params_36->conv_extra.kernel_tm = NULL;
+  params_36->conv_extra.conv_mode = CSINN_DIRECT;
+  params_36->pad_top = 1;
+  params_36->pad_left = 1;
+  params_36->pad_down = 1;
+  params_36->pad_right = 1;
+  params_36->base.name = "conv2d_63_fuse_multiply_64_fuse_add_65";
+  csinn_conv2d_init(output_35, output_36, kernel_36, bias_36, params_36);
+  struct csinn_tensor *output_37 = csinn_alloc_tensor(sess);
+  output_37->name = "output_37";
+  output_37->dtype = CSINN_DTYPE_UINT8;
+  output_37->layout = CSINN_LAYOUT_NCHW;
+  output_37->dim[0] = 1;
+  output_37->dim[1] = 512;
+  output_37->dim[2] = 19;
+  output_37->dim[3] = 19;
+  output_37->dim_count = 4;
+  output_37->qinfo = (struct csinn_quant_info *)(params_base + 552740);
+  output_37->quant_channel = 1;
+  struct csinn_relu_params *params_37 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_37->base.name = "relu_66";
+  csinn_relu_init(output_36, output_37, params_37);
+  struct csinn_tensor *output_38 = csinn_alloc_tensor(sess);
+  output_38->name = "output_38";
+  output_38->dtype = CSINN_DTYPE_UINT8;
+  output_38->layout = CSINN_LAYOUT_NCHW;
+  output_38->dim[0] = 1;
+  output_38->dim[1] = 512;
+  output_38->dim[2] = 19;
+  output_38->dim[3] = 19;
+  output_38->dim_count = 4;
+  output_38->qinfo = (struct csinn_quant_info *)(params_base + 552764);
+  output_38->quant_channel = 1;
+  struct csinn_tensor *kernel_38 = csinn_alloc_tensor(sess);
+  kernel_38->name = "kernel_38";
+  kernel_38->data = params_base + 552812;
+  kernel_38->is_const = 1;
+  kernel_38->dtype = CSINN_DTYPE_UINT8;
+  kernel_38->layout = CSINN_LAYOUT_OIHW;
+  kernel_38->dim[0] = 512;
+  kernel_38->dim[1] = 512;
+  kernel_38->dim[2] = 1;
+  kernel_38->dim[3] = 1;
+  kernel_38->dim_count = 4;
+  kernel_38->qinfo = (struct csinn_quant_info *)(params_base + 552788);
+  kernel_38->quant_channel = 1;
+  struct csinn_tensor *bias_38 = csinn_alloc_tensor(sess);
+  bias_38->name = "bias_38";
+  bias_38->data = params_base + 814980;
+  bias_38->is_const = 1;
+  bias_38->dtype = CSINN_DTYPE_INT32;
+  bias_38->layout = CSINN_LAYOUT_O;
+  bias_38->dim[0] = 512;
+  bias_38->dim_count = 1;
+  bias_38->qinfo = (struct csinn_quant_info *)(params_base + 814956);
+  bias_38->quant_channel = 1;
+  struct csinn_conv2d_params *params_38 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_38->group = 1;
+  params_38->stride_height = 1;
+  params_38->stride_width = 1;
+  params_38->dilation_height = 1;
+  params_38->dilation_width = 1;
+  params_38->conv_extra.kernel_tm = NULL;
+  params_38->conv_extra.conv_mode = CSINN_DIRECT;
+  params_38->pad_top = 0;
+  params_38->pad_left = 0;
+  params_38->pad_down = 0;
+  params_38->pad_right = 0;
+  params_38->base.name = "conv2d_67_fuse_multiply_68_fuse_add_69";
+  csinn_conv2d_init(output_37, output_38, kernel_38, bias_38, params_38);
+  struct csinn_tensor *output_39 = csinn_alloc_tensor(sess);
+  output_39->name = "output_39";
+  output_39->dtype = CSINN_DTYPE_UINT8;
+  output_39->layout = CSINN_LAYOUT_NCHW;
+  output_39->dim[0] = 1;
+  output_39->dim[1] = 512;
+  output_39->dim[2] = 19;
+  output_39->dim[3] = 19;
+  output_39->dim_count = 4;
+  output_39->qinfo = (struct csinn_quant_info *)(params_base + 817028);
+  output_39->quant_channel = 1;
+  struct csinn_relu_params *params_39 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_39->base.name = "relu_70";
+  csinn_relu_init(output_38, output_39, params_39);
+  struct csinn_tensor *output_40 = csinn_alloc_tensor(sess);
+  output_40->name = "output_40";
+  output_40->dtype = CSINN_DTYPE_UINT8;
+  output_40->layout = CSINN_LAYOUT_NCHW;
+  output_40->dim[0] = 1;
+  output_40->dim[1] = 512;
+  output_40->dim[2] = 19;
+  output_40->dim[3] = 19;
+  output_40->dim_count = 4;
+  output_40->qinfo = (struct csinn_quant_info *)(params_base + 817052);
+  output_40->quant_channel = 1;
+  struct csinn_tensor *kernel_40 = csinn_alloc_tensor(sess);
+  kernel_40->name = "kernel_40";
+  kernel_40->data = params_base + 817100;
+  kernel_40->is_const = 1;
+  kernel_40->dtype = CSINN_DTYPE_UINT8;
+  kernel_40->layout = CSINN_LAYOUT_O1HW;
+  kernel_40->dim[0] = 512;
+  kernel_40->dim[1] = 1;
+  kernel_40->dim[2] = 3;
+  kernel_40->dim[3] = 3;
+  kernel_40->dim_count = 4;
+  kernel_40->qinfo = (struct csinn_quant_info *)(params_base + 817076);
+  kernel_40->quant_channel = 1;
+  struct csinn_tensor *bias_40 = csinn_alloc_tensor(sess);
+  bias_40->name = "bias_40";
+  bias_40->data = params_base + 821732;
+  bias_40->is_const = 1;
+  bias_40->dtype = CSINN_DTYPE_INT32;
+  bias_40->layout = CSINN_LAYOUT_O;
+  bias_40->dim[0] = 512;
+  bias_40->dim_count = 1;
+  bias_40->qinfo = (struct csinn_quant_info *)(params_base + 821708);
+  bias_40->quant_channel = 1;
+  struct csinn_conv2d_params *params_40 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_40->group = 512;
+  params_40->stride_height = 1;
+  params_40->stride_width = 1;
+  params_40->dilation_height = 1;
+  params_40->dilation_width = 1;
+  params_40->conv_extra.kernel_tm = NULL;
+  params_40->conv_extra.conv_mode = CSINN_DIRECT;
+  params_40->pad_top = 1;
+  params_40->pad_left = 1;
+  params_40->pad_down = 1;
+  params_40->pad_right = 1;
+  params_40->base.name = "conv2d_71_fuse_multiply_72_fuse_add_73";
+  csinn_conv2d_init(output_39, output_40, kernel_40, bias_40, params_40);
+  struct csinn_tensor *output_41 = csinn_alloc_tensor(sess);
+  output_41->name = "output_41";
+  output_41->dtype = CSINN_DTYPE_UINT8;
+  output_41->layout = CSINN_LAYOUT_NCHW;
+  output_41->dim[0] = 1;
+  output_41->dim[1] = 512;
+  output_41->dim[2] = 19;
+  output_41->dim[3] = 19;
+  output_41->dim_count = 4;
+  output_41->qinfo = (struct csinn_quant_info *)(params_base + 823780);
+  output_41->quant_channel = 1;
+  struct csinn_relu_params *params_41 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_41->base.name = "relu_74";
+  csinn_relu_init(output_40, output_41, params_41);
+  struct csinn_tensor *output_42 = csinn_alloc_tensor(sess);
+  output_42->name = "output_42";
+  output_42->dtype = CSINN_DTYPE_UINT8;
+  output_42->layout = CSINN_LAYOUT_NCHW;
+  output_42->dim[0] = 1;
+  output_42->dim[1] = 512;
+  output_42->dim[2] = 19;
+  output_42->dim[3] = 19;
+  output_42->dim_count = 4;
+  output_42->qinfo = (struct csinn_quant_info *)(params_base + 823804);
+  output_42->quant_channel = 1;
+  struct csinn_tensor *kernel_42 = csinn_alloc_tensor(sess);
+  kernel_42->name = "kernel_42";
+  kernel_42->data = params_base + 823852;
+  kernel_42->is_const = 1;
+  kernel_42->dtype = CSINN_DTYPE_UINT8;
+  kernel_42->layout = CSINN_LAYOUT_OIHW;
+  kernel_42->dim[0] = 512;
+  kernel_42->dim[1] = 512;
+  kernel_42->dim[2] = 1;
+  kernel_42->dim[3] = 1;
+  kernel_42->dim_count = 4;
+  kernel_42->qinfo = (struct csinn_quant_info *)(params_base + 823828);
+  kernel_42->quant_channel = 1;
+  struct csinn_tensor *bias_42 = csinn_alloc_tensor(sess);
+  bias_42->name = "bias_42";
+  bias_42->data = params_base + 1086020;
+  bias_42->is_const = 1;
+  bias_42->dtype = CSINN_DTYPE_INT32;
+  bias_42->layout = CSINN_LAYOUT_O;
+  bias_42->dim[0] = 512;
+  bias_42->dim_count = 1;
+  bias_42->qinfo = (struct csinn_quant_info *)(params_base + 1085996);
+  bias_42->quant_channel = 1;
+  struct csinn_conv2d_params *params_42 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_42->group = 1;
+  params_42->stride_height = 1;
+  params_42->stride_width = 1;
+  params_42->dilation_height = 1;
+  params_42->dilation_width = 1;
+  params_42->conv_extra.kernel_tm = NULL;
+  params_42->conv_extra.conv_mode = CSINN_DIRECT;
+  params_42->pad_top = 0;
+  params_42->pad_left = 0;
+  params_42->pad_down = 0;
+  params_42->pad_right = 0;
+  params_42->base.name = "conv2d_75_fuse_multiply_76_fuse_add_77";
+  csinn_conv2d_init(output_41, output_42, kernel_42, bias_42, params_42);
+  struct csinn_tensor *output_43 = csinn_alloc_tensor(sess);
+  output_43->name = "output_43";
+  output_43->dtype = CSINN_DTYPE_UINT8;
+  output_43->layout = CSINN_LAYOUT_NCHW;
+  output_43->dim[0] = 1;
+  output_43->dim[1] = 512;
+  output_43->dim[2] = 19;
+  output_43->dim[3] = 19;
+  output_43->dim_count = 4;
+  output_43->qinfo = (struct csinn_quant_info *)(params_base + 1088068);
+  output_43->quant_channel = 1;
+  struct csinn_relu_params *params_43 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_43->base.name = "relu_78";
+  csinn_relu_init(output_42, output_43, params_43);
+  struct csinn_tensor *output_44 = csinn_alloc_tensor(sess);
+  output_44->name = "output_44";
+  output_44->dtype = CSINN_DTYPE_UINT8;
+  output_44->layout = CSINN_LAYOUT_NCHW;
+  output_44->dim[0] = 1;
+  output_44->dim[1] = 512;
+  output_44->dim[2] = 19;
+  output_44->dim[3] = 19;
+  output_44->dim_count = 4;
+  output_44->qinfo = (struct csinn_quant_info *)(params_base + 1088092);
+  output_44->quant_channel = 1;
+  struct csinn_tensor *kernel_44 = csinn_alloc_tensor(sess);
+  kernel_44->name = "kernel_44";
+  kernel_44->data = params_base + 1088140;
+  kernel_44->is_const = 1;
+  kernel_44->dtype = CSINN_DTYPE_UINT8;
+  kernel_44->layout = CSINN_LAYOUT_O1HW;
+  kernel_44->dim[0] = 512;
+  kernel_44->dim[1] = 1;
+  kernel_44->dim[2] = 3;
+  kernel_44->dim[3] = 3;
+  kernel_44->dim_count = 4;
+  kernel_44->qinfo = (struct csinn_quant_info *)(params_base + 1088116);
+  kernel_44->quant_channel = 1;
+  struct csinn_tensor *bias_44 = csinn_alloc_tensor(sess);
+  bias_44->name = "bias_44";
+  bias_44->data = params_base + 1092772;
+  bias_44->is_const = 1;
+  bias_44->dtype = CSINN_DTYPE_INT32;
+  bias_44->layout = CSINN_LAYOUT_O;
+  bias_44->dim[0] = 512;
+  bias_44->dim_count = 1;
+  bias_44->qinfo = (struct csinn_quant_info *)(params_base + 1092748);
+  bias_44->quant_channel = 1;
+  struct csinn_conv2d_params *params_44 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_44->group = 512;
+  params_44->stride_height = 1;
+  params_44->stride_width = 1;
+  params_44->dilation_height = 1;
+  params_44->dilation_width = 1;
+  params_44->conv_extra.kernel_tm = NULL;
+  params_44->conv_extra.conv_mode = CSINN_DIRECT;
+  params_44->pad_top = 1;
+  params_44->pad_left = 1;
+  params_44->pad_down = 1;
+  params_44->pad_right = 1;
+  params_44->base.name = "conv2d_79_fuse_multiply_80_fuse_add_81";
+  csinn_conv2d_init(output_43, output_44, kernel_44, bias_44, params_44);
+  struct csinn_tensor *output_45 = csinn_alloc_tensor(sess);
+  output_45->name = "output_45";
+  output_45->dtype = CSINN_DTYPE_UINT8;
+  output_45->layout = CSINN_LAYOUT_NCHW;
+  output_45->dim[0] = 1;
+  output_45->dim[1] = 512;
+  output_45->dim[2] = 19;
+  output_45->dim[3] = 19;
+  output_45->dim_count = 4;
+  output_45->qinfo = (struct csinn_quant_info *)(params_base + 1094820);
+  output_45->quant_channel = 1;
+  struct csinn_relu_params *params_45 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_45->base.name = "relu_82";
+  csinn_relu_init(output_44, output_45, params_45);
+  struct csinn_tensor *output_46 = csinn_alloc_tensor(sess);
+  output_46->name = "output_46";
+  output_46->dtype = CSINN_DTYPE_UINT8;
+  output_46->layout = CSINN_LAYOUT_NCHW;
+  output_46->dim[0] = 1;
+  output_46->dim[1] = 512;
+  output_46->dim[2] = 19;
+  output_46->dim[3] = 19;
+  output_46->dim_count = 4;
+  output_46->qinfo = (struct csinn_quant_info *)(params_base + 1094844);
+  output_46->quant_channel = 1;
+  struct csinn_tensor *kernel_46 = csinn_alloc_tensor(sess);
+  kernel_46->name = "kernel_46";
+  kernel_46->data = params_base + 1094892;
+  kernel_46->is_const = 1;
+  kernel_46->dtype = CSINN_DTYPE_UINT8;
+  kernel_46->layout = CSINN_LAYOUT_OIHW;
+  kernel_46->dim[0] = 512;
+  kernel_46->dim[1] = 512;
+  kernel_46->dim[2] = 1;
+  kernel_46->dim[3] = 1;
+  kernel_46->dim_count = 4;
+  kernel_46->qinfo = (struct csinn_quant_info *)(params_base + 1094868);
+  kernel_46->quant_channel = 1;
+  struct csinn_tensor *bias_46 = csinn_alloc_tensor(sess);
+  bias_46->name = "bias_46";
+  bias_46->data = params_base + 1357060;
+  bias_46->is_const = 1;
+  bias_46->dtype = CSINN_DTYPE_INT32;
+  bias_46->layout = CSINN_LAYOUT_O;
+  bias_46->dim[0] = 512;
+  bias_46->dim_count = 1;
+  bias_46->qinfo = (struct csinn_quant_info *)(params_base + 1357036);
+  bias_46->quant_channel = 1;
+  struct csinn_conv2d_params *params_46 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_46->group = 1;
+  params_46->stride_height = 1;
+  params_46->stride_width = 1;
+  params_46->dilation_height = 1;
+  params_46->dilation_width = 1;
+  params_46->conv_extra.kernel_tm = NULL;
+  params_46->conv_extra.conv_mode = CSINN_DIRECT;
+  params_46->pad_top = 0;
+  params_46->pad_left = 0;
+  params_46->pad_down = 0;
+  params_46->pad_right = 0;
+  params_46->base.name = "conv2d_83_fuse_multiply_84_fuse_add_85";
+  csinn_conv2d_init(output_45, output_46, kernel_46, bias_46, params_46);
+  struct csinn_tensor *output_47 = csinn_alloc_tensor(sess);
+  output_47->name = "output_47";
+  output_47->dtype = CSINN_DTYPE_UINT8;
+  output_47->layout = CSINN_LAYOUT_NCHW;
+  output_47->dim[0] = 1;
+  output_47->dim[1] = 512;
+  output_47->dim[2] = 19;
+  output_47->dim[3] = 19;
+  output_47->dim_count = 4;
+  output_47->qinfo = (struct csinn_quant_info *)(params_base + 1359108);
+  output_47->quant_channel = 1;
+  struct csinn_relu_params *params_47 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_47->base.name = "relu_86";
+  csinn_relu_init(output_46, output_47, params_47);
+  struct csinn_tensor *output_48 = csinn_alloc_tensor(sess);
+  output_48->name = "output_48";
+  output_48->dtype = CSINN_DTYPE_UINT8;
+  output_48->layout = CSINN_LAYOUT_NCHW;
+  output_48->dim[0] = 1;
+  output_48->dim[1] = 512;
+  output_48->dim[2] = 19;
+  output_48->dim[3] = 19;
+  output_48->dim_count = 4;
+  output_48->qinfo = (struct csinn_quant_info *)(params_base + 1359132);
+  output_48->quant_channel = 1;
+  struct csinn_tensor *kernel_48 = csinn_alloc_tensor(sess);
+  kernel_48->name = "kernel_48";
+  kernel_48->data = params_base + 1359180;
+  kernel_48->is_const = 1;
+  kernel_48->dtype = CSINN_DTYPE_UINT8;
+  kernel_48->layout = CSINN_LAYOUT_O1HW;
+  kernel_48->dim[0] = 512;
+  kernel_48->dim[1] = 1;
+  kernel_48->dim[2] = 3;
+  kernel_48->dim[3] = 3;
+  kernel_48->dim_count = 4;
+  kernel_48->qinfo = (struct csinn_quant_info *)(params_base + 1359156);
+  kernel_48->quant_channel = 1;
+  struct csinn_tensor *bias_48 = csinn_alloc_tensor(sess);
+  bias_48->name = "bias_48";
+  bias_48->data = params_base + 1363812;
+  bias_48->is_const = 1;
+  bias_48->dtype = CSINN_DTYPE_INT32;
+  bias_48->layout = CSINN_LAYOUT_O;
+  bias_48->dim[0] = 512;
+  bias_48->dim_count = 1;
+  bias_48->qinfo = (struct csinn_quant_info *)(params_base + 1363788);
+  bias_48->quant_channel = 1;
+  struct csinn_conv2d_params *params_48 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_48->group = 512;
+  params_48->stride_height = 1;
+  params_48->stride_width = 1;
+  params_48->dilation_height = 1;
+  params_48->dilation_width = 1;
+  params_48->conv_extra.kernel_tm = NULL;
+  params_48->conv_extra.conv_mode = CSINN_DIRECT;
+  params_48->pad_top = 1;
+  params_48->pad_left = 1;
+  params_48->pad_down = 1;
+  params_48->pad_right = 1;
+  params_48->base.name = "conv2d_87_fuse_multiply_88_fuse_add_89";
+  csinn_conv2d_init(output_47, output_48, kernel_48, bias_48, params_48);
+  struct csinn_tensor *output_49 = csinn_alloc_tensor(sess);
+  output_49->name = "output_49";
+  output_49->dtype = CSINN_DTYPE_UINT8;
+  output_49->layout = CSINN_LAYOUT_NCHW;
+  output_49->dim[0] = 1;
+  output_49->dim[1] = 512;
+  output_49->dim[2] = 19;
+  output_49->dim[3] = 19;
+  output_49->dim_count = 4;
+  output_49->qinfo = (struct csinn_quant_info *)(params_base + 1365860);
+  output_49->quant_channel = 1;
+  struct csinn_relu_params *params_49 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_49->base.name = "relu_90";
+  csinn_relu_init(output_48, output_49, params_49);
+  struct csinn_tensor *output_50 = csinn_alloc_tensor(sess);
+  output_50->name = "output_50";
+  output_50->dtype = CSINN_DTYPE_UINT8;
+  output_50->layout = CSINN_LAYOUT_NCHW;
+  output_50->dim[0] = 1;
+  output_50->dim[1] = 512;
+  output_50->dim[2] = 19;
+  output_50->dim[3] = 19;
+  output_50->dim_count = 4;
+  output_50->qinfo = (struct csinn_quant_info *)(params_base + 1365884);
+  output_50->quant_channel = 1;
+  struct csinn_tensor *kernel_50 = csinn_alloc_tensor(sess);
+  kernel_50->name = "kernel_50";
+  kernel_50->data = params_base + 1365932;
+  kernel_50->is_const = 1;
+  kernel_50->dtype = CSINN_DTYPE_UINT8;
+  kernel_50->layout = CSINN_LAYOUT_OIHW;
+  kernel_50->dim[0] = 512;
+  kernel_50->dim[1] = 512;
+  kernel_50->dim[2] = 1;
+  kernel_50->dim[3] = 1;
+  kernel_50->dim_count = 4;
+  kernel_50->qinfo = (struct csinn_quant_info *)(params_base + 1365908);
+  kernel_50->quant_channel = 1;
+  struct csinn_tensor *bias_50 = csinn_alloc_tensor(sess);
+  bias_50->name = "bias_50";
+  bias_50->data = params_base + 1628100;
+  bias_50->is_const = 1;
+  bias_50->dtype = CSINN_DTYPE_INT32;
+  bias_50->layout = CSINN_LAYOUT_O;
+  bias_50->dim[0] = 512;
+  bias_50->dim_count = 1;
+  bias_50->qinfo = (struct csinn_quant_info *)(params_base + 1628076);
+  bias_50->quant_channel = 1;
+  struct csinn_conv2d_params *params_50 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_50->group = 1;
+  params_50->stride_height = 1;
+  params_50->stride_width = 1;
+  params_50->dilation_height = 1;
+  params_50->dilation_width = 1;
+  params_50->conv_extra.kernel_tm = NULL;
+  params_50->conv_extra.conv_mode = CSINN_DIRECT;
+  params_50->pad_top = 0;
+  params_50->pad_left = 0;
+  params_50->pad_down = 0;
+  params_50->pad_right = 0;
+  params_50->base.name = "conv2d_91_fuse_multiply_92_fuse_add_93";
+  csinn_conv2d_init(output_49, output_50, kernel_50, bias_50, params_50);
+  struct csinn_tensor *output_51 = csinn_alloc_tensor(sess);
+  output_51->name = "output_51";
+  output_51->dtype = CSINN_DTYPE_UINT8;
+  output_51->layout = CSINN_LAYOUT_NCHW;
+  output_51->dim[0] = 1;
+  output_51->dim[1] = 512;
+  output_51->dim[2] = 19;
+  output_51->dim[3] = 19;
+  output_51->dim_count = 4;
+  output_51->qinfo = (struct csinn_quant_info *)(params_base + 1630148);
+  output_51->quant_channel = 1;
+  struct csinn_relu_params *params_51 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_51->base.name = "relu_94";
+  csinn_relu_init(output_50, output_51, params_51);
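+  /* Detection head on the 19x19 map: 1x1 convolution, 512 -> 12 channels (likely 3 anchors x 4 box offsets). */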
+  struct csinn_tensor *output_52 = csinn_alloc_tensor(sess);
+  output_52->name = "output_52";
+  output_52->dtype = CSINN_DTYPE_UINT8;
+  output_52->layout = CSINN_LAYOUT_NCHW;
+  output_52->dim[0] = 1;
+  output_52->dim[1] = 12;
+  output_52->dim[2] = 19;
+  output_52->dim[3] = 19;
+  output_52->dim_count = 4;
+  output_52->qinfo = (struct csinn_quant_info *)(params_base + 1630172);
+  output_52->quant_channel = 1;
+  struct csinn_tensor *kernel_52 = csinn_alloc_tensor(sess);
+  kernel_52->name = "kernel_52";
+  kernel_52->data = params_base + 1630220;
+  kernel_52->is_const = 1;
+  kernel_52->dtype = CSINN_DTYPE_UINT8;
+  kernel_52->layout = CSINN_LAYOUT_OIHW;
+  kernel_52->dim[0] = 12;
+  kernel_52->dim[1] = 512;
+  kernel_52->dim[2] = 1;
+  kernel_52->dim[3] = 1;
+  kernel_52->dim_count = 4;
+  kernel_52->qinfo = (struct csinn_quant_info *)(params_base + 1630196);
+  kernel_52->quant_channel = 1;
+  struct csinn_tensor *bias_52 = csinn_alloc_tensor(sess);
+  bias_52->name = "bias_52";
+  bias_52->data = params_base + 1636388;
+  bias_52->is_const = 1;
+  bias_52->dtype = CSINN_DTYPE_INT32;
+  bias_52->layout = CSINN_LAYOUT_O;
+  bias_52->dim[0] = 12;
+  bias_52->dim_count = 1;
+  bias_52->qinfo = (struct csinn_quant_info *)(params_base + 1636364);
+  bias_52->quant_channel = 1;
+  struct csinn_conv2d_params *params_52 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_52->group = 1;
+  params_52->stride_height = 1;
+  params_52->stride_width = 1;
+  params_52->dilation_height = 1;
+  params_52->dilation_width = 1;
+  params_52->conv_extra.kernel_tm = NULL;
+  params_52->conv_extra.conv_mode = CSINN_DIRECT;
+  params_52->pad_top = 0;
+  params_52->pad_left = 0;
+  params_52->pad_down = 0;
+  params_52->pad_right = 0;
+  params_52->base.name = "conv2d_95_fuse_bias_add_96";
+  csinn_conv2d_init(output_51, output_52, kernel_52, bias_52, params_52);
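+  /* Permute 0,2,3,1 moves channels last so the per-anchor values are contiguous before flattening. */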
+  int32_t *permute_53 = malloc(4 * sizeof(int32_t));
+  permute_53[0] = 0;
+  permute_53[1] = 2;
+  permute_53[2] = 3;
+  permute_53[3] = 1;
+  struct csinn_tensor *output_53 = csinn_alloc_tensor(sess);
+  output_53->name = "output_53";
+  output_53->dtype = CSINN_DTYPE_UINT8;
+  output_53->layout = CSINN_LAYOUT_NCHW;
+  output_53->dim[0] = 1;
+  output_53->dim[1] = 19;
+  output_53->dim[2] = 19;
+  output_53->dim[3] = 12;
+  output_53->dim_count = 4;
+  output_53->qinfo = (struct csinn_quant_info *)(params_base + 1636436);
+  output_53->quant_channel = 1;
+  struct csinn_transpose_params *params_53 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_53->permute = permute_53;
+  params_53->permute_num = 4;
+  params_53->base.name = "transpose_97";
+  csinn_transpose_init(output_52, output_53, params_53);
+  int32_t *shape_54 = malloc(2 * sizeof(int32_t));
+  shape_54[0] = 1;
+  shape_54[1] = 4332;
+  struct csinn_tensor *output_54 = csinn_alloc_tensor(sess);
+  output_54->name = "output_54";
+  output_54->dtype = CSINN_DTYPE_UINT8;
+  output_54->layout = CSINN_LAYOUT_NC;
+  output_54->dim[0] = 1;
+  output_54->dim[1] = 4332;
+  output_54->dim_count = 2;
+  output_54->qinfo = (struct csinn_quant_info *)(params_base + 1636460);
+  output_54->quant_channel = 1;
+  struct csinn_reshape_params *params_54 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_54->shape = shape_54;
+  params_54->shape_num = 2;
+  params_54->base.name = "batch_flatten_98";
+  csinn_reshape_init(output_53, output_54, params_54);
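+  /* Scale the flattened head (1x4332 = 19*19*12) element-wise by a constant tensor (mul_167). */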
+  struct csinn_tensor *output_56 = csinn_alloc_tensor(sess);
+  output_56->name = "output_56";
+  output_56->dtype = CSINN_DTYPE_UINT8;
+  output_56->layout = CSINN_LAYOUT_NC;
+  output_56->dim[0] = 1;
+  output_56->dim[1] = 4332;
+  output_56->dim_count = 2;
+  output_56->qinfo = (struct csinn_quant_info *)(params_base + 1636484);
+  output_56->quant_channel = 1;
+  struct csinn_tensor *rhs_56 = csinn_alloc_tensor(sess);
+  rhs_56->name = "rhs_56";
+  rhs_56->data = params_base + 1636532;
+  rhs_56->is_const = 1;
+  rhs_56->dtype = CSINN_DTYPE_UINT8;
+  rhs_56->layout = CSINN_LAYOUT_OI;
+  rhs_56->dim[0] = 1;
+  rhs_56->dim[1] = 4332;
+  rhs_56->dim_count = 2;
+  rhs_56->qinfo = (struct csinn_quant_info *)(params_base + 1636508);
+  rhs_56->quant_channel = 1;
+  struct csinn_diso_params *params_56 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_56->base.name = "mul_167";
+  csinn_mul_init(output_54, rhs_56, output_56, params_56);
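+  /* conv2d_99: 3x3 depthwise convolution (group = 512), stride 2, pad 1; downsamples 19x19 -> 10x10. Input is output_51, branching off before the head above. */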
+  struct csinn_tensor *output_58 = csinn_alloc_tensor(sess);
+  output_58->name = "output_58";
+  output_58->dtype = CSINN_DTYPE_UINT8;
+  output_58->layout = CSINN_LAYOUT_NCHW;
+  output_58->dim[0] = 1;
+  output_58->dim[1] = 512;
+  output_58->dim[2] = 10;
+  output_58->dim[3] = 10;
+  output_58->dim_count = 4;
+  output_58->qinfo = (struct csinn_quant_info *)(params_base + 1640864);
+  output_58->quant_channel = 1;
+  struct csinn_tensor *kernel_58 = csinn_alloc_tensor(sess);
+  kernel_58->name = "kernel_58";
+  kernel_58->data = params_base + 1640912;
+  kernel_58->is_const = 1;
+  kernel_58->dtype = CSINN_DTYPE_UINT8;
+  kernel_58->layout = CSINN_LAYOUT_O1HW;
+  kernel_58->dim[0] = 512;
+  kernel_58->dim[1] = 1;
+  kernel_58->dim[2] = 3;
+  kernel_58->dim[3] = 3;
+  kernel_58->dim_count = 4;
+  kernel_58->qinfo = (struct csinn_quant_info *)(params_base + 1640888);
+  kernel_58->quant_channel = 1;
+  struct csinn_tensor *bias_58 = csinn_alloc_tensor(sess);
+  bias_58->name = "bias_58";
+  bias_58->data = params_base + 1645544;
+  bias_58->is_const = 1;
+  bias_58->dtype = CSINN_DTYPE_INT32;
+  bias_58->layout = CSINN_LAYOUT_O;
+  bias_58->dim[0] = 512;
+  bias_58->dim_count = 1;
+  bias_58->qinfo = (struct csinn_quant_info *)(params_base + 1645520);
+  bias_58->quant_channel = 1;
+  struct csinn_conv2d_params *params_58 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_58->group = 512;
+  params_58->stride_height = 2;
+  params_58->stride_width = 2;
+  params_58->dilation_height = 1;
+  params_58->dilation_width = 1;
+  params_58->conv_extra.kernel_tm = NULL;
+  params_58->conv_extra.conv_mode = CSINN_DIRECT;
+  params_58->pad_top = 1;
+  params_58->pad_left = 1;
+  params_58->pad_down = 1;
+  params_58->pad_right = 1;
+  params_58->base.name = "conv2d_99_fuse_multiply_100_fuse_add_101";
+  csinn_conv2d_init(output_51, output_58, kernel_58, bias_58, params_58);
+  struct csinn_tensor *output_59 = csinn_alloc_tensor(sess);
+  output_59->name = "output_59";
+  output_59->dtype = CSINN_DTYPE_UINT8;
+  output_59->layout = CSINN_LAYOUT_NCHW;
+  output_59->dim[0] = 1;
+  output_59->dim[1] = 512;
+  output_59->dim[2] = 10;
+  output_59->dim[3] = 10;
+  output_59->dim_count = 4;
+  output_59->qinfo = (struct csinn_quant_info *)(params_base + 1647592);
+  output_59->quant_channel = 1;
+  struct csinn_relu_params *params_59 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_59->base.name = "relu_102";
+  csinn_relu_init(output_58, output_59, params_59);
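+  /* conv2d_103: 1x1 pointwise convolution, 512 -> 1024 channels on the 10x10 map. */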
+  struct csinn_tensor *output_60 = csinn_alloc_tensor(sess);
+  output_60->name = "output_60";
+  output_60->dtype = CSINN_DTYPE_UINT8;
+  output_60->layout = CSINN_LAYOUT_NCHW;
+  output_60->dim[0] = 1;
+  output_60->dim[1] = 1024;
+  output_60->dim[2] = 10;
+  output_60->dim[3] = 10;
+  output_60->dim_count = 4;
+  output_60->qinfo = (struct csinn_quant_info *)(params_base + 1647616);
+  output_60->quant_channel = 1;
+  struct csinn_tensor *kernel_60 = csinn_alloc_tensor(sess);
+  kernel_60->name = "kernel_60";
+  kernel_60->data = params_base + 1647664;
+  kernel_60->is_const = 1;
+  kernel_60->dtype = CSINN_DTYPE_UINT8;
+  kernel_60->layout = CSINN_LAYOUT_OIHW;
+  kernel_60->dim[0] = 1024;
+  kernel_60->dim[1] = 512;
+  kernel_60->dim[2] = 1;
+  kernel_60->dim[3] = 1;
+  kernel_60->dim_count = 4;
+  kernel_60->qinfo = (struct csinn_quant_info *)(params_base + 1647640);
+  kernel_60->quant_channel = 1;
+  struct csinn_tensor *bias_60 = csinn_alloc_tensor(sess);
+  bias_60->name = "bias_60";
+  bias_60->data = params_base + 2171976;
+  bias_60->is_const = 1;
+  bias_60->dtype = CSINN_DTYPE_INT32;
+  bias_60->layout = CSINN_LAYOUT_O;
+  bias_60->dim[0] = 1024;
+  bias_60->dim_count = 1;
+  bias_60->qinfo = (struct csinn_quant_info *)(params_base + 2171952);
+  bias_60->quant_channel = 1;
+  struct csinn_conv2d_params *params_60 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_60->group = 1;
+  params_60->stride_height = 1;
+  params_60->stride_width = 1;
+  params_60->dilation_height = 1;
+  params_60->dilation_width = 1;
+  params_60->conv_extra.kernel_tm = NULL;
+  params_60->conv_extra.conv_mode = CSINN_DIRECT;
+  params_60->pad_top = 0;
+  params_60->pad_left = 0;
+  params_60->pad_down = 0;
+  params_60->pad_right = 0;
+  params_60->base.name = "conv2d_103_fuse_multiply_104_fuse_add_105";
+  csinn_conv2d_init(output_59, output_60, kernel_60, bias_60, params_60);
+  struct csinn_tensor *output_61 = csinn_alloc_tensor(sess);
+  output_61->name = "output_61";
+  output_61->dtype = CSINN_DTYPE_UINT8;
+  output_61->layout = CSINN_LAYOUT_NCHW;
+  output_61->dim[0] = 1;
+  output_61->dim[1] = 1024;
+  output_61->dim[2] = 10;
+  output_61->dim[3] = 10;
+  output_61->dim_count = 4;
+  output_61->qinfo = (struct csinn_quant_info *)(params_base + 2176072);
+  output_61->quant_channel = 1;
+  struct csinn_relu_params *params_61 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_61->base.name = "relu_106";
+  csinn_relu_init(output_60, output_61, params_61);
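+  /* conv2d_107: 3x3 depthwise convolution (group = 1024), stride 1, pad 1 on the 10x10 map, followed by conv2d_111 (1x1, 1024 -> 1024). */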
+  struct csinn_tensor *output_62 = csinn_alloc_tensor(sess);
+  output_62->name = "output_62";
+  output_62->dtype = CSINN_DTYPE_UINT8;
+  output_62->layout = CSINN_LAYOUT_NCHW;
+  output_62->dim[0] = 1;
+  output_62->dim[1] = 1024;
+  output_62->dim[2] = 10;
+  output_62->dim[3] = 10;
+  output_62->dim_count = 4;
+  output_62->qinfo = (struct csinn_quant_info *)(params_base + 2176096);
+  output_62->quant_channel = 1;
+  struct csinn_tensor *kernel_62 = csinn_alloc_tensor(sess);
+  kernel_62->name = "kernel_62";
+  kernel_62->data = params_base + 2176144;
+  kernel_62->is_const = 1;
+  kernel_62->dtype = CSINN_DTYPE_UINT8;
+  kernel_62->layout = CSINN_LAYOUT_O1HW;
+  kernel_62->dim[0] = 1024;
+  kernel_62->dim[1] = 1;
+  kernel_62->dim[2] = 3;
+  kernel_62->dim[3] = 3;
+  kernel_62->dim_count = 4;
+  kernel_62->qinfo = (struct csinn_quant_info *)(params_base + 2176120);
+  kernel_62->quant_channel = 1;
+  struct csinn_tensor *bias_62 = csinn_alloc_tensor(sess);
+  bias_62->name = "bias_62";
+  bias_62->data = params_base + 2185384;
+  bias_62->is_const = 1;
+  bias_62->dtype = CSINN_DTYPE_INT32;
+  bias_62->layout = CSINN_LAYOUT_O;
+  bias_62->dim[0] = 1024;
+  bias_62->dim_count = 1;
+  bias_62->qinfo = (struct csinn_quant_info *)(params_base + 2185360);
+  bias_62->quant_channel = 1;
+  struct csinn_conv2d_params *params_62 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_62->group = 1024;
+  params_62->stride_height = 1;
+  params_62->stride_width = 1;
+  params_62->dilation_height = 1;
+  params_62->dilation_width = 1;
+  params_62->conv_extra.kernel_tm = NULL;
+  params_62->conv_extra.conv_mode = CSINN_DIRECT;
+  params_62->pad_top = 1;
+  params_62->pad_left = 1;
+  params_62->pad_down = 1;
+  params_62->pad_right = 1;
+  params_62->base.name = "conv2d_107_fuse_multiply_108_fuse_add_109";
+  csinn_conv2d_init(output_61, output_62, kernel_62, bias_62, params_62);
+  struct csinn_tensor *output_63 = csinn_alloc_tensor(sess);
+  output_63->name = "output_63";
+  output_63->dtype = CSINN_DTYPE_UINT8;
+  output_63->layout = CSINN_LAYOUT_NCHW;
+  output_63->dim[0] = 1;
+  output_63->dim[1] = 1024;
+  output_63->dim[2] = 10;
+  output_63->dim[3] = 10;
+  output_63->dim_count = 4;
+  output_63->qinfo = (struct csinn_quant_info *)(params_base + 2189480);
+  output_63->quant_channel = 1;
+  struct csinn_relu_params *params_63 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_63->base.name = "relu_110";
+  csinn_relu_init(output_62, output_63, params_63);
+  struct csinn_tensor *output_64 = csinn_alloc_tensor(sess);
+  output_64->name = "output_64";
+  output_64->dtype = CSINN_DTYPE_UINT8;
+  output_64->layout = CSINN_LAYOUT_NCHW;
+  output_64->dim[0] = 1;
+  output_64->dim[1] = 1024;
+  output_64->dim[2] = 10;
+  output_64->dim[3] = 10;
+  output_64->dim_count = 4;
+  output_64->qinfo = (struct csinn_quant_info *)(params_base + 2189504);
+  output_64->quant_channel = 1;
+  struct csinn_tensor *kernel_64 = csinn_alloc_tensor(sess);
+  kernel_64->name = "kernel_64";
+  kernel_64->data = params_base + 2189552;
+  kernel_64->is_const = 1;
+  kernel_64->dtype = CSINN_DTYPE_UINT8;
+  kernel_64->layout = CSINN_LAYOUT_OIHW;
+  kernel_64->dim[0] = 1024;
+  kernel_64->dim[1] = 1024;
+  kernel_64->dim[2] = 1;
+  kernel_64->dim[3] = 1;
+  kernel_64->dim_count = 4;
+  kernel_64->qinfo = (struct csinn_quant_info *)(params_base + 2189528);
+  kernel_64->quant_channel = 1;
+  struct csinn_tensor *bias_64 = csinn_alloc_tensor(sess);
+  bias_64->name = "bias_64";
+  bias_64->data = params_base + 3238152;
+  bias_64->is_const = 1;
+  bias_64->dtype = CSINN_DTYPE_INT32;
+  bias_64->layout = CSINN_LAYOUT_O;
+  bias_64->dim[0] = 1024;
+  bias_64->dim_count = 1;
+  bias_64->qinfo = (struct csinn_quant_info *)(params_base + 3238128);
+  bias_64->quant_channel = 1;
+  struct csinn_conv2d_params *params_64 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_64->group = 1;
+  params_64->stride_height = 1;
+  params_64->stride_width = 1;
+  params_64->dilation_height = 1;
+  params_64->dilation_width = 1;
+  params_64->conv_extra.kernel_tm = NULL;
+  params_64->conv_extra.conv_mode = CSINN_DIRECT;
+  params_64->pad_top = 0;
+  params_64->pad_left = 0;
+  params_64->pad_down = 0;
+  params_64->pad_right = 0;
+  params_64->base.name = "conv2d_111_fuse_multiply_112_fuse_add_113";
+  csinn_conv2d_init(output_63, output_64, kernel_64, bias_64, params_64);
+  struct csinn_tensor *output_65 = csinn_alloc_tensor(sess);
+  output_65->name = "output_65";
+  output_65->dtype = CSINN_DTYPE_UINT8;
+  output_65->layout = CSINN_LAYOUT_NCHW;
+  output_65->dim[0] = 1;
+  output_65->dim[1] = 1024;
+  output_65->dim[2] = 10;
+  output_65->dim[3] = 10;
+  output_65->dim_count = 4;
+  output_65->qinfo = (struct csinn_quant_info *)(params_base + 3242248);
+  output_65->quant_channel = 1;
+  struct csinn_relu_params *params_65 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_65->base.name = "relu_114";
+  csinn_relu_init(output_64, output_65, params_65);
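+  /* Detection head on the 10x10 map: 1x1 convolution 1024 -> 24, transpose to channels-last, flatten to 1x2400 (10*10*24), then element-wise scale (mul_168). */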
+  struct csinn_tensor *output_66 = csinn_alloc_tensor(sess);
+  output_66->name = "output_66";
+  output_66->dtype = CSINN_DTYPE_UINT8;
+  output_66->layout = CSINN_LAYOUT_NCHW;
+  output_66->dim[0] = 1;
+  output_66->dim[1] = 24;
+  output_66->dim[2] = 10;
+  output_66->dim[3] = 10;
+  output_66->dim_count = 4;
+  output_66->qinfo = (struct csinn_quant_info *)(params_base + 3242272);
+  output_66->quant_channel = 1;
+  struct csinn_tensor *kernel_66 = csinn_alloc_tensor(sess);
+  kernel_66->name = "kernel_66";
+  kernel_66->data = params_base + 3242320;
+  kernel_66->is_const = 1;
+  kernel_66->dtype = CSINN_DTYPE_UINT8;
+  kernel_66->layout = CSINN_LAYOUT_OIHW;
+  kernel_66->dim[0] = 24;
+  kernel_66->dim[1] = 1024;
+  kernel_66->dim[2] = 1;
+  kernel_66->dim[3] = 1;
+  kernel_66->dim_count = 4;
+  kernel_66->qinfo = (struct csinn_quant_info *)(params_base + 3242296);
+  kernel_66->quant_channel = 1;
+  struct csinn_tensor *bias_66 = csinn_alloc_tensor(sess);
+  bias_66->name = "bias_66";
+  bias_66->data = params_base + 3266920;
+  bias_66->is_const = 1;
+  bias_66->dtype = CSINN_DTYPE_INT32;
+  bias_66->layout = CSINN_LAYOUT_O;
+  bias_66->dim[0] = 24;
+  bias_66->dim_count = 1;
+  bias_66->qinfo = (struct csinn_quant_info *)(params_base + 3266896);
+  bias_66->quant_channel = 1;
+  struct csinn_conv2d_params *params_66 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_66->group = 1;
+  params_66->stride_height = 1;
+  params_66->stride_width = 1;
+  params_66->dilation_height = 1;
+  params_66->dilation_width = 1;
+  params_66->conv_extra.kernel_tm = NULL;
+  params_66->conv_extra.conv_mode = CSINN_DIRECT;
+  params_66->pad_top = 0;
+  params_66->pad_left = 0;
+  params_66->pad_down = 0;
+  params_66->pad_right = 0;
+  params_66->base.name = "conv2d_115_fuse_bias_add_116";
+  csinn_conv2d_init(output_65, output_66, kernel_66, bias_66, params_66);
+  int32_t *permute_67 = malloc(4 * sizeof(int32_t));
+  permute_67[0] = 0;
+  permute_67[1] = 2;
+  permute_67[2] = 3;
+  permute_67[3] = 1;
+  struct csinn_tensor *output_67 = csinn_alloc_tensor(sess);
+  output_67->name = "output_67";
+  output_67->dtype = CSINN_DTYPE_UINT8;
+  output_67->layout = CSINN_LAYOUT_NCHW;
+  output_67->dim[0] = 1;
+  output_67->dim[1] = 10;
+  output_67->dim[2] = 10;
+  output_67->dim[3] = 24;
+  output_67->dim_count = 4;
+  output_67->qinfo = (struct csinn_quant_info *)(params_base + 3267016);
+  output_67->quant_channel = 1;
+  struct csinn_transpose_params *params_67 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_67->permute = permute_67;
+  params_67->permute_num = 4;
+  params_67->base.name = "transpose_117";
+  csinn_transpose_init(output_66, output_67, params_67);
+  int32_t *shape_68 = malloc(2 * sizeof(int32_t));
+  shape_68[0] = 1;
+  shape_68[1] = 2400;
+  struct csinn_tensor *output_68 = csinn_alloc_tensor(sess);
+  output_68->name = "output_68";
+  output_68->dtype = CSINN_DTYPE_UINT8;
+  output_68->layout = CSINN_LAYOUT_NC;
+  output_68->dim[0] = 1;
+  output_68->dim[1] = 2400;
+  output_68->dim_count = 2;
+  output_68->qinfo = (struct csinn_quant_info *)(params_base + 3267040);
+  output_68->quant_channel = 1;
+  struct csinn_reshape_params *params_68 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_68->shape = shape_68;
+  params_68->shape_num = 2;
+  params_68->base.name = "batch_flatten_118";
+  csinn_reshape_init(output_67, output_68, params_68);
+  struct csinn_tensor *output_70 = csinn_alloc_tensor(sess);
+  output_70->name = "output_70";
+  output_70->dtype = CSINN_DTYPE_UINT8;
+  output_70->layout = CSINN_LAYOUT_NC;
+  output_70->dim[0] = 1;
+  output_70->dim[1] = 2400;
+  output_70->dim_count = 2;
+  output_70->qinfo = (struct csinn_quant_info *)(params_base + 3267064);
+  output_70->quant_channel = 1;
+  struct csinn_tensor *rhs_70 = csinn_alloc_tensor(sess);
+  rhs_70->name = "rhs_70";
+  rhs_70->data = params_base + 3267112;
+  rhs_70->is_const = 1;
+  rhs_70->dtype = CSINN_DTYPE_UINT8;
+  rhs_70->layout = CSINN_LAYOUT_OI;
+  rhs_70->dim[0] = 1;
+  rhs_70->dim[1] = 2400;
+  rhs_70->dim_count = 2;
+  rhs_70->qinfo = (struct csinn_quant_info *)(params_base + 3267088);
+  rhs_70->quant_channel = 1;
+  struct csinn_diso_params *params_70 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_70->base.name = "mul_168";
+  csinn_mul_init(output_68, rhs_70, output_70, params_70);
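+  /* Extra feature layer: 1x1 convolution 1024 -> 256 (input output_65), then 3x3 stride-2 convolution 256 -> 512, shrinking 10x10 -> 5x5. */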
+  struct csinn_tensor *output_72 = csinn_alloc_tensor(sess);
+  output_72->name = "output_72";
+  output_72->dtype = CSINN_DTYPE_UINT8;
+  output_72->layout = CSINN_LAYOUT_NCHW;
+  output_72->dim[0] = 1;
+  output_72->dim[1] = 256;
+  output_72->dim[2] = 10;
+  output_72->dim[3] = 10;
+  output_72->dim_count = 4;
+  output_72->qinfo = (struct csinn_quant_info *)(params_base + 3269512);
+  output_72->quant_channel = 1;
+  struct csinn_tensor *kernel_72 = csinn_alloc_tensor(sess);
+  kernel_72->name = "kernel_72";
+  kernel_72->data = params_base + 3269560;
+  kernel_72->is_const = 1;
+  kernel_72->dtype = CSINN_DTYPE_UINT8;
+  kernel_72->layout = CSINN_LAYOUT_OIHW;
+  kernel_72->dim[0] = 256;
+  kernel_72->dim[1] = 1024;
+  kernel_72->dim[2] = 1;
+  kernel_72->dim[3] = 1;
+  kernel_72->dim_count = 4;
+  kernel_72->qinfo = (struct csinn_quant_info *)(params_base + 3269536);
+  kernel_72->quant_channel = 1;
+  struct csinn_tensor *bias_72 = csinn_alloc_tensor(sess);
+  bias_72->name = "bias_72";
+  bias_72->data = params_base + 3531728;
+  bias_72->is_const = 1;
+  bias_72->dtype = CSINN_DTYPE_INT32;
+  bias_72->layout = CSINN_LAYOUT_O;
+  bias_72->dim[0] = 256;
+  bias_72->dim_count = 1;
+  bias_72->qinfo = (struct csinn_quant_info *)(params_base + 3531704);
+  bias_72->quant_channel = 1;
+  struct csinn_conv2d_params *params_72 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_72->group = 1;
+  params_72->stride_height = 1;
+  params_72->stride_width = 1;
+  params_72->dilation_height = 1;
+  params_72->dilation_width = 1;
+  params_72->conv_extra.kernel_tm = NULL;
+  params_72->conv_extra.conv_mode = CSINN_DIRECT;
+  params_72->pad_top = 0;
+  params_72->pad_left = 0;
+  params_72->pad_down = 0;
+  params_72->pad_right = 0;
+  params_72->base.name = "conv2d_119_fuse_multiply_120_fuse_add_121";
+  csinn_conv2d_init(output_65, output_72, kernel_72, bias_72, params_72);
+  struct csinn_tensor *output_73 = csinn_alloc_tensor(sess);
+  output_73->name = "output_73";
+  output_73->dtype = CSINN_DTYPE_UINT8;
+  output_73->layout = CSINN_LAYOUT_NCHW;
+  output_73->dim[0] = 1;
+  output_73->dim[1] = 256;
+  output_73->dim[2] = 10;
+  output_73->dim[3] = 10;
+  output_73->dim_count = 4;
+  output_73->qinfo = (struct csinn_quant_info *)(params_base + 3532752);
+  output_73->quant_channel = 1;
+  struct csinn_relu_params *params_73 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_73->base.name = "relu_122";
+  csinn_relu_init(output_72, output_73, params_73);
+  struct csinn_tensor *output_74 = csinn_alloc_tensor(sess);
+  output_74->name = "output_74";
+  output_74->dtype = CSINN_DTYPE_UINT8;
+  output_74->layout = CSINN_LAYOUT_NCHW;
+  output_74->dim[0] = 1;
+  output_74->dim[1] = 512;
+  output_74->dim[2] = 5;
+  output_74->dim[3] = 5;
+  output_74->dim_count = 4;
+  output_74->qinfo = (struct csinn_quant_info *)(params_base + 3532776);
+  output_74->quant_channel = 1;
+  struct csinn_tensor *kernel_74 = csinn_alloc_tensor(sess);
+  kernel_74->name = "kernel_74";
+  kernel_74->data = params_base + 3532824;
+  kernel_74->is_const = 1;
+  kernel_74->dtype = CSINN_DTYPE_UINT8;
+  kernel_74->layout = CSINN_LAYOUT_OIHW;
+  kernel_74->dim[0] = 512;
+  kernel_74->dim[1] = 256;
+  kernel_74->dim[2] = 3;
+  kernel_74->dim[3] = 3;
+  kernel_74->dim_count = 4;
+  kernel_74->qinfo = (struct csinn_quant_info *)(params_base + 3532800);
+  kernel_74->quant_channel = 1;
+  struct csinn_tensor *bias_74 = csinn_alloc_tensor(sess);
+  bias_74->name = "bias_74";
+  bias_74->data = params_base + 4712496;
+  bias_74->is_const = 1;
+  bias_74->dtype = CSINN_DTYPE_INT32;
+  bias_74->layout = CSINN_LAYOUT_O;
+  bias_74->dim[0] = 512;
+  bias_74->dim_count = 1;
+  bias_74->qinfo = (struct csinn_quant_info *)(params_base + 4712472);
+  bias_74->quant_channel = 1;
+  struct csinn_conv2d_params *params_74 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_74->group = 1;
+  params_74->stride_height = 2;
+  params_74->stride_width = 2;
+  params_74->dilation_height = 1;
+  params_74->dilation_width = 1;
+  params_74->conv_extra.kernel_tm = NULL;
+  params_74->conv_extra.conv_mode = CSINN_DIRECT;
+  params_74->pad_top = 1;
+  params_74->pad_left = 1;
+  params_74->pad_down = 1;
+  params_74->pad_right = 1;
+  params_74->base.name = "conv2d_123_fuse_multiply_124_fuse_add_125";
+  csinn_conv2d_init(output_73, output_74, kernel_74, bias_74, params_74);
+  struct csinn_tensor *output_75 = csinn_alloc_tensor(sess);
+  output_75->name = "output_75";
+  output_75->dtype = CSINN_DTYPE_UINT8;
+  output_75->layout = CSINN_LAYOUT_NCHW;
+  output_75->dim[0] = 1;
+  output_75->dim[1] = 512;
+  output_75->dim[2] = 5;
+  output_75->dim[3] = 5;
+  output_75->dim_count = 4;
+  output_75->qinfo = (struct csinn_quant_info *)(params_base + 4714544);
+  output_75->quant_channel = 1;
+  struct csinn_relu_params *params_75 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_75->base.name = "relu_126";
+  csinn_relu_init(output_74, output_75, params_75);
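+  /* Detection head on the 5x5 map: 1x1 convolution 512 -> 24, transpose, flatten to 1x600, then element-wise scale (mul_169). */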
+  struct csinn_tensor *output_76 = csinn_alloc_tensor(sess);
+  output_76->name = "output_76";
+  output_76->dtype = CSINN_DTYPE_UINT8;
+  output_76->layout = CSINN_LAYOUT_NCHW;
+  output_76->dim[0] = 1;
+  output_76->dim[1] = 24;
+  output_76->dim[2] = 5;
+  output_76->dim[3] = 5;
+  output_76->dim_count = 4;
+  output_76->qinfo = (struct csinn_quant_info *)(params_base + 4714568);
+  output_76->quant_channel = 1;
+  struct csinn_tensor *kernel_76 = csinn_alloc_tensor(sess);
+  kernel_76->name = "kernel_76";
+  kernel_76->data = params_base + 4714616;
+  kernel_76->is_const = 1;
+  kernel_76->dtype = CSINN_DTYPE_UINT8;
+  kernel_76->layout = CSINN_LAYOUT_OIHW;
+  kernel_76->dim[0] = 24;
+  kernel_76->dim[1] = 512;
+  kernel_76->dim[2] = 1;
+  kernel_76->dim[3] = 1;
+  kernel_76->dim_count = 4;
+  kernel_76->qinfo = (struct csinn_quant_info *)(params_base + 4714592);
+  kernel_76->quant_channel = 1;
+  struct csinn_tensor *bias_76 = csinn_alloc_tensor(sess);
+  bias_76->name = "bias_76";
+  bias_76->data = params_base + 4726928;
+  bias_76->is_const = 1;
+  bias_76->dtype = CSINN_DTYPE_INT32;
+  bias_76->layout = CSINN_LAYOUT_O;
+  bias_76->dim[0] = 24;
+  bias_76->dim_count = 1;
+  bias_76->qinfo = (struct csinn_quant_info *)(params_base + 4726904);
+  bias_76->quant_channel = 1;
+  struct csinn_conv2d_params *params_76 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_76->group = 1;
+  params_76->stride_height = 1;
+  params_76->stride_width = 1;
+  params_76->dilation_height = 1;
+  params_76->dilation_width = 1;
+  params_76->conv_extra.kernel_tm = NULL;
+  params_76->conv_extra.conv_mode = CSINN_DIRECT;
+  params_76->pad_top = 0;
+  params_76->pad_left = 0;
+  params_76->pad_down = 0;
+  params_76->pad_right = 0;
+  params_76->base.name = "conv2d_127_fuse_bias_add_128";
+  csinn_conv2d_init(output_75, output_76, kernel_76, bias_76, params_76);
+  int32_t *permute_77 = malloc(4 * sizeof(int32_t));
+  permute_77[0] = 0;
+  permute_77[1] = 2;
+  permute_77[2] = 3;
+  permute_77[3] = 1;
+  struct csinn_tensor *output_77 = csinn_alloc_tensor(sess);
+  output_77->name = "output_77";
+  output_77->dtype = CSINN_DTYPE_UINT8;
+  output_77->layout = CSINN_LAYOUT_NCHW;
+  output_77->dim[0] = 1;
+  output_77->dim[1] = 5;
+  output_77->dim[2] = 5;
+  output_77->dim[3] = 24;
+  output_77->dim_count = 4;
+  output_77->qinfo = (struct csinn_quant_info *)(params_base + 4727024);
+  output_77->quant_channel = 1;
+  struct csinn_transpose_params *params_77 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_77->permute = permute_77;
+  params_77->permute_num = 4;
+  params_77->base.name = "transpose_129";
+  csinn_transpose_init(output_76, output_77, params_77);
+  int32_t *shape_78 = malloc(2 * sizeof(int32_t));
+  shape_78[0] = 1;
+  shape_78[1] = 600;
+  struct csinn_tensor *output_78 = csinn_alloc_tensor(sess);
+  output_78->name = "output_78";
+  output_78->dtype = CSINN_DTYPE_UINT8;
+  output_78->layout = CSINN_LAYOUT_NC;
+  output_78->dim[0] = 1;
+  output_78->dim[1] = 600;
+  output_78->dim_count = 2;
+  output_78->qinfo = (struct csinn_quant_info *)(params_base + 4727048);
+  output_78->quant_channel = 1;
+  struct csinn_reshape_params *params_78 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_78->shape = shape_78;
+  params_78->shape_num = 2;
+  params_78->base.name = "batch_flatten_130";
+  csinn_reshape_init(output_77, output_78, params_78);
+  struct csinn_tensor *output_80 = csinn_alloc_tensor(sess);
+  output_80->name = "output_80";
+  output_80->dtype = CSINN_DTYPE_UINT8;
+  output_80->layout = CSINN_LAYOUT_NC;
+  output_80->dim[0] = 1;
+  output_80->dim[1] = 600;
+  output_80->dim_count = 2;
+  output_80->qinfo = (struct csinn_quant_info *)(params_base + 4727072);
+  output_80->quant_channel = 1;
+  struct csinn_tensor *rhs_80 = csinn_alloc_tensor(sess);
+  rhs_80->name = "rhs_80";
+  rhs_80->data = params_base + 4727120;
+  rhs_80->is_const = 1;
+  rhs_80->dtype = CSINN_DTYPE_UINT8;
+  rhs_80->layout = CSINN_LAYOUT_OI;
+  rhs_80->dim[0] = 1;
+  rhs_80->dim[1] = 600;
+  rhs_80->dim_count = 2;
+  rhs_80->qinfo = (struct csinn_quant_info *)(params_base + 4727096);
+  rhs_80->quant_channel = 1;
+  struct csinn_diso_params *params_80 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_80->base.name = "mul_169";
+  csinn_mul_init(output_78, rhs_80, output_80, params_80);
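+  /* Extra feature layer: 1x1 convolution 512 -> 128 (input output_75), then 3x3 stride-2 convolution 128 -> 256, shrinking 5x5 -> 3x3. */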
+  struct csinn_tensor *output_82 = csinn_alloc_tensor(sess);
+  output_82->name = "output_82";
+  output_82->dtype = CSINN_DTYPE_UINT8;
+  output_82->layout = CSINN_LAYOUT_NCHW;
+  output_82->dim[0] = 1;
+  output_82->dim[1] = 128;
+  output_82->dim[2] = 5;
+  output_82->dim[3] = 5;
+  output_82->dim_count = 4;
+  output_82->qinfo = (struct csinn_quant_info *)(params_base + 4727720);
+  output_82->quant_channel = 1;
+  struct csinn_tensor *kernel_82 = csinn_alloc_tensor(sess);
+  kernel_82->name = "kernel_82";
+  kernel_82->data = params_base + 4727768;
+  kernel_82->is_const = 1;
+  kernel_82->dtype = CSINN_DTYPE_UINT8;
+  kernel_82->layout = CSINN_LAYOUT_OIHW;
+  kernel_82->dim[0] = 128;
+  kernel_82->dim[1] = 512;
+  kernel_82->dim[2] = 1;
+  kernel_82->dim[3] = 1;
+  kernel_82->dim_count = 4;
+  kernel_82->qinfo = (struct csinn_quant_info *)(params_base + 4727744);
+  kernel_82->quant_channel = 1;
+  struct csinn_tensor *bias_82 = csinn_alloc_tensor(sess);
+  bias_82->name = "bias_82";
+  bias_82->data = params_base + 4793328;
+  bias_82->is_const = 1;
+  bias_82->dtype = CSINN_DTYPE_INT32;
+  bias_82->layout = CSINN_LAYOUT_O;
+  bias_82->dim[0] = 128;
+  bias_82->dim_count = 1;
+  bias_82->qinfo = (struct csinn_quant_info *)(params_base + 4793304);
+  bias_82->quant_channel = 1;
+  struct csinn_conv2d_params *params_82 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_82->group = 1;
+  params_82->stride_height = 1;
+  params_82->stride_width = 1;
+  params_82->dilation_height = 1;
+  params_82->dilation_width = 1;
+  params_82->conv_extra.kernel_tm = NULL;
+  params_82->conv_extra.conv_mode = CSINN_DIRECT;
+  params_82->pad_top = 0;
+  params_82->pad_left = 0;
+  params_82->pad_down = 0;
+  params_82->pad_right = 0;
+  params_82->base.name = "conv2d_131_fuse_multiply_132_fuse_add_133";
+  csinn_conv2d_init(output_75, output_82, kernel_82, bias_82, params_82);
+  struct csinn_tensor *output_83 = csinn_alloc_tensor(sess);
+  output_83->name = "output_83";
+  output_83->dtype = CSINN_DTYPE_UINT8;
+  output_83->layout = CSINN_LAYOUT_NCHW;
+  output_83->dim[0] = 1;
+  output_83->dim[1] = 128;
+  output_83->dim[2] = 5;
+  output_83->dim[3] = 5;
+  output_83->dim_count = 4;
+  output_83->qinfo = (struct csinn_quant_info *)(params_base + 4793840);
+  output_83->quant_channel = 1;
+  struct csinn_relu_params *params_83 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_83->base.name = "relu_134";
+  csinn_relu_init(output_82, output_83, params_83);
+  struct csinn_tensor *output_84 = csinn_alloc_tensor(sess);
+  output_84->name = "output_84";
+  output_84->dtype = CSINN_DTYPE_UINT8;
+  output_84->layout = CSINN_LAYOUT_NCHW;
+  output_84->dim[0] = 1;
+  output_84->dim[1] = 256;
+  output_84->dim[2] = 3;
+  output_84->dim[3] = 3;
+  output_84->dim_count = 4;
+  output_84->qinfo = (struct csinn_quant_info *)(params_base + 4793864);
+  output_84->quant_channel = 1;
+  struct csinn_tensor *kernel_84 = csinn_alloc_tensor(sess);
+  kernel_84->name = "kernel_84";
+  kernel_84->data = params_base + 4793912;
+  kernel_84->is_const = 1;
+  kernel_84->dtype = CSINN_DTYPE_UINT8;
+  kernel_84->layout = CSINN_LAYOUT_OIHW;
+  kernel_84->dim[0] = 256;
+  kernel_84->dim[1] = 128;
+  kernel_84->dim[2] = 3;
+  kernel_84->dim[3] = 3;
+  kernel_84->dim_count = 4;
+  kernel_84->qinfo = (struct csinn_quant_info *)(params_base + 4793888);
+  kernel_84->quant_channel = 1;
+  struct csinn_tensor *bias_84 = csinn_alloc_tensor(sess);
+  bias_84->name = "bias_84";
+  bias_84->data = params_base + 5088848;
+  bias_84->is_const = 1;
+  bias_84->dtype = CSINN_DTYPE_INT32;
+  bias_84->layout = CSINN_LAYOUT_O;
+  bias_84->dim[0] = 256;
+  bias_84->dim_count = 1;
+  bias_84->qinfo = (struct csinn_quant_info *)(params_base + 5088824);
+  bias_84->quant_channel = 1;
+  struct csinn_conv2d_params *params_84 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_84->group = 1;
+  params_84->stride_height = 2;
+  params_84->stride_width = 2;
+  params_84->dilation_height = 1;
+  params_84->dilation_width = 1;
+  params_84->conv_extra.kernel_tm = NULL;
+  params_84->conv_extra.conv_mode = CSINN_DIRECT;
+  params_84->pad_top = 1;
+  params_84->pad_left = 1;
+  params_84->pad_down = 1;
+  params_84->pad_right = 1;
+  params_84->base.name = "conv2d_135_fuse_multiply_136_fuse_add_137";
+  csinn_conv2d_init(output_83, output_84, kernel_84, bias_84, params_84);
+  struct csinn_tensor *output_85 = csinn_alloc_tensor(sess);
+  output_85->name = "output_85";
+  output_85->dtype = CSINN_DTYPE_UINT8;
+  output_85->layout = CSINN_LAYOUT_NCHW;
+  output_85->dim[0] = 1;
+  output_85->dim[1] = 256;
+  output_85->dim[2] = 3;
+  output_85->dim[3] = 3;
+  output_85->dim_count = 4;
+  output_85->qinfo = (struct csinn_quant_info *)(params_base + 5089872);
+  output_85->quant_channel = 1;
+  struct csinn_relu_params *params_85 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_85->base.name = "relu_138";
+  csinn_relu_init(output_84, output_85, params_85);
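+  /* Detection head on the 3x3 map: 1x1 convolution 256 -> 24, transpose, flatten to 1x216, then element-wise scale (mul_170). */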
+  struct csinn_tensor *output_86 = csinn_alloc_tensor(sess);
+  output_86->name = "output_86";
+  output_86->dtype = CSINN_DTYPE_UINT8;
+  output_86->layout = CSINN_LAYOUT_NCHW;
+  output_86->dim[0] = 1;
+  output_86->dim[1] = 24;
+  output_86->dim[2] = 3;
+  output_86->dim[3] = 3;
+  output_86->dim_count = 4;
+  output_86->qinfo = (struct csinn_quant_info *)(params_base + 5089896);
+  output_86->quant_channel = 1;
+  struct csinn_tensor *kernel_86 = csinn_alloc_tensor(sess);
+  kernel_86->name = "kernel_86";
+  kernel_86->data = params_base + 5089944;
+  kernel_86->is_const = 1;
+  kernel_86->dtype = CSINN_DTYPE_UINT8;
+  kernel_86->layout = CSINN_LAYOUT_OIHW;
+  kernel_86->dim[0] = 24;
+  kernel_86->dim[1] = 256;
+  kernel_86->dim[2] = 1;
+  kernel_86->dim[3] = 1;
+  kernel_86->dim_count = 4;
+  kernel_86->qinfo = (struct csinn_quant_info *)(params_base + 5089920);
+  kernel_86->quant_channel = 1;
+  struct csinn_tensor *bias_86 = csinn_alloc_tensor(sess);
+  bias_86->name = "bias_86";
+  bias_86->data = params_base + 5096112;
+  bias_86->is_const = 1;
+  bias_86->dtype = CSINN_DTYPE_INT32;
+  bias_86->layout = CSINN_LAYOUT_O;
+  bias_86->dim[0] = 24;
+  bias_86->dim_count = 1;
+  bias_86->qinfo = (struct csinn_quant_info *)(params_base + 5096088);
+  bias_86->quant_channel = 1;
+  struct csinn_conv2d_params *params_86 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_86->group = 1;
+  params_86->stride_height = 1;
+  params_86->stride_width = 1;
+  params_86->dilation_height = 1;
+  params_86->dilation_width = 1;
+  params_86->conv_extra.kernel_tm = NULL;
+  params_86->conv_extra.conv_mode = CSINN_DIRECT;
+  params_86->pad_top = 0;
+  params_86->pad_left = 0;
+  params_86->pad_down = 0;
+  params_86->pad_right = 0;
+  params_86->base.name = "conv2d_139_fuse_bias_add_140";
+  csinn_conv2d_init(output_85, output_86, kernel_86, bias_86, params_86);
+  int32_t *permute_87 = malloc(4 * sizeof(int32_t));
+  permute_87[0] = 0;
+  permute_87[1] = 2;
+  permute_87[2] = 3;
+  permute_87[3] = 1;
+  struct csinn_tensor *output_87 = csinn_alloc_tensor(sess);
+  output_87->name = "output_87";
+  output_87->dtype = CSINN_DTYPE_UINT8;
+  output_87->layout = CSINN_LAYOUT_NCHW;
+  output_87->dim[0] = 1;
+  output_87->dim[1] = 3;
+  output_87->dim[2] = 3;
+  output_87->dim[3] = 24;
+  output_87->dim_count = 4;
+  output_87->qinfo = (struct csinn_quant_info *)(params_base + 5096208);
+  output_87->quant_channel = 1;
+  struct csinn_transpose_params *params_87 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_87->permute = permute_87;
+  params_87->permute_num = 4;
+  params_87->base.name = "transpose_141";
+  csinn_transpose_init(output_86, output_87, params_87);
+  int32_t *shape_88 = malloc(2 * sizeof(int32_t));
+  shape_88[0] = 1;
+  shape_88[1] = 216;
+  struct csinn_tensor *output_88 = csinn_alloc_tensor(sess);
+  output_88->name = "output_88";
+  output_88->dtype = CSINN_DTYPE_UINT8;
+  output_88->layout = CSINN_LAYOUT_NC;
+  output_88->dim[0] = 1;
+  output_88->dim[1] = 216;
+  output_88->dim_count = 2;
+  output_88->qinfo = (struct csinn_quant_info *)(params_base + 5096232);
+  output_88->quant_channel = 1;
+  struct csinn_reshape_params *params_88 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_88->shape = shape_88;
+  params_88->shape_num = 2;
+  params_88->base.name = "batch_flatten_142";
+  csinn_reshape_init(output_87, output_88, params_88);
+  struct csinn_tensor *output_90 = csinn_alloc_tensor(sess);
+  output_90->name = "output_90";
+  output_90->dtype = CSINN_DTYPE_UINT8;
+  output_90->layout = CSINN_LAYOUT_NC;
+  output_90->dim[0] = 1;
+  output_90->dim[1] = 216;
+  output_90->dim_count = 2;
+  output_90->qinfo = (struct csinn_quant_info *)(params_base + 5096256);
+  output_90->quant_channel = 1;
+  struct csinn_tensor *rhs_90 = csinn_alloc_tensor(sess);
+  rhs_90->name = "rhs_90";
+  rhs_90->data = params_base + 5096304;
+  rhs_90->is_const = 1;
+  rhs_90->dtype = CSINN_DTYPE_UINT8;
+  rhs_90->layout = CSINN_LAYOUT_OI;
+  rhs_90->dim[0] = 1;
+  rhs_90->dim[1] = 216;
+  rhs_90->dim_count = 2;
+  rhs_90->qinfo = (struct csinn_quant_info *)(params_base + 5096280);
+  rhs_90->quant_channel = 1;
+  struct csinn_diso_params *params_90 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_90->base.name = "mul_170";
+  csinn_mul_init(output_88, rhs_90, output_90, params_90);
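+  /* Extra feature layer: 1x1 convolution 256 -> 128 (input output_85), then 3x3 stride-2 convolution 128 -> 256, shrinking 3x3 -> 2x2. */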
+  struct csinn_tensor *output_92 = csinn_alloc_tensor(sess);
+  output_92->name = "output_92";
+  output_92->dtype = CSINN_DTYPE_UINT8;
+  output_92->layout = CSINN_LAYOUT_NCHW;
+  output_92->dim[0] = 1;
+  output_92->dim[1] = 128;
+  output_92->dim[2] = 3;
+  output_92->dim[3] = 3;
+  output_92->dim_count = 4;
+  output_92->qinfo = (struct csinn_quant_info *)(params_base + 5096520);
+  output_92->quant_channel = 1;
+  struct csinn_tensor *kernel_92 = csinn_alloc_tensor(sess);
+  kernel_92->name = "kernel_92";
+  kernel_92->data = params_base + 5096568;
+  kernel_92->is_const = 1;
+  kernel_92->dtype = CSINN_DTYPE_UINT8;
+  kernel_92->layout = CSINN_LAYOUT_OIHW;
+  kernel_92->dim[0] = 128;
+  kernel_92->dim[1] = 256;
+  kernel_92->dim[2] = 1;
+  kernel_92->dim[3] = 1;
+  kernel_92->dim_count = 4;
+  kernel_92->qinfo = (struct csinn_quant_info *)(params_base + 5096544);
+  kernel_92->quant_channel = 1;
+  struct csinn_tensor *bias_92 = csinn_alloc_tensor(sess);
+  bias_92->name = "bias_92";
+  bias_92->data = params_base + 5129360;
+  bias_92->is_const = 1;
+  bias_92->dtype = CSINN_DTYPE_INT32;
+  bias_92->layout = CSINN_LAYOUT_O;
+  bias_92->dim[0] = 128;
+  bias_92->dim_count = 1;
+  bias_92->qinfo = (struct csinn_quant_info *)(params_base + 5129336);
+  bias_92->quant_channel = 1;
+  struct csinn_conv2d_params *params_92 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_92->group = 1;
+  params_92->stride_height = 1;
+  params_92->stride_width = 1;
+  params_92->dilation_height = 1;
+  params_92->dilation_width = 1;
+  params_92->conv_extra.kernel_tm = NULL;
+  params_92->conv_extra.conv_mode = CSINN_DIRECT;
+  params_92->pad_top = 0;
+  params_92->pad_left = 0;
+  params_92->pad_down = 0;
+  params_92->pad_right = 0;
+  params_92->base.name = "conv2d_143_fuse_multiply_144_fuse_add_145";
+  csinn_conv2d_init(output_85, output_92, kernel_92, bias_92, params_92);
+  struct csinn_tensor *output_93 = csinn_alloc_tensor(sess);
+  output_93->name = "output_93";
+  output_93->dtype = CSINN_DTYPE_UINT8;
+  output_93->layout = CSINN_LAYOUT_NCHW;
+  output_93->dim[0] = 1;
+  output_93->dim[1] = 128;
+  output_93->dim[2] = 3;
+  output_93->dim[3] = 3;
+  output_93->dim_count = 4;
+  output_93->qinfo = (struct csinn_quant_info *)(params_base + 5129872);
+  output_93->quant_channel = 1;
+  struct csinn_relu_params *params_93 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_93->base.name = "relu_146";
+  csinn_relu_init(output_92, output_93, params_93);
+  struct csinn_tensor *output_94 = csinn_alloc_tensor(sess);
+  output_94->name = "output_94";
+  output_94->dtype = CSINN_DTYPE_UINT8;
+  output_94->layout = CSINN_LAYOUT_NCHW;
+  output_94->dim[0] = 1;
+  output_94->dim[1] = 256;
+  output_94->dim[2] = 2;
+  output_94->dim[3] = 2;
+  output_94->dim_count = 4;
+  output_94->qinfo = (struct csinn_quant_info *)(params_base + 5129896);
+  output_94->quant_channel = 1;
+  struct csinn_tensor *kernel_94 = csinn_alloc_tensor(sess);
+  kernel_94->name = "kernel_94";
+  kernel_94->data = params_base + 5129944;
+  kernel_94->is_const = 1;
+  kernel_94->dtype = CSINN_DTYPE_UINT8;
+  kernel_94->layout = CSINN_LAYOUT_OIHW;
+  kernel_94->dim[0] = 256;
+  kernel_94->dim[1] = 128;
+  kernel_94->dim[2] = 3;
+  kernel_94->dim[3] = 3;
+  kernel_94->dim_count = 4;
+  kernel_94->qinfo = (struct csinn_quant_info *)(params_base + 5129920);
+  kernel_94->quant_channel = 1;
+  struct csinn_tensor *bias_94 = csinn_alloc_tensor(sess);
+  bias_94->name = "bias_94";
+  bias_94->data = params_base + 5424880;
+  bias_94->is_const = 1;
+  bias_94->dtype = CSINN_DTYPE_INT32;
+  bias_94->layout = CSINN_LAYOUT_O;
+  bias_94->dim[0] = 256;
+  bias_94->dim_count = 1;
+  bias_94->qinfo = (struct csinn_quant_info *)(params_base + 5424856);
+  bias_94->quant_channel = 1;
+  struct csinn_conv2d_params *params_94 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_94->group = 1;
+  params_94->stride_height = 2;
+  params_94->stride_width = 2;
+  params_94->dilation_height = 1;
+  params_94->dilation_width = 1;
+  params_94->conv_extra.kernel_tm = NULL;
+  params_94->conv_extra.conv_mode = CSINN_DIRECT;
+  params_94->pad_top = 1;
+  params_94->pad_left = 1;
+  params_94->pad_down = 1;
+  params_94->pad_right = 1;
+  params_94->base.name = "conv2d_147_fuse_multiply_148_fuse_add_149";
+  csinn_conv2d_init(output_93, output_94, kernel_94, bias_94, params_94);
+  struct csinn_tensor *output_95 = csinn_alloc_tensor(sess);
+  output_95->name = "output_95";
+  output_95->dtype = CSINN_DTYPE_UINT8;
+  output_95->layout = CSINN_LAYOUT_NCHW;
+  output_95->dim[0] = 1;
+  output_95->dim[1] = 256;
+  output_95->dim[2] = 2;
+  output_95->dim[3] = 2;
+  output_95->dim_count = 4;
+  output_95->qinfo = (struct csinn_quant_info *)(params_base + 5425904);
+  output_95->quant_channel = 1;
+  struct csinn_relu_params *params_95 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_95->base.name = "relu_150";
+  csinn_relu_init(output_94, output_95, params_95);
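+  /* Detection head on the 2x2 map: 1x1 convolution 256 -> 24, transpose, flatten to 1x96, then element-wise scale (mul_171). */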
+  struct csinn_tensor *output_96 = csinn_alloc_tensor(sess);
+  output_96->name = "output_96";
+  output_96->dtype = CSINN_DTYPE_UINT8;
+  output_96->layout = CSINN_LAYOUT_NCHW;
+  output_96->dim[0] = 1;
+  output_96->dim[1] = 24;
+  output_96->dim[2] = 2;
+  output_96->dim[3] = 2;
+  output_96->dim_count = 4;
+  output_96->qinfo = (struct csinn_quant_info *)(params_base + 5425928);
+  output_96->quant_channel = 1;
+  struct csinn_tensor *kernel_96 = csinn_alloc_tensor(sess);
+  kernel_96->name = "kernel_96";
+  kernel_96->data = params_base + 5425976;
+  kernel_96->is_const = 1;
+  kernel_96->dtype = CSINN_DTYPE_UINT8;
+  kernel_96->layout = CSINN_LAYOUT_OIHW;
+  kernel_96->dim[0] = 24;
+  kernel_96->dim[1] = 256;
+  kernel_96->dim[2] = 1;
+  kernel_96->dim[3] = 1;
+  kernel_96->dim_count = 4;
+  kernel_96->qinfo = (struct csinn_quant_info *)(params_base + 5425952);
+  kernel_96->quant_channel = 1;
+  struct csinn_tensor *bias_96 = csinn_alloc_tensor(sess);
+  bias_96->name = "bias_96";
+  bias_96->data = params_base + 5432144;
+  bias_96->is_const = 1;
+  bias_96->dtype = CSINN_DTYPE_INT32;
+  bias_96->layout = CSINN_LAYOUT_O;
+  bias_96->dim[0] = 24;
+  bias_96->dim_count = 1;
+  bias_96->qinfo = (struct csinn_quant_info *)(params_base + 5432120);
+  bias_96->quant_channel = 1;
+  struct csinn_conv2d_params *params_96 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_96->group = 1;
+  params_96->stride_height = 1;
+  params_96->stride_width = 1;
+  params_96->dilation_height = 1;
+  params_96->dilation_width = 1;
+  params_96->conv_extra.kernel_tm = NULL;
+  params_96->conv_extra.conv_mode = CSINN_DIRECT;
+  params_96->pad_top = 0;
+  params_96->pad_left = 0;
+  params_96->pad_down = 0;
+  params_96->pad_right = 0;
+  params_96->base.name = "conv2d_151_fuse_bias_add_152";
+  csinn_conv2d_init(output_95, output_96, kernel_96, bias_96, params_96);
+  int32_t *permute_97 = malloc(4 * sizeof(int32_t));
+  permute_97[0] = 0;
+  permute_97[1] = 2;
+  permute_97[2] = 3;
+  permute_97[3] = 1;
+  struct csinn_tensor *output_97 = csinn_alloc_tensor(sess);
+  output_97->name = "output_97";
+  output_97->dtype = CSINN_DTYPE_UINT8;
+  output_97->layout = CSINN_LAYOUT_NCHW;
+  output_97->dim[0] = 1;
+  output_97->dim[1] = 2;
+  output_97->dim[2] = 2;
+  output_97->dim[3] = 24;
+  output_97->dim_count = 4;
+  output_97->qinfo = (struct csinn_quant_info *)(params_base + 5432240);
+  output_97->quant_channel = 1;
+  struct csinn_transpose_params *params_97 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_97->permute = permute_97;
+  params_97->permute_num = 4;
+  params_97->base.name = "transpose_153";
+  csinn_transpose_init(output_96, output_97, params_97);
+  int32_t *shape_98 = malloc(2 * sizeof(int32_t));
+  shape_98[0] = 1;
+  shape_98[1] = 96;
+  struct csinn_tensor *output_98 = csinn_alloc_tensor(sess);
+  output_98->name = "output_98";
+  output_98->dtype = CSINN_DTYPE_UINT8;
+  output_98->layout = CSINN_LAYOUT_NC;
+  output_98->dim[0] = 1;
+  output_98->dim[1] = 96;
+  output_98->dim_count = 2;
+  output_98->qinfo = (struct csinn_quant_info *)(params_base + 5432264);
+  output_98->quant_channel = 1;
+  struct csinn_reshape_params *params_98 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_98->shape = shape_98;
+  params_98->shape_num = 2;
+  params_98->base.name = "batch_flatten_154";
+  csinn_reshape_init(output_97, output_98, params_98);
+  struct csinn_tensor *output_100 = csinn_alloc_tensor(sess);
+  output_100->name = "output_100";
+  output_100->dtype = CSINN_DTYPE_UINT8;
+  output_100->layout = CSINN_LAYOUT_NC;
+  output_100->dim[0] = 1;
+  output_100->dim[1] = 96;
+  output_100->dim_count = 2;
+  output_100->qinfo = (struct csinn_quant_info *)(params_base + 5432288);
+  output_100->quant_channel = 1;
+  struct csinn_tensor *rhs_100 = csinn_alloc_tensor(sess);
+  rhs_100->name = "rhs_100";
+  rhs_100->data = params_base + 5432336;
+  rhs_100->is_const = 1;
+  rhs_100->dtype = CSINN_DTYPE_UINT8;
+  rhs_100->layout = CSINN_LAYOUT_OI;
+  rhs_100->dim[0] = 1;
+  rhs_100->dim[1] = 96;
+  rhs_100->dim_count = 2;
+  rhs_100->qinfo = (struct csinn_quant_info *)(params_base + 5432312);
+  rhs_100->quant_channel = 1;
+  struct csinn_diso_params *params_100 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_100->base.name = "mul_171";
+  csinn_mul_init(output_98, rhs_100, output_100, params_100);
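+  /* Final extra feature layer: 1x1 convolution 256 -> 64 (input output_95), then 3x3 stride-2 convolution 64 -> 128, shrinking 2x2 -> 1x1. */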
+  struct csinn_tensor *output_102 = csinn_alloc_tensor(sess);
+  output_102->name = "output_102";
+  output_102->dtype = CSINN_DTYPE_UINT8;
+  output_102->layout = CSINN_LAYOUT_NCHW;
+  output_102->dim[0] = 1;
+  output_102->dim[1] = 64;
+  output_102->dim[2] = 2;
+  output_102->dim[3] = 2;
+  output_102->dim_count = 4;
+  output_102->qinfo = (struct csinn_quant_info *)(params_base + 5432432);
+  output_102->quant_channel = 1;
+  struct csinn_tensor *kernel_102 = csinn_alloc_tensor(sess);
+  kernel_102->name = "kernel_102";
+  kernel_102->data = params_base + 5432480;
+  kernel_102->is_const = 1;
+  kernel_102->dtype = CSINN_DTYPE_UINT8;
+  kernel_102->layout = CSINN_LAYOUT_OIHW;
+  kernel_102->dim[0] = 64;
+  kernel_102->dim[1] = 256;
+  kernel_102->dim[2] = 1;
+  kernel_102->dim[3] = 1;
+  kernel_102->dim_count = 4;
+  kernel_102->qinfo = (struct csinn_quant_info *)(params_base + 5432456);
+  kernel_102->quant_channel = 1;
+  struct csinn_tensor *bias_102 = csinn_alloc_tensor(sess);
+  bias_102->name = "bias_102";
+  bias_102->data = params_base + 5448888;
+  bias_102->is_const = 1;
+  bias_102->dtype = CSINN_DTYPE_INT32;
+  bias_102->layout = CSINN_LAYOUT_O;
+  bias_102->dim[0] = 64;
+  bias_102->dim_count = 1;
+  bias_102->qinfo = (struct csinn_quant_info *)(params_base + 5448864);
+  bias_102->quant_channel = 1;
+  struct csinn_conv2d_params *params_102 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_102->group = 1;
+  params_102->stride_height = 1;
+  params_102->stride_width = 1;
+  params_102->dilation_height = 1;
+  params_102->dilation_width = 1;
+  params_102->conv_extra.kernel_tm = NULL;
+  params_102->conv_extra.conv_mode = CSINN_DIRECT;
+  params_102->pad_top = 0;
+  params_102->pad_left = 0;
+  params_102->pad_down = 0;
+  params_102->pad_right = 0;
+  params_102->base.name = "conv2d_155_fuse_multiply_156_fuse_add_157";
+  csinn_conv2d_init(output_95, output_102, kernel_102, bias_102, params_102);
+  struct csinn_tensor *output_103 = csinn_alloc_tensor(sess);
+  output_103->name = "output_103";
+  output_103->dtype = CSINN_DTYPE_UINT8;
+  output_103->layout = CSINN_LAYOUT_NCHW;
+  output_103->dim[0] = 1;
+  output_103->dim[1] = 64;
+  output_103->dim[2] = 2;
+  output_103->dim[3] = 2;
+  output_103->dim_count = 4;
+  output_103->qinfo = (struct csinn_quant_info *)(params_base + 5449144);
+  output_103->quant_channel = 1;
+  struct csinn_relu_params *params_103 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_103->base.name = "relu_158";
+  csinn_relu_init(output_102, output_103, params_103);
+  struct csinn_tensor *output_104 = csinn_alloc_tensor(sess);
+  output_104->name = "output_104";
+  output_104->dtype = CSINN_DTYPE_UINT8;
+  output_104->layout = CSINN_LAYOUT_NCHW;
+  output_104->dim[0] = 1;
+  output_104->dim[1] = 128;
+  output_104->dim[2] = 1;
+  output_104->dim[3] = 1;
+  output_104->dim_count = 4;
+  output_104->qinfo = (struct csinn_quant_info *)(params_base + 5449168);
+  output_104->quant_channel = 1;
+  struct csinn_tensor *kernel_104 = csinn_alloc_tensor(sess);
+  kernel_104->name = "kernel_104";
+  kernel_104->data = params_base + 5449216;
+  kernel_104->is_const = 1;
+  kernel_104->dtype = CSINN_DTYPE_UINT8;
+  kernel_104->layout = CSINN_LAYOUT_OIHW;
+  kernel_104->dim[0] = 128;
+  kernel_104->dim[1] = 64;
+  kernel_104->dim[2] = 3;
+  kernel_104->dim[3] = 3;
+  kernel_104->dim_count = 4;
+  kernel_104->qinfo = (struct csinn_quant_info *)(params_base + 5449192);
+  kernel_104->quant_channel = 1;
+  struct csinn_tensor *bias_104 = csinn_alloc_tensor(sess);
+  bias_104->name = "bias_104";
+  bias_104->data = params_base + 5522968;
+  bias_104->is_const = 1;
+  bias_104->dtype = CSINN_DTYPE_INT32;
+  bias_104->layout = CSINN_LAYOUT_O;
+  bias_104->dim[0] = 128;
+  bias_104->dim_count = 1;
+  bias_104->qinfo = (struct csinn_quant_info *)(params_base + 5522944);
+  bias_104->quant_channel = 1;
+  struct csinn_conv2d_params *params_104 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_104->group = 1;
+  params_104->stride_height = 2;
+  params_104->stride_width = 2;
+  params_104->dilation_height = 1;
+  params_104->dilation_width = 1;
+  params_104->conv_extra.kernel_tm = NULL;
+  params_104->conv_extra.conv_mode = CSINN_DIRECT;
+  params_104->pad_top = 1;
+  params_104->pad_left = 1;
+  params_104->pad_down = 1;
+  params_104->pad_right = 1;
+  params_104->base.name = "conv2d_159_fuse_multiply_160_fuse_add_161";
+  csinn_conv2d_init(output_103, output_104, kernel_104, bias_104, params_104);
+  struct csinn_tensor *output_105 = csinn_alloc_tensor(sess);
+  output_105->name = "output_105";
+  output_105->dtype = CSINN_DTYPE_UINT8;
+  output_105->layout = CSINN_LAYOUT_NCHW;
+  output_105->dim[0] = 1;
+  output_105->dim[1] = 128;
+  output_105->dim[2] = 1;
+  output_105->dim[3] = 1;
+  output_105->dim_count = 4;
+  output_105->qinfo = (struct csinn_quant_info *)(params_base + 5523480);
+  output_105->quant_channel = 1;
+  struct csinn_relu_params *params_105 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
+  params_105->base.name = "relu_162";
+  csinn_relu_init(output_104, output_105, params_105);
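+  /* Detection head on the 1x1 map: 1x1 convolution 128 -> 24, transpose, flatten to 1x24, then element-wise scale (mul_172). */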
+  struct csinn_tensor *output_106 = csinn_alloc_tensor(sess);
+  output_106->name = "output_106";
+  output_106->dtype = CSINN_DTYPE_UINT8;
+  output_106->layout = CSINN_LAYOUT_NCHW;
+  output_106->dim[0] = 1;
+  output_106->dim[1] = 24;
+  output_106->dim[2] = 1;
+  output_106->dim[3] = 1;
+  output_106->dim_count = 4;
+  output_106->qinfo = (struct csinn_quant_info *)(params_base + 5523504);
+  output_106->quant_channel = 1;
+  struct csinn_tensor *kernel_106 = csinn_alloc_tensor(sess);
+  kernel_106->name = "kernel_106";
+  kernel_106->data = params_base + 5523552;
+  kernel_106->is_const = 1;
+  kernel_106->dtype = CSINN_DTYPE_UINT8;
+  kernel_106->layout = CSINN_LAYOUT_OIHW;
+  kernel_106->dim[0] = 24;
+  kernel_106->dim[1] = 128;
+  kernel_106->dim[2] = 1;
+  kernel_106->dim[3] = 1;
+  kernel_106->dim_count = 4;
+  kernel_106->qinfo = (struct csinn_quant_info *)(params_base + 5523528);
+  kernel_106->quant_channel = 1;
+  struct csinn_tensor *bias_106 = csinn_alloc_tensor(sess);
+  bias_106->name = "bias_106";
+  bias_106->data = params_base + 5526648;
+  bias_106->is_const = 1;
+  bias_106->dtype = CSINN_DTYPE_INT32;
+  bias_106->layout = CSINN_LAYOUT_O;
+  bias_106->dim[0] = 24;
+  bias_106->dim_count = 1;
+  bias_106->qinfo = (struct csinn_quant_info *)(params_base + 5526624);
+  bias_106->quant_channel = 1;
+  struct csinn_conv2d_params *params_106 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_106->group = 1;
+  params_106->stride_height = 1;
+  params_106->stride_width = 1;
+  params_106->dilation_height = 1;
+  params_106->dilation_width = 1;
+  params_106->conv_extra.kernel_tm = NULL;
+  params_106->conv_extra.conv_mode = CSINN_DIRECT;
+  params_106->pad_top = 0;
+  params_106->pad_left = 0;
+  params_106->pad_down = 0;
+  params_106->pad_right = 0;
+  params_106->base.name = "conv2d_163_fuse_bias_add_164";
+  csinn_conv2d_init(output_105, output_106, kernel_106, bias_106, params_106);
+  int32_t *permute_107 = malloc(4 * sizeof(int32_t));
+  permute_107[0] = 0;
+  permute_107[1] = 2;
+  permute_107[2] = 3;
+  permute_107[3] = 1;
+  struct csinn_tensor *output_107 = csinn_alloc_tensor(sess);
+  output_107->name = "output_107";
+  output_107->dtype = CSINN_DTYPE_UINT8;
+  output_107->layout = CSINN_LAYOUT_NCHW;
+  output_107->dim[0] = 1;
+  output_107->dim[1] = 1;
+  output_107->dim[2] = 1;
+  output_107->dim[3] = 24;
+  output_107->dim_count = 4;
+  output_107->qinfo = (struct csinn_quant_info *)(params_base + 5526744);
+  output_107->quant_channel = 1;
+  struct csinn_transpose_params *params_107 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_107->permute = permute_107;
+  params_107->permute_num = 4;
+  params_107->base.name = "transpose_165";
+  csinn_transpose_init(output_106, output_107, params_107);
+  int32_t *shape_108 = malloc(2 * 4);
+  shape_108[0] = 1;
+  shape_108[1] = 24;
+  struct csinn_tensor *output_108 = csinn_alloc_tensor(sess);
+  output_108->name = "output_108";
+  output_108->dtype = CSINN_DTYPE_UINT8;
+  output_108->layout = CSINN_LAYOUT_NC;
+  output_108->dim[0] = 1;
+  output_108->dim[1] = 24;
+  output_108->dim_count = 2;
+  output_108->qinfo = (struct csinn_quant_info *)(params_base + 5526768);
+  output_108->quant_channel = 1;
+  struct csinn_reshape_params *params_108 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_108->shape = shape_108;
+  params_108->shape_num = 2;
+  params_108->base.name = "batch_flatten_166";
+  csinn_reshape_init(output_107, output_108, params_108);
+  struct csinn_tensor *output_110 = csinn_alloc_tensor(sess);
+  output_110->name = "output_110";
+  output_110->dtype = CSINN_DTYPE_UINT8;
+  output_110->layout = CSINN_LAYOUT_NC;
+  output_110->dim[0] = 1;
+  output_110->dim[1] = 24;
+  output_110->dim_count = 2;
+  output_110->qinfo = (struct csinn_quant_info *)(params_base + 5526792);
+  output_110->quant_channel = 1;
+  struct csinn_tensor *rhs_110 = csinn_alloc_tensor(sess);
+  rhs_110->name = "rhs_110";
+  rhs_110->data = params_base + 5526840;
+  rhs_110->is_const = 1;
+  rhs_110->dtype = CSINN_DTYPE_UINT8;
+  rhs_110->layout = CSINN_LAYOUT_OI;
+  rhs_110->dim[0] = 1;
+  rhs_110->dim[1] = 24;
+  rhs_110->dim_count = 2;
+  rhs_110->qinfo = (struct csinn_quant_info *)(params_base + 5526816);
+  rhs_110->quant_channel = 1;
+  struct csinn_diso_params *params_110 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_110->base.name = "mul_172";
+  csinn_mul_init(output_108, rhs_110, output_110, params_110);
+  struct csinn_tensor *input_112[6];
+  struct csinn_tensor *output_112 = csinn_alloc_tensor(sess);
+  output_112->name = "concatenate_167_112";
+  output_112->dtype = CSINN_DTYPE_UINT8;
+  output_112->layout = CSINN_LAYOUT_NC;
+  output_112->dim[0] = 1;
+  output_112->dim[1] = 7668;
+  output_112->dim_count = 2;
+  output_112->qinfo = (struct csinn_quant_info *)(params_base + 5526864);
+  output_112->quant_channel = 1;
+  struct csinn_concat_params *params_112 = csinn_alloc_params(sizeof(struct csinn_concat_params), sess);
+  params_112->inputs_count = 6;
+  params_112->axis = 1;
+  params_112->base.name = "concatenate_167";
+  csinn_concat_init(input_112, output_112, params_112);
+  struct csinn_tensor *output_113 = csinn_alloc_tensor(sess);
+  output_113->name = "output_113";
+  output_113->dtype = CSINN_DTYPE_UINT8;
+  output_113->layout = CSINN_LAYOUT_NCHW;
+  output_113->dim[0] = 1;
+  output_113->dim[1] = 63;
+  output_113->dim[2] = 19;
+  output_113->dim[3] = 19;
+  output_113->dim_count = 4;
+  output_113->qinfo = (struct csinn_quant_info *)(params_base + 5526888);
+  output_113->quant_channel = 1;
+  struct csinn_tensor *kernel_113 = csinn_alloc_tensor(sess);
+  kernel_113->name = "kernel_113";
+  kernel_113->data = params_base + 5526936;
+  kernel_113->is_const = 1;
+  kernel_113->dtype = CSINN_DTYPE_UINT8;
+  kernel_113->layout = CSINN_LAYOUT_OIHW;
+  kernel_113->dim[0] = 63;
+  kernel_113->dim[1] = 512;
+  kernel_113->dim[2] = 1;
+  kernel_113->dim[3] = 1;
+  kernel_113->dim_count = 4;
+  kernel_113->qinfo = (struct csinn_quant_info *)(params_base + 5526912);
+  kernel_113->quant_channel = 1;
+  struct csinn_tensor *bias_113 = csinn_alloc_tensor(sess);
+  bias_113->name = "bias_113";
+  bias_113->data = params_base + 5559216;
+  bias_113->is_const = 1;
+  bias_113->dtype = CSINN_DTYPE_INT32;
+  bias_113->layout = CSINN_LAYOUT_O;
+  bias_113->dim[0] = 63;
+  bias_113->dim_count = 1;
+  bias_113->qinfo = (struct csinn_quant_info *)(params_base + 5559192);
+  bias_113->quant_channel = 1;
+  struct csinn_conv2d_params *params_113 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_113->group = 1;
+  params_113->stride_height = 1;
+  params_113->stride_width = 1;
+  params_113->dilation_height = 1;
+  params_113->dilation_width = 1;
+  params_113->conv_extra.kernel_tm = NULL;
+  params_113->conv_extra.conv_mode = CSINN_DIRECT;
+  params_113->pad_top = 0;
+  params_113->pad_left = 0;
+  params_113->pad_down = 0;
+  params_113->pad_right = 0;
+  params_113->base.name = "conv2d_168_fuse_bias_add_169";
+  csinn_conv2d_init(output_51, output_113, kernel_113, bias_113, params_113);
+  int32_t *permute_114 = malloc(4 * 4);
+  permute_114[0] = 0;
+  permute_114[1] = 2;
+  permute_114[2] = 3;
+  permute_114[3] = 1;
+  struct csinn_tensor *output_114 = csinn_alloc_tensor(sess);
+  output_114->name = "output_114";
+  output_114->dtype = CSINN_DTYPE_UINT8;
+  output_114->layout = CSINN_LAYOUT_NCHW;
+  output_114->dim[0] = 1;
+  output_114->dim[1] = 19;
+  output_114->dim[2] = 19;
+  output_114->dim[3] = 63;
+  output_114->dim_count = 4;
+  output_114->qinfo = (struct csinn_quant_info *)(params_base + 5559468);
+  output_114->quant_channel = 1;
+  struct csinn_transpose_params *params_114 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_114->permute = permute_114;
+  params_114->permute_num = 4;
+  params_114->base.name = "transpose_170";
+  csinn_transpose_init(output_113, output_114, params_114);
+  int32_t *shape_115 = malloc(2 * 4);
+  shape_115[0] = 1;
+  shape_115[1] = 22743;
+  struct csinn_tensor *output_115 = csinn_alloc_tensor(sess);
+  output_115->name = "output_115";
+  output_115->dtype = CSINN_DTYPE_UINT8;
+  output_115->layout = CSINN_LAYOUT_NC;
+  output_115->dim[0] = 1;
+  output_115->dim[1] = 22743;
+  output_115->dim_count = 2;
+  output_115->qinfo = (struct csinn_quant_info *)(params_base + 5559492);
+  output_115->quant_channel = 1;
+  struct csinn_reshape_params *params_115 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_115->shape = shape_115;
+  params_115->shape_num = 2;
+  params_115->base.name = "batch_flatten_171";
+  csinn_reshape_init(output_114, output_115, params_115);
+  struct csinn_tensor *output_117 = csinn_alloc_tensor(sess);
+  output_117->name = "output_117";
+  output_117->dtype = CSINN_DTYPE_UINT8;
+  output_117->layout = CSINN_LAYOUT_NC;
+  output_117->dim[0] = 1;
+  output_117->dim[1] = 22743;
+  output_117->dim_count = 2;
+  output_117->qinfo = (struct csinn_quant_info *)(params_base + 5559516);
+  output_117->quant_channel = 1;
+  struct csinn_tensor *rhs_117 = csinn_alloc_tensor(sess);
+  rhs_117->name = "rhs_117";
+  rhs_117->data = params_base + 5559564;
+  rhs_117->is_const = 1;
+  rhs_117->dtype = CSINN_DTYPE_UINT8;
+  rhs_117->layout = CSINN_LAYOUT_OI;
+  rhs_117->dim[0] = 1;
+  rhs_117->dim[1] = 22743;
+  rhs_117->dim_count = 2;
+  rhs_117->qinfo = (struct csinn_quant_info *)(params_base + 5559540);
+  rhs_117->quant_channel = 1;
+  struct csinn_diso_params *params_117 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_117->base.name = "mul_173";
+  csinn_mul_init(output_115, rhs_117, output_117, params_117);
+  struct csinn_tensor *output_119 = csinn_alloc_tensor(sess);
+  output_119->name = "output_119";
+  output_119->dtype = CSINN_DTYPE_UINT8;
+  output_119->layout = CSINN_LAYOUT_NCHW;
+  output_119->dim[0] = 1;
+  output_119->dim[1] = 126;
+  output_119->dim[2] = 10;
+  output_119->dim[3] = 10;
+  output_119->dim_count = 4;
+  output_119->qinfo = (struct csinn_quant_info *)(params_base + 5582307);
+  output_119->quant_channel = 1;
+  struct csinn_tensor *kernel_119 = csinn_alloc_tensor(sess);
+  kernel_119->name = "kernel_119";
+  kernel_119->data = params_base + 5582355;
+  kernel_119->is_const = 1;
+  kernel_119->dtype = CSINN_DTYPE_UINT8;
+  kernel_119->layout = CSINN_LAYOUT_OIHW;
+  kernel_119->dim[0] = 126;
+  kernel_119->dim[1] = 1024;
+  kernel_119->dim[2] = 1;
+  kernel_119->dim[3] = 1;
+  kernel_119->dim_count = 4;
+  kernel_119->qinfo = (struct csinn_quant_info *)(params_base + 5582331);
+  kernel_119->quant_channel = 1;
+  struct csinn_tensor *bias_119 = csinn_alloc_tensor(sess);
+  bias_119->name = "bias_119";
+  bias_119->data = params_base + 5711403;
+  bias_119->is_const = 1;
+  bias_119->dtype = CSINN_DTYPE_INT32;
+  bias_119->layout = CSINN_LAYOUT_O;
+  bias_119->dim[0] = 126;
+  bias_119->dim_count = 1;
+  bias_119->qinfo = (struct csinn_quant_info *)(params_base + 5711379);
+  bias_119->quant_channel = 1;
+  struct csinn_conv2d_params *params_119 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_119->group = 1;
+  params_119->stride_height = 1;
+  params_119->stride_width = 1;
+  params_119->dilation_height = 1;
+  params_119->dilation_width = 1;
+  params_119->conv_extra.kernel_tm = NULL;
+  params_119->conv_extra.conv_mode = CSINN_DIRECT;
+  params_119->pad_top = 0;
+  params_119->pad_left = 0;
+  params_119->pad_down = 0;
+  params_119->pad_right = 0;
+  params_119->base.name = "conv2d_172_fuse_bias_add_173";
+  csinn_conv2d_init(output_65, output_119, kernel_119, bias_119, params_119);
+  int32_t *permute_120 = malloc(4 * 4);
+  permute_120[0] = 0;
+  permute_120[1] = 2;
+  permute_120[2] = 3;
+  permute_120[3] = 1;
+  struct csinn_tensor *output_120 = csinn_alloc_tensor(sess);
+  output_120->name = "output_120";
+  output_120->dtype = CSINN_DTYPE_UINT8;
+  output_120->layout = CSINN_LAYOUT_NCHW;
+  output_120->dim[0] = 1;
+  output_120->dim[1] = 10;
+  output_120->dim[2] = 10;
+  output_120->dim[3] = 126;
+  output_120->dim_count = 4;
+  output_120->qinfo = (struct csinn_quant_info *)(params_base + 5711907);
+  output_120->quant_channel = 1;
+  struct csinn_transpose_params *params_120 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_120->permute = permute_120;
+  params_120->permute_num = 4;
+  params_120->base.name = "transpose_174";
+  csinn_transpose_init(output_119, output_120, params_120);
+  int32_t *shape_121 = malloc(2 * 4);
+  shape_121[0] = 1;
+  shape_121[1] = 12600;
+  struct csinn_tensor *output_121 = csinn_alloc_tensor(sess);
+  output_121->name = "output_121";
+  output_121->dtype = CSINN_DTYPE_UINT8;
+  output_121->layout = CSINN_LAYOUT_NC;
+  output_121->dim[0] = 1;
+  output_121->dim[1] = 12600;
+  output_121->dim_count = 2;
+  output_121->qinfo = (struct csinn_quant_info *)(params_base + 5711931);
+  output_121->quant_channel = 1;
+  struct csinn_reshape_params *params_121 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_121->shape = shape_121;
+  params_121->shape_num = 2;
+  params_121->base.name = "batch_flatten_175";
+  csinn_reshape_init(output_120, output_121, params_121);
+  struct csinn_tensor *output_123 = csinn_alloc_tensor(sess);
+  output_123->name = "output_123";
+  output_123->dtype = CSINN_DTYPE_UINT8;
+  output_123->layout = CSINN_LAYOUT_NC;
+  output_123->dim[0] = 1;
+  output_123->dim[1] = 12600;
+  output_123->dim_count = 2;
+  output_123->qinfo = (struct csinn_quant_info *)(params_base + 5711955);
+  output_123->quant_channel = 1;
+  struct csinn_tensor *rhs_123 = csinn_alloc_tensor(sess);
+  rhs_123->name = "rhs_123";
+  rhs_123->data = params_base + 5712003;
+  rhs_123->is_const = 1;
+  rhs_123->dtype = CSINN_DTYPE_UINT8;
+  rhs_123->layout = CSINN_LAYOUT_OI;
+  rhs_123->dim[0] = 1;
+  rhs_123->dim[1] = 12600;
+  rhs_123->dim_count = 2;
+  rhs_123->qinfo = (struct csinn_quant_info *)(params_base + 5711979);
+  rhs_123->quant_channel = 1;
+  struct csinn_diso_params *params_123 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_123->base.name = "mul_174";
+  csinn_mul_init(output_121, rhs_123, output_123, params_123);
+  struct csinn_tensor *output_125 = csinn_alloc_tensor(sess);
+  output_125->name = "output_125";
+  output_125->dtype = CSINN_DTYPE_UINT8;
+  output_125->layout = CSINN_LAYOUT_NCHW;
+  output_125->dim[0] = 1;
+  output_125->dim[1] = 126;
+  output_125->dim[2] = 5;
+  output_125->dim[3] = 5;
+  output_125->dim_count = 4;
+  output_125->qinfo = (struct csinn_quant_info *)(params_base + 5724603);
+  output_125->quant_channel = 1;
+  struct csinn_tensor *kernel_125 = csinn_alloc_tensor(sess);
+  kernel_125->name = "kernel_125";
+  kernel_125->data = params_base + 5724651;
+  kernel_125->is_const = 1;
+  kernel_125->dtype = CSINN_DTYPE_UINT8;
+  kernel_125->layout = CSINN_LAYOUT_OIHW;
+  kernel_125->dim[0] = 126;
+  kernel_125->dim[1] = 512;
+  kernel_125->dim[2] = 1;
+  kernel_125->dim[3] = 1;
+  kernel_125->dim_count = 4;
+  kernel_125->qinfo = (struct csinn_quant_info *)(params_base + 5724627);
+  kernel_125->quant_channel = 1;
+  struct csinn_tensor *bias_125 = csinn_alloc_tensor(sess);
+  bias_125->name = "bias_125";
+  bias_125->data = params_base + 5789187;
+  bias_125->is_const = 1;
+  bias_125->dtype = CSINN_DTYPE_INT32;
+  bias_125->layout = CSINN_LAYOUT_O;
+  bias_125->dim[0] = 126;
+  bias_125->dim_count = 1;
+  bias_125->qinfo = (struct csinn_quant_info *)(params_base + 5789163);
+  bias_125->quant_channel = 1;
+  struct csinn_conv2d_params *params_125 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_125->group = 1;
+  params_125->stride_height = 1;
+  params_125->stride_width = 1;
+  params_125->dilation_height = 1;
+  params_125->dilation_width = 1;
+  params_125->conv_extra.kernel_tm = NULL;
+  params_125->conv_extra.conv_mode = CSINN_DIRECT;
+  params_125->pad_top = 0;
+  params_125->pad_left = 0;
+  params_125->pad_down = 0;
+  params_125->pad_right = 0;
+  params_125->base.name = "conv2d_176_fuse_bias_add_177";
+  csinn_conv2d_init(output_75, output_125, kernel_125, bias_125, params_125);
+  int32_t *permute_126 = malloc(4 * 4);
+  permute_126[0] = 0;
+  permute_126[1] = 2;
+  permute_126[2] = 3;
+  permute_126[3] = 1;
+  struct csinn_tensor *output_126 = csinn_alloc_tensor(sess);
+  output_126->name = "output_126";
+  output_126->dtype = CSINN_DTYPE_UINT8;
+  output_126->layout = CSINN_LAYOUT_NCHW;
+  output_126->dim[0] = 1;
+  output_126->dim[1] = 5;
+  output_126->dim[2] = 5;
+  output_126->dim[3] = 126;
+  output_126->dim_count = 4;
+  output_126->qinfo = (struct csinn_quant_info *)(params_base + 5789691);
+  output_126->quant_channel = 1;
+  struct csinn_transpose_params *params_126 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_126->permute = permute_126;
+  params_126->permute_num = 4;
+  params_126->base.name = "transpose_178";
+  csinn_transpose_init(output_125, output_126, params_126);
+  int32_t *shape_127 = malloc(2 * 4);
+  shape_127[0] = 1;
+  shape_127[1] = 3150;
+  struct csinn_tensor *output_127 = csinn_alloc_tensor(sess);
+  output_127->name = "output_127";
+  output_127->dtype = CSINN_DTYPE_UINT8;
+  output_127->layout = CSINN_LAYOUT_NC;
+  output_127->dim[0] = 1;
+  output_127->dim[1] = 3150;
+  output_127->dim_count = 2;
+  output_127->qinfo = (struct csinn_quant_info *)(params_base + 5789715);
+  output_127->quant_channel = 1;
+  struct csinn_reshape_params *params_127 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_127->shape = shape_127;
+  params_127->shape_num = 2;
+  params_127->base.name = "batch_flatten_179";
+  csinn_reshape_init(output_126, output_127, params_127);
+  struct csinn_tensor *output_129 = csinn_alloc_tensor(sess);
+  output_129->name = "output_129";
+  output_129->dtype = CSINN_DTYPE_UINT8;
+  output_129->layout = CSINN_LAYOUT_NC;
+  output_129->dim[0] = 1;
+  output_129->dim[1] = 3150;
+  output_129->dim_count = 2;
+  output_129->qinfo = (struct csinn_quant_info *)(params_base + 5789739);
+  output_129->quant_channel = 1;
+  struct csinn_tensor *rhs_129 = csinn_alloc_tensor(sess);
+  rhs_129->name = "rhs_129";
+  rhs_129->data = params_base + 5789787;
+  rhs_129->is_const = 1;
+  rhs_129->dtype = CSINN_DTYPE_UINT8;
+  rhs_129->layout = CSINN_LAYOUT_OI;
+  rhs_129->dim[0] = 1;
+  rhs_129->dim[1] = 3150;
+  rhs_129->dim_count = 2;
+  rhs_129->qinfo = (struct csinn_quant_info *)(params_base + 5789763);
+  rhs_129->quant_channel = 1;
+  struct csinn_diso_params *params_129 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_129->base.name = "mul_175";
+  csinn_mul_init(output_127, rhs_129, output_129, params_129);
+  struct csinn_tensor *output_131 = csinn_alloc_tensor(sess);
+  output_131->name = "output_131";
+  output_131->dtype = CSINN_DTYPE_UINT8;
+  output_131->layout = CSINN_LAYOUT_NCHW;
+  output_131->dim[0] = 1;
+  output_131->dim[1] = 126;
+  output_131->dim[2] = 3;
+  output_131->dim[3] = 3;
+  output_131->dim_count = 4;
+  output_131->qinfo = (struct csinn_quant_info *)(params_base + 5792937);
+  output_131->quant_channel = 1;
+  struct csinn_tensor *kernel_131 = csinn_alloc_tensor(sess);
+  kernel_131->name = "kernel_131";
+  kernel_131->data = params_base + 5792985;
+  kernel_131->is_const = 1;
+  kernel_131->dtype = CSINN_DTYPE_UINT8;
+  kernel_131->layout = CSINN_LAYOUT_OIHW;
+  kernel_131->dim[0] = 126;
+  kernel_131->dim[1] = 256;
+  kernel_131->dim[2] = 1;
+  kernel_131->dim[3] = 1;
+  kernel_131->dim_count = 4;
+  kernel_131->qinfo = (struct csinn_quant_info *)(params_base + 5792961);
+  kernel_131->quant_channel = 1;
+  struct csinn_tensor *bias_131 = csinn_alloc_tensor(sess);
+  bias_131->name = "bias_131";
+  bias_131->data = params_base + 5825265;
+  bias_131->is_const = 1;
+  bias_131->dtype = CSINN_DTYPE_INT32;
+  bias_131->layout = CSINN_LAYOUT_O;
+  bias_131->dim[0] = 126;
+  bias_131->dim_count = 1;
+  bias_131->qinfo = (struct csinn_quant_info *)(params_base + 5825241);
+  bias_131->quant_channel = 1;
+  struct csinn_conv2d_params *params_131 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_131->group = 1;
+  params_131->stride_height = 1;
+  params_131->stride_width = 1;
+  params_131->dilation_height = 1;
+  params_131->dilation_width = 1;
+  params_131->conv_extra.kernel_tm = NULL;
+  params_131->conv_extra.conv_mode = CSINN_DIRECT;
+  params_131->pad_top = 0;
+  params_131->pad_left = 0;
+  params_131->pad_down = 0;
+  params_131->pad_right = 0;
+  params_131->base.name = "conv2d_180_fuse_bias_add_181";
+  csinn_conv2d_init(output_85, output_131, kernel_131, bias_131, params_131);
+  int32_t *permute_132 = malloc(4 * 4);
+  permute_132[0] = 0;
+  permute_132[1] = 2;
+  permute_132[2] = 3;
+  permute_132[3] = 1;
+  struct csinn_tensor *output_132 = csinn_alloc_tensor(sess);
+  output_132->name = "output_132";
+  output_132->dtype = CSINN_DTYPE_UINT8;
+  output_132->layout = CSINN_LAYOUT_NCHW;
+  output_132->dim[0] = 1;
+  output_132->dim[1] = 3;
+  output_132->dim[2] = 3;
+  output_132->dim[3] = 126;
+  output_132->dim_count = 4;
+  output_132->qinfo = (struct csinn_quant_info *)(params_base + 5825769);
+  output_132->quant_channel = 1;
+  struct csinn_transpose_params *params_132 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_132->permute = permute_132;
+  params_132->permute_num = 4;
+  params_132->base.name = "transpose_182";
+  csinn_transpose_init(output_131, output_132, params_132);
+  int32_t *shape_133 = malloc(2 * 4);
+  shape_133[0] = 1;
+  shape_133[1] = 1134;
+  struct csinn_tensor *output_133 = csinn_alloc_tensor(sess);
+  output_133->name = "output_133";
+  output_133->dtype = CSINN_DTYPE_UINT8;
+  output_133->layout = CSINN_LAYOUT_NC;
+  output_133->dim[0] = 1;
+  output_133->dim[1] = 1134;
+  output_133->dim_count = 2;
+  output_133->qinfo = (struct csinn_quant_info *)(params_base + 5825793);
+  output_133->quant_channel = 1;
+  struct csinn_reshape_params *params_133 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_133->shape = shape_133;
+  params_133->shape_num = 2;
+  params_133->base.name = "batch_flatten_183";
+  csinn_reshape_init(output_132, output_133, params_133);
+  struct csinn_tensor *output_135 = csinn_alloc_tensor(sess);
+  output_135->name = "output_135";
+  output_135->dtype = CSINN_DTYPE_UINT8;
+  output_135->layout = CSINN_LAYOUT_NC;
+  output_135->dim[0] = 1;
+  output_135->dim[1] = 1134;
+  output_135->dim_count = 2;
+  output_135->qinfo = (struct csinn_quant_info *)(params_base + 5825817);
+  output_135->quant_channel = 1;
+  struct csinn_tensor *rhs_135 = csinn_alloc_tensor(sess);
+  rhs_135->name = "rhs_135";
+  rhs_135->data = params_base + 5825865;
+  rhs_135->is_const = 1;
+  rhs_135->dtype = CSINN_DTYPE_UINT8;
+  rhs_135->layout = CSINN_LAYOUT_OI;
+  rhs_135->dim[0] = 1;
+  rhs_135->dim[1] = 1134;
+  rhs_135->dim_count = 2;
+  rhs_135->qinfo = (struct csinn_quant_info *)(params_base + 5825841);
+  rhs_135->quant_channel = 1;
+  struct csinn_diso_params *params_135 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_135->base.name = "mul_176";
+  csinn_mul_init(output_133, rhs_135, output_135, params_135);
+  struct csinn_tensor *output_137 = csinn_alloc_tensor(sess);
+  output_137->name = "output_137";
+  output_137->dtype = CSINN_DTYPE_UINT8;
+  output_137->layout = CSINN_LAYOUT_NCHW;
+  output_137->dim[0] = 1;
+  output_137->dim[1] = 126;
+  output_137->dim[2] = 2;
+  output_137->dim[3] = 2;
+  output_137->dim_count = 4;
+  output_137->qinfo = (struct csinn_quant_info *)(params_base + 5826999);
+  output_137->quant_channel = 1;
+  struct csinn_tensor *kernel_137 = csinn_alloc_tensor(sess);
+  kernel_137->name = "kernel_137";
+  kernel_137->data = params_base + 5827047;
+  kernel_137->is_const = 1;
+  kernel_137->dtype = CSINN_DTYPE_UINT8;
+  kernel_137->layout = CSINN_LAYOUT_OIHW;
+  kernel_137->dim[0] = 126;
+  kernel_137->dim[1] = 256;
+  kernel_137->dim[2] = 1;
+  kernel_137->dim[3] = 1;
+  kernel_137->dim_count = 4;
+  kernel_137->qinfo = (struct csinn_quant_info *)(params_base + 5827023);
+  kernel_137->quant_channel = 1;
+  struct csinn_tensor *bias_137 = csinn_alloc_tensor(sess);
+  bias_137->name = "bias_137";
+  bias_137->data = params_base + 5859327;
+  bias_137->is_const = 1;
+  bias_137->dtype = CSINN_DTYPE_INT32;
+  bias_137->layout = CSINN_LAYOUT_O;
+  bias_137->dim[0] = 126;
+  bias_137->dim_count = 1;
+  bias_137->qinfo = (struct csinn_quant_info *)(params_base + 5859303);
+  bias_137->quant_channel = 1;
+  struct csinn_conv2d_params *params_137 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_137->group = 1;
+  params_137->stride_height = 1;
+  params_137->stride_width = 1;
+  params_137->dilation_height = 1;
+  params_137->dilation_width = 1;
+  params_137->conv_extra.kernel_tm = NULL;
+  params_137->conv_extra.conv_mode = CSINN_DIRECT;
+  params_137->pad_top = 0;
+  params_137->pad_left = 0;
+  params_137->pad_down = 0;
+  params_137->pad_right = 0;
+  params_137->base.name = "conv2d_184_fuse_bias_add_185";
+  csinn_conv2d_init(output_95, output_137, kernel_137, bias_137, params_137);
+  int32_t *permute_138 = malloc(4 * 4);
+  permute_138[0] = 0;
+  permute_138[1] = 2;
+  permute_138[2] = 3;
+  permute_138[3] = 1;
+  struct csinn_tensor *output_138 = csinn_alloc_tensor(sess);
+  output_138->name = "output_138";
+  output_138->dtype = CSINN_DTYPE_UINT8;
+  output_138->layout = CSINN_LAYOUT_NCHW;
+  output_138->dim[0] = 1;
+  output_138->dim[1] = 2;
+  output_138->dim[2] = 2;
+  output_138->dim[3] = 126;
+  output_138->dim_count = 4;
+  output_138->qinfo = (struct csinn_quant_info *)(params_base + 5859831);
+  output_138->quant_channel = 1;
+  struct csinn_transpose_params *params_138 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_138->permute = permute_138;
+  params_138->permute_num = 4;
+  params_138->base.name = "transpose_186";
+  csinn_transpose_init(output_137, output_138, params_138);
+  int32_t *shape_139 = malloc(2 * 4);
+  shape_139[0] = 1;
+  shape_139[1] = 504;
+  struct csinn_tensor *output_139 = csinn_alloc_tensor(sess);
+  output_139->name = "output_139";
+  output_139->dtype = CSINN_DTYPE_UINT8;
+  output_139->layout = CSINN_LAYOUT_NC;
+  output_139->dim[0] = 1;
+  output_139->dim[1] = 504;
+  output_139->dim_count = 2;
+  output_139->qinfo = (struct csinn_quant_info *)(params_base + 5859855);
+  output_139->quant_channel = 1;
+  struct csinn_reshape_params *params_139 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_139->shape = shape_139;
+  params_139->shape_num = 2;
+  params_139->base.name = "batch_flatten_187";
+  csinn_reshape_init(output_138, output_139, params_139);
+  struct csinn_tensor *output_141 = csinn_alloc_tensor(sess);
+  output_141->name = "output_141";
+  output_141->dtype = CSINN_DTYPE_UINT8;
+  output_141->layout = CSINN_LAYOUT_NC;
+  output_141->dim[0] = 1;
+  output_141->dim[1] = 504;
+  output_141->dim_count = 2;
+  output_141->qinfo = (struct csinn_quant_info *)(params_base + 5859879);
+  output_141->quant_channel = 1;
+  struct csinn_tensor *rhs_141 = csinn_alloc_tensor(sess);
+  rhs_141->name = "rhs_141";
+  rhs_141->data = params_base + 5859927;
+  rhs_141->is_const = 1;
+  rhs_141->dtype = CSINN_DTYPE_UINT8;
+  rhs_141->layout = CSINN_LAYOUT_OI;
+  rhs_141->dim[0] = 1;
+  rhs_141->dim[1] = 504;
+  rhs_141->dim_count = 2;
+  rhs_141->qinfo = (struct csinn_quant_info *)(params_base + 5859903);
+  rhs_141->quant_channel = 1;
+  struct csinn_diso_params *params_141 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_141->base.name = "mul_177";
+  csinn_mul_init(output_139, rhs_141, output_141, params_141);
+  struct csinn_tensor *output_143 = csinn_alloc_tensor(sess);
+  output_143->name = "output_143";
+  output_143->dtype = CSINN_DTYPE_UINT8;
+  output_143->layout = CSINN_LAYOUT_NCHW;
+  output_143->dim[0] = 1;
+  output_143->dim[1] = 126;
+  output_143->dim[2] = 1;
+  output_143->dim[3] = 1;
+  output_143->dim_count = 4;
+  output_143->qinfo = (struct csinn_quant_info *)(params_base + 5860431);
+  output_143->quant_channel = 1;
+  struct csinn_tensor *kernel_143 = csinn_alloc_tensor(sess);
+  kernel_143->name = "kernel_143";
+  kernel_143->data = params_base + 5860479;
+  kernel_143->is_const = 1;
+  kernel_143->dtype = CSINN_DTYPE_UINT8;
+  kernel_143->layout = CSINN_LAYOUT_OIHW;
+  kernel_143->dim[0] = 126;
+  kernel_143->dim[1] = 128;
+  kernel_143->dim[2] = 1;
+  kernel_143->dim[3] = 1;
+  kernel_143->dim_count = 4;
+  kernel_143->qinfo = (struct csinn_quant_info *)(params_base + 5860455);
+  kernel_143->quant_channel = 1;
+  struct csinn_tensor *bias_143 = csinn_alloc_tensor(sess);
+  bias_143->name = "bias_143";
+  bias_143->data = params_base + 5876631;
+  bias_143->is_const = 1;
+  bias_143->dtype = CSINN_DTYPE_INT32;
+  bias_143->layout = CSINN_LAYOUT_O;
+  bias_143->dim[0] = 126;
+  bias_143->dim_count = 1;
+  bias_143->qinfo = (struct csinn_quant_info *)(params_base + 5876607);
+  bias_143->quant_channel = 1;
+  struct csinn_conv2d_params *params_143 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
+  params_143->group = 1;
+  params_143->stride_height = 1;
+  params_143->stride_width = 1;
+  params_143->dilation_height = 1;
+  params_143->dilation_width = 1;
+  params_143->conv_extra.kernel_tm = NULL;
+  params_143->conv_extra.conv_mode = CSINN_DIRECT;
+  params_143->pad_top = 0;
+  params_143->pad_left = 0;
+  params_143->pad_down = 0;
+  params_143->pad_right = 0;
+  params_143->base.name = "conv2d_188_fuse_bias_add_189";
+  csinn_conv2d_init(output_105, output_143, kernel_143, bias_143, params_143);
+  int32_t *permute_144 = malloc(4 * 4);
+  permute_144[0] = 0;
+  permute_144[1] = 2;
+  permute_144[2] = 3;
+  permute_144[3] = 1;
+  struct csinn_tensor *output_144 = csinn_alloc_tensor(sess);
+  output_144->name = "output_144";
+  output_144->dtype = CSINN_DTYPE_UINT8;
+  output_144->layout = CSINN_LAYOUT_NCHW;
+  output_144->dim[0] = 1;
+  output_144->dim[1] = 1;
+  output_144->dim[2] = 1;
+  output_144->dim[3] = 126;
+  output_144->dim_count = 4;
+  output_144->qinfo = (struct csinn_quant_info *)(params_base + 5877135);
+  output_144->quant_channel = 1;
+  struct csinn_transpose_params *params_144 = csinn_alloc_params(sizeof(struct csinn_transpose_params), sess);
+  params_144->permute = permute_144;
+  params_144->permute_num = 4;
+  params_144->base.name = "transpose_190";
+  csinn_transpose_init(output_143, output_144, params_144);
+  int32_t *shape_145 = malloc(2 * 4);
+  shape_145[0] = 1;
+  shape_145[1] = 126;
+  struct csinn_tensor *output_145 = csinn_alloc_tensor(sess);
+  output_145->name = "output_145";
+  output_145->dtype = CSINN_DTYPE_UINT8;
+  output_145->layout = CSINN_LAYOUT_NC;
+  output_145->dim[0] = 1;
+  output_145->dim[1] = 126;
+  output_145->dim_count = 2;
+  output_145->qinfo = (struct csinn_quant_info *)(params_base + 5877159);
+  output_145->quant_channel = 1;
+  struct csinn_reshape_params *params_145 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_145->shape = shape_145;
+  params_145->shape_num = 2;
+  params_145->base.name = "batch_flatten_191";
+  csinn_reshape_init(output_144, output_145, params_145);
+  struct csinn_tensor *output_147 = csinn_alloc_tensor(sess);
+  output_147->name = "output_147";
+  output_147->dtype = CSINN_DTYPE_UINT8;
+  output_147->layout = CSINN_LAYOUT_NC;
+  output_147->dim[0] = 1;
+  output_147->dim[1] = 126;
+  output_147->dim_count = 2;
+  output_147->qinfo = (struct csinn_quant_info *)(params_base + 5877183);
+  output_147->quant_channel = 1;
+  struct csinn_tensor *rhs_147 = csinn_alloc_tensor(sess);
+  rhs_147->name = "rhs_147";
+  rhs_147->data = params_base + 5877231;
+  rhs_147->is_const = 1;
+  rhs_147->dtype = CSINN_DTYPE_UINT8;
+  rhs_147->layout = CSINN_LAYOUT_OI;
+  rhs_147->dim[0] = 1;
+  rhs_147->dim[1] = 126;
+  rhs_147->dim_count = 2;
+  rhs_147->qinfo = (struct csinn_quant_info *)(params_base + 5877207);
+  rhs_147->quant_channel = 1;
+  struct csinn_diso_params *params_147 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
+  params_147->base.name = "mul_178";
+  csinn_mul_init(output_145, rhs_147, output_147, params_147);
+  struct csinn_tensor *input_149[6];
+  struct csinn_tensor *output_149 = csinn_alloc_tensor(sess);
+  output_149->name = "output_149";
+  output_149->dtype = CSINN_DTYPE_UINT8;
+  output_149->layout = CSINN_LAYOUT_NC;
+  output_149->dim[0] = 1;
+  output_149->dim[1] = 40257;
+  output_149->dim_count = 2;
+  output_149->qinfo = (struct csinn_quant_info *)(params_base + 5877357);
+  output_149->quant_channel = 1;
+  struct csinn_concat_params *params_149 = csinn_alloc_params(sizeof(struct csinn_concat_params), sess);
+  params_149->inputs_count = 6;
+  params_149->axis = 1;
+  params_149->base.name = "concatenate_192";
+  csinn_concat_init(input_149, output_149, params_149);
+  int32_t *shape_150 = malloc(3 * 4);
+  shape_150[0] = 1;
+  shape_150[1] = 1917;
+  shape_150[2] = 21;
+  struct csinn_tensor *output_150 = csinn_alloc_tensor(sess);
+  output_150->name = "output_150";
+  output_150->dtype = CSINN_DTYPE_UINT8;
+  output_150->layout = CSINN_LAYOUT_NCW;
+  output_150->dim[0] = 1;
+  output_150->dim[1] = 1917;
+  output_150->dim[2] = 21;
+  output_150->dim_count = 3;
+  output_150->qinfo = (struct csinn_quant_info *)(params_base + 5877381);
+  output_150->quant_channel = 1;
+  struct csinn_reshape_params *params_150 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_150->shape = shape_150;
+  params_150->shape_num = 3;
+  params_150->base.name = "reshape_193";
+  csinn_reshape_init(output_149, output_150, params_150);
+  struct csinn_tensor *output_151 = csinn_alloc_tensor(sess);
+  output_151->name = "output_151";
+  output_151->dtype = CSINN_DTYPE_UINT8;
+  output_151->layout = CSINN_LAYOUT_NCW;
+  output_151->dim[0] = 1;
+  output_151->dim[1] = 1917;
+  output_151->dim[2] = 21;
+  output_151->dim_count = 3;
+  output_151->qinfo = (struct csinn_quant_info *)(params_base + 5877405);
+  output_151->quant_channel = 1;
+  struct csinn_softmax_params *params_151 = csinn_alloc_params(sizeof(struct csinn_softmax_params), sess);
+  params_151->axis = 2;
+  params_151->base.name = "softmax_194";
+  csinn_softmax_init(output_150, output_151, params_151);
+  int32_t *shape_152 = malloc(2 * 4);
+  shape_152[0] = 1;
+  shape_152[1] = 40257;
+  struct csinn_tensor *output_152 = csinn_alloc_tensor(sess);
+  output_152->name = "batch_flatten_195_152";
+  output_152->dtype = CSINN_DTYPE_UINT8;
+  output_152->layout = CSINN_LAYOUT_NC;
+  output_152->dim[0] = 1;
+  output_152->dim[1] = 40257;
+  output_152->dim_count = 2;
+  output_152->qinfo = (struct csinn_quant_info *)(params_base + 5877429);
+  output_152->quant_channel = 1;
+  struct csinn_reshape_params *params_152 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
+  params_152->shape = shape_152;
+  params_152->shape_num = 2;
+  params_152->base.name = "batch_flatten_195";
+  csinn_reshape_init(output_151, output_152, params_152);
+  data->mtype = CSINN_MEM_TYPE_DMABUF;
+  csinn_set_tensor_entry(data, sess);
+  csinn_set_input(0, data, sess);
+
+  csinn_mul(data, rhs_1, output_1, params_1);
+  csinn_add(output_1, rhs_4, output_4, params_4);
+  csinn_conv2d(output_4, output_6, kernel_6, bias_6, params_6);
+  csinn_relu(output_6, output_7, params_7);
+  csinn_conv2d(output_7, output_8, kernel_8, bias_8, params_8);
+  csinn_relu(output_8, output_9, params_9);
+  csinn_conv2d(output_9, output_10, kernel_10, bias_10, params_10);
+  csinn_relu(output_10, output_11, params_11);
+  csinn_conv2d(output_11, output_12, kernel_12, bias_12, params_12);
+  csinn_relu(output_12, output_13, params_13);
+  csinn_conv2d(output_13, output_14, kernel_14, bias_14, params_14);
+  csinn_relu(output_14, output_15, params_15);
+  csinn_conv2d(output_15, output_16, kernel_16, bias_16, params_16);
+  csinn_relu(output_16, output_17, params_17);
+  csinn_conv2d(output_17, output_18, kernel_18, bias_18, params_18);
+  csinn_relu(output_18, output_19, params_19);
+  csinn_conv2d(output_19, output_20, kernel_20, bias_20, params_20);
+  csinn_relu(output_20, output_21, params_21);
+  csinn_conv2d(output_21, output_22, kernel_22, bias_22, params_22);
+  csinn_relu(output_22, output_23, params_23);
+  csinn_conv2d(output_23, output_24, kernel_24, bias_24, params_24);
+  csinn_relu(output_24, output_25, params_25);
+  csinn_conv2d(output_25, output_26, kernel_26, bias_26, params_26);
+  csinn_relu(output_26, output_27, params_27);
+  csinn_conv2d(output_27, output_28, kernel_28, bias_28, params_28);
+  csinn_relu(output_28, output_29, params_29);
+  csinn_conv2d(output_29, output_30, kernel_30, bias_30, params_30);
+  csinn_relu(output_30, output_31, params_31);
+  csinn_conv2d(output_31, output_32, kernel_32, bias_32, params_32);
+  csinn_relu(output_32, output_33, params_33);
+  csinn_conv2d(output_33, output_34, kernel_34, bias_34, params_34);
+  csinn_relu(output_34, output_35, params_35);
+  csinn_conv2d(output_35, output_36, kernel_36, bias_36, params_36);
+  csinn_relu(output_36, output_37, params_37);
+  csinn_conv2d(output_37, output_38, kernel_38, bias_38, params_38);
+  csinn_relu(output_38, output_39, params_39);
+  csinn_conv2d(output_39, output_40, kernel_40, bias_40, params_40);
+  csinn_relu(output_40, output_41, params_41);
+  csinn_conv2d(output_41, output_42, kernel_42, bias_42, params_42);
+  csinn_relu(output_42, output_43, params_43);
+  csinn_conv2d(output_43, output_44, kernel_44, bias_44, params_44);
+  csinn_relu(output_44, output_45, params_45);
+  csinn_conv2d(output_45, output_46, kernel_46, bias_46, params_46);
+  csinn_relu(output_46, output_47, params_47);
+  csinn_conv2d(output_47, output_48, kernel_48, bias_48, params_48);
+  csinn_relu(output_48, output_49, params_49);
+  csinn_conv2d(output_49, output_50, kernel_50, bias_50, params_50);
+  csinn_relu(output_50, output_51, params_51);
+  csinn_conv2d(output_51, output_52, kernel_52, bias_52, params_52);
+  csinn_transpose(output_52, output_53, params_53);
+  csinn_reshape(output_53, output_54, params_54);
+  csinn_mul(output_54, rhs_56, output_56, params_56);
+  csinn_conv2d(output_51, output_58, kernel_58, bias_58, params_58);
+  csinn_relu(output_58, output_59, params_59);
+  csinn_conv2d(output_59, output_60, kernel_60, bias_60, params_60);
+  csinn_relu(output_60, output_61, params_61);
+  csinn_conv2d(output_61, output_62, kernel_62, bias_62, params_62);
+  csinn_relu(output_62, output_63, params_63);
+  csinn_conv2d(output_63, output_64, kernel_64, bias_64, params_64);
+  csinn_relu(output_64, output_65, params_65);
+  csinn_conv2d(output_65, output_66, kernel_66, bias_66, params_66);
+  csinn_transpose(output_66, output_67, params_67);
+  csinn_reshape(output_67, output_68, params_68);
+  csinn_mul(output_68, rhs_70, output_70, params_70);
+  csinn_conv2d(output_65, output_72, kernel_72, bias_72, params_72);
+  csinn_relu(output_72, output_73, params_73);
+  csinn_conv2d(output_73, output_74, kernel_74, bias_74, params_74);
+  csinn_relu(output_74, output_75, params_75);
+  csinn_conv2d(output_75, output_76, kernel_76, bias_76, params_76);
+  csinn_transpose(output_76, output_77, params_77);
+  csinn_reshape(output_77, output_78, params_78);
+  csinn_mul(output_78, rhs_80, output_80, params_80);
+  csinn_conv2d(output_75, output_82, kernel_82, bias_82, params_82);
+  csinn_relu(output_82, output_83, params_83);
+  csinn_conv2d(output_83, output_84, kernel_84, bias_84, params_84);
+  csinn_relu(output_84, output_85, params_85);
+  csinn_conv2d(output_85, output_86, kernel_86, bias_86, params_86);
+  csinn_transpose(output_86, output_87, params_87);
+  csinn_reshape(output_87, output_88, params_88);
+  csinn_mul(output_88, rhs_90, output_90, params_90);
+  csinn_conv2d(output_85, output_92, kernel_92, bias_92, params_92);
+  csinn_relu(output_92, output_93, params_93);
+  csinn_conv2d(output_93, output_94, kernel_94, bias_94, params_94);
+  csinn_relu(output_94, output_95, params_95);
+  csinn_conv2d(output_95, output_96, kernel_96, bias_96, params_96);
+  csinn_transpose(output_96, output_97, params_97);
+  csinn_reshape(output_97, output_98, params_98);
+  csinn_mul(output_98, rhs_100, output_100, params_100);
+  csinn_conv2d(output_95, output_102, kernel_102, bias_102, params_102);
+  csinn_relu(output_102, output_103, params_103);
+  csinn_conv2d(output_103, output_104, kernel_104, bias_104, params_104);
+  csinn_relu(output_104, output_105, params_105);
+  csinn_conv2d(output_105, output_106, kernel_106, bias_106, params_106);
+  csinn_transpose(output_106, output_107, params_107);
+  csinn_reshape(output_107, output_108, params_108);
+  csinn_mul(output_108, rhs_110, output_110, params_110);
+  input_112[0] = output_56;
+  input_112[1] = output_70;
+  input_112[2] = output_80;
+  input_112[3] = output_90;
+  input_112[4] = output_100;
+  input_112[5] = output_110;
+  csinn_concat(input_112, output_112, params_112);
+  csinn_conv2d(output_51, output_113, kernel_113, bias_113, params_113);
+  csinn_transpose(output_113, output_114, params_114);
+  csinn_reshape(output_114, output_115, params_115);
+  csinn_mul(output_115, rhs_117, output_117, params_117);
+  csinn_conv2d(output_65, output_119, kernel_119, bias_119, params_119);
+  csinn_transpose(output_119, output_120, params_120);
+  csinn_reshape(output_120, output_121, params_121);
+  csinn_mul(output_121, rhs_123, output_123, params_123);
+  csinn_conv2d(output_75, output_125, kernel_125, bias_125, params_125);
+  csinn_transpose(output_125, output_126, params_126);
+  csinn_reshape(output_126, output_127, params_127);
+  csinn_mul(output_127, rhs_129, output_129, params_129);
+  csinn_conv2d(output_85, output_131, kernel_131, bias_131, params_131);
+  csinn_transpose(output_131, output_132, params_132);
+  csinn_reshape(output_132, output_133, params_133);
+  csinn_mul(output_133, rhs_135, output_135, params_135);
+  csinn_conv2d(output_95, output_137, kernel_137, bias_137, params_137);
+  csinn_transpose(output_137, output_138, params_138);
+  csinn_reshape(output_138, output_139, params_139);
+  csinn_mul(output_139, rhs_141, output_141, params_141);
+  csinn_conv2d(output_105, output_143, kernel_143, bias_143, params_143);
+  csinn_transpose(output_143, output_144, params_144);
+  csinn_reshape(output_144, output_145, params_145);
+  csinn_mul(output_145, rhs_147, output_147, params_147);
+  input_149[0] = output_117;
+  input_149[1] = output_123;
+  input_149[2] = output_129;
+  input_149[3] = output_135;
+  input_149[4] = output_141;
+  input_149[5] = output_147;
+  csinn_concat(input_149, output_149, params_149);
+  csinn_reshape(output_149, output_150, params_150);
+  csinn_softmax(output_150, output_151, params_151);
+  csinn_reshape(output_151, output_152, params_152);
+  csinn_set_output(0, output_112, sess);
+  csinn_set_output(1, output_152, sess);
+  shl_pnna_set_input_strides(sess, 1, 304, 304);
+
+  csinn_session_setup(sess);
+  return sess;
+}
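+
+/*
+ * Reading the tail of the generated graph above: six per-scale box
+ * branches are joined by concatenate_167 into output_112
+ * (1 x 7668 = 1917 priors x 4 box coordinates, session output 0), and six
+ * class branches are joined by concatenate_192 into 1 x 40257, which is
+ * reshaped to (1, 1917, 21), softmaxed over axis 2 (softmax_194) and
+ * flattened back as output_152 (session output 1).
+ */
+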
+void csinn_run(void *data0, void *sess) {
+  struct csinn_tensor input_tensor;
+  input_tensor.data = data0;
+  csinn_update_input(0, &input_tensor, sess);
+  csinn_session_run(sess);
+}
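+
+/*
+ * csinn_run() above fills only the data pointer of a stack-allocated
+ * tensor before csinn_update_input(); the runtime is expected to read
+ * nothing else from it. A slightly more defensive variant (a sketch, not
+ * part of the generated API) would zero the struct first:
+ *
+ *   struct csinn_tensor input_tensor = {0};
+ *   input_tensor.data = data0;
+ *   csinn_update_input(0, &input_tensor, sess);
+ */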
+
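+/*
+ * Layout assumed by csinn_import_binary_model() below (inferred from the
+ * generated code, not from separate documentation): the .bm image starts
+ * with a 4 KiB header page followed by a section table, and section
+ * offsets are counted in 4 KiB pages, so byte addresses are rebuilt as
+ * bm_addr + offset * 4096, e.g.:
+ *
+ *   struct shl_binary_model_section_info *si =
+ *       (struct shl_binary_model_section_info *)(bm_addr + 4096);
+ *   char *graph = bm_addr + si->sections->graph_offset * 4096;
+ */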
+struct csinn_session *csinn_import_binary_model(char *bm_addr) {
+  struct shl_binary_model_section_info *sinfo = (struct shl_binary_model_section_info *)(bm_addr + 4096);
+  struct csinn_session *bm_sess = (struct csinn_session *)(bm_addr + sinfo->sections->info_offset * 4096);
+  struct csinn_session *sess = csinn_alloc_session();
+  shl_bm_session_load(sess, bm_sess);
+  sess->model.bm_addr = bm_addr + sinfo->sections->graph_offset * 4096;
+  sess->model.bm_size = sinfo->sections->graph_size;
+  shl_pnna_set_input_strides(sess, 1, 304, 304);
+  csinn_load_binary_model(sess);
+  return sess;
+}

+ 622 - 0
test/face_detect/npu_sink_src_test.c

@@ -0,0 +1,622 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h> /* for the bool flag used below */
+#include <libgen.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <memory.h>
+#include "process_linker_types.h"
+#include "io.h"
+#include "shl_ref.h"
+#include "process.h"
+#include "video_mem.h"
+
+#include "detect.h"
+#include "output_120_out0_nchw_1_2_7668_1.h"
+
+#define MODULE_NAME "npu_sink_src_test"
+
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+#define FILE_LENGTH         1028
+#define SHAPE_LENGTH        128
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+void *csinn_(char *params);
+void csinn_run(void *data0, void *sess);
+void *csinn_nbg(const char *nbg_file_name);
+
+#define OUT_SIZE1 (1*7668)
+#define OUT_SIZE2 (1917*21)
+int input_size[] = {1 * 3 * 304 * 304, };
+int output_size[] = {OUT_SIZE1, OUT_SIZE1 * 2, 1 * OUT_SIZE2, };
+const char model_name[] = "network";
+
+#define R_MEAN  127.5
+#define G_MEAN  127.5
+#define B_MEAN  127.5
+#define SCALE   (1.0/127.5)
+
+float mean[] = {B_MEAN, G_MEAN, R_MEAN};
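+
+/* With means of 127.5 and SCALE = 1/127.5, an 8-bit pixel x maps to
+ * (x - 127.5) / 127.5, i.e. into [-1, 1]. In this test the arithmetic is
+ * not done on the CPU: it appears to be folded into the leading
+ * csinn_mul/csinn_add nodes of the generated graph, so these constants are
+ * informational (a reading of the generated code, not separate docs). */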
+
+const char class_name[][FILE_LENGTH] = {
+    "background", "aeroplane", "bicycle", "bird", "boat",
+    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
+    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
+    "sofa", "train", "tvmonitor"
+};
+
+const char shm_sink_name[] = "/ispnpu";
+const char shm_src_name[] = "/npu_g2d";
+
+#define BASE_MEMORY 0xD8000000
+
+typedef struct _ServerParams
+{
+    char *plinkname;
+    int count;
+    int frames;
+} ServerParams;
+
+typedef struct _PlinkChannel
+{
+    PlinkChannelID id;
+    PlinkHandle plink;
+    PlinkPacket pkt;
+    int sendid;
+    int backid;
+    int exit;
+    int available_bufs;
+} PlinkChannel;
+
+typedef struct _FeatureBuffer {
+    unsigned int bus_address;
+    void *virtual_address;
+    unsigned int size;
+} FeatureBuffer;
+
+
+void parseParams(int argc, char **argv, ServerParams *params)
+{
+    int i = 1;
+    memset(params, 0, sizeof(*params));
+    params->plinkname = "/tmp/plink_npu_featuremap.test";
+    params->frames = 3;
+    while (i < argc)
+    {
+        if (argv[i][0] != '-' || strlen(argv[i]) < 2)
+        {
+            i++;
+            continue;
+        }
+
+        if (argv[i][1] == 'l')
+        {
+            if (++i < argc)
+            {
+                params->plinkname = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'n')
+        {
+            if (++i < argc)
+            {
+                params->frames = atoi(argv[i++]);
+            }
+        }
+    }
+}
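+
+/*
+ * Only -l (plink socket path) and -n (frame count) are recognized above;
+ * other arguments are skipped, so the model/input paths consumed by main()
+ * pass through untouched. A plausible invocation, using files shipped in
+ * this SDK (a sketch, not taken from a run script):
+ *
+ *   ./npu_sink_src_test model/install/face_detect/shl.hhb.bm \
+ *       input/install/face_detect/data.0.bin -l /tmp/plink_npu_featuremap.test
+ */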
+
+int checkParams(ServerParams *params)
+{
+    if (params->plinkname == NULL)
+        return -1;
+    return 0;
+}
+
+void printUsage(char *name)
+{
+    printf("usage: %s model.params [input ...] [options]\n"
+           "\n"
+           "  Available options:\n"
+           "    -l      plink file name (default: /tmp/plink_npu_featuremap.test)\n"
+           "    -n      number of frames to send (default: 3)\n"
+           "\n", name);
+}
+
+int getBufferSize(ServerParams *params)
+{
+    return sizeof(PlinkObjectDetect);
+}
+
+void constructFeatureInfo(PlinkObjectInfo *info, ServerParams *params, unsigned int bus_address, int face_cnt, int id)
+{
+    info->header.type = PLINK_TYPE_OBJECT;
+    info->header.size = DATA_SIZE(*info);
+    info->header.id = id + 1;
+
+    info->bus_address = bus_address;
+    info->object_cnt = face_cnt;
+}
+
+int getBufferCount(PlinkPacket *pkt)
+{
+    int ret = 0;
+    for (int i = 0; i < pkt->num; i++)
+    {
+        PlinkDescHdr *hdr = (PlinkDescHdr *)(pkt->list[i]);
+        if (hdr->type == PLINK_TYPE_MESSAGE)
+        {
+            int *data = (int *)(pkt->list[i] + DATA_HEADER_SIZE);
+            if (*data == PLINK_EXIT_CODE)
+            {
+                ret |= 0x80000000; // set bit 31 to 1 to indicate 'exit'
+            }
+            else if (*data >= 0)
+                ret++;
+        }
+    }
+
+    return ret;
+}
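+
+/*
+ * A caller is expected to test bit 31 of the return value for the exit
+ * request and mask it off before counting returned buffers, e.g. (a
+ * sketch, not from this file):
+ *
+ *   int n = getBufferCount(&channel->pkt);
+ *   if (n & 0x80000000) channel->exit = 1;
+ *   channel->available_bufs += n & 0x7FFFFFFF;
+ */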
+
+void retreiveSentBuffers(PlinkHandle plink, PlinkChannel *channel)
+{
+    PlinkStatus sts = PLINK_STATUS_OK;
+    while (channel->available_bufs < NUM_OF_BUFFERS)
+    {
+        do
+        {
+            sts = PLINK_recv(plink, channel->id, &channel->pkt);
+            int count = getBufferCount(&channel->pkt);
+            if (count > 0)
+            {
+                channel->available_bufs += count;
+            }
+        } while (sts == PLINK_STATUS_MORE_DATA);
+    }
+}
+
+
+void AllocateBuffers(FeatureBuffer featurebuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  unsigned int bus_address = BASE_MEMORY;
+  unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    featurebuffers[i].virtual_address = mmap(0, buffer_size, PROT_READ | PROT_WRITE,
+                                        MAP_SHARED, fd_mem,
+                                        bus_address);
+    printf("mmap %p from %x with size %d\n", featurebuffers[i].virtual_address, bus_address, size);
+    featurebuffers[i].bus_address = bus_address;
+    featurebuffers[i].size = buffer_size;
+    bus_address += buffer_size;
+  }
+}
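+
+/*
+ * Each mapping above is rounded up to a whole 4 KiB page:
+ *   (size + 0xFFF) & ~0xFFF  ==  4096 * ((size + 4095) / 4096)
+ * so NUM_OF_BUFFERS consecutive page-aligned windows are mapped from the
+ * fixed physical base BASE_MEMORY via /dev/mem. This assumes that range is
+ * reserved for the test and not managed by the kernel.
+ */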
+
+void FreeBuffers(FeatureBuffer featurebuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    munmap(featurebuffers[i].virtual_address, featurebuffers[i].size);
+  }
+}
+
+static void print_tensor_info(struct csinn_tensor *t) {
+    printf("\n=== tensor info ===\n");
+    printf("shape: ");
+    for (int j = 0; j < t->dim_count; j++) {
+        printf("%d ", t->dim[j]);
+    }
+    printf("\n");
+    if (t->dtype == CSINN_DTYPE_UINT8) {
+        printf("scale: %f\n", t->qinfo->scale);
+        printf("zero point: %d\n", t->qinfo->zero_point);
+    }
+    printf("data pointer: %p\n", t->data);
+}
+
+
+/*
+ * Postprocess function
+ */
+static void postprocess(void *sess, const char *filename_prefix, BBoxOut *out, int *num) {
+    int output_num, input_num;
+    struct csinn_tensor *input = csinn_alloc_tensor(NULL);
+    struct csinn_tensor *output = csinn_alloc_tensor(NULL);
+
+    //input_num = csinn_get_input_number(sess);
+    //for (int i = 0; i < input_num; i++) {
+    //    input->data = NULL;
+    //    csinn_get_input(i, input, sess);
+    //    //print_tensor_info(input);
+
+    //    struct csinn_tensor *finput = shl_ref_tensor_transform_f32(input);
+    //    char filename[FILE_LENGTH] = {0};
+    //    char shape[SHAPE_LENGTH] = {0};
+    //    shape2string(input->dim, input->dim_count, shape, SHAPE_LENGTH);
+    //    snprintf(filename, FILE_LENGTH, "%s_input%u_%s.txt", filename_prefix, i, shape);
+    //    int input_size = csinn_tensor_size(input);
+    //    printf("input_size: %d\n", input_size);
+    //    save_data_to_file(filename, (float*)finput->data, input_size);
+    //}
+
+    float *location = (float*)malloc(OUT_SIZE1*sizeof(float));
+    float *confidence = (float*)malloc(OUT_SIZE2*sizeof(float));
+
+    output_num = csinn_get_output_number(sess);
+    for (int i = 0; i < output_num; i++) {
+        output->data = NULL;
+        csinn_get_output(i, output, sess);
+        //print_tensor_info(output);
+
+        struct csinn_tensor *foutput = shl_ref_tensor_transform_f32(output);
+        //shl_show_top5(foutput, sess);
+        //char filename[FILE_LENGTH] = {0};
+        //char shape[SHAPE_LENGTH] = {0};
+        //shape2string(output->dim, output->dim_count, shape, SHAPE_LENGTH);
+        //snprintf(filename, FILE_LENGTH, "%s_output%u_%s.txt", filename_prefix, i, shape);
+        //int output_size = csinn_tensor_size(foutput);
+        //save_data_to_file(filename, (float*)foutput->data, output_size);
+
+        if (i == 0)
+            memcpy(location, (float *)foutput->data, OUT_SIZE1*sizeof(float));
+        if (i == 1)
+            memcpy(confidence, (float *)foutput->data, OUT_SIZE2*sizeof(float));
+
+        shl_ref_tensor_transform_free_f32(foutput);
+    }
+
+    //BBoxOut out[100];
+    BBox gbboxes[num_prior];
+
+    *num = ssdforward(location, confidence, priorbox, gbboxes, out);
+
+    free(location);
+    free(confidence);
+
+    printf("%d\n", *num);
+    for (int i = 0; i < *num; i++) {
+        fprintf(stderr, "%d, label=%s, score=%f, x1=%f, y1=%f, x2=%f, y2=%f\n", out[i].label, class_name[out[i].label],
+             out[i].score, out[i].xmin, out[i].ymin, out[i].xmax, out[i].ymax);
+    }
+
+    csinn_free_tensor(input);
+    csinn_free_tensor(output);
+}
+
+void *create_graph(char *params_path) {
+    void *ret;
+    int binary_size;
+    char *params = get_binary_from_file(params_path, &binary_size);
+    if (params == NULL) {
+        return NULL;
+    }
+
+    char *suffix = params_path + (strlen(params_path) - 7);
+    if (strcmp(suffix, ".params") == 0) {
+        // create general graph
+        ret = csinn_(params);
+        free(params);
+        return ret;
+    }
+
+    suffix = params_path + (strlen(params_path) - 3);
+    if (strcmp(suffix, ".bm") == 0) {
+        struct shl_bm_sections *section = (struct shl_bm_sections *)(params + 4128);
+        if (section->graph_offset) {
+            ret = csinn_import_binary_model(params);
+            free(params);
+            return ret;
+        } else {
+            ret = csinn_(params + section->params_offset * 4096);
+            free(params);
+            return ret;
+        }
+    } else {
+        free(params);
+        return NULL;
+    }
+}
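+
+/*
+ * create_graph() dispatches on the file suffix: "*.params" feeds the raw
+ * params blob to the generated csinn_() builder, while "*.bm" either
+ * imports a compiled binary model (when a graph section is present) or
+ * falls back to csinn_() on the embedded params section. Typical call,
+ * using the model shipped in this SDK:
+ *
+ *   void *sess = create_graph("model/install/face_detect/shl.hhb.bm");
+ */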
+
+bool is_looping = true;
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    char *params_path = NULL;
+    int input_num = 1;
+    int output_num = 3;
+    int input_group_num = 1;
+    int index = 0;
+    int export_index = 0;
+    PlinkStatus sts = PLINK_STATUS_OK;
+    PlinkPacket sendpkt, recvpkt;
+    PlinkMsg msg;
+    PlinkHandle plink_npu_sink = NULL;
+    PlinkHandle plink_npu_src = NULL;
+    ServerParams params;
+    VmemParams vmem_params;
+    void *vmem = NULL;
+    PlinkChannel channel[2];
+    PlinkObjectInfo feat_map;
+    FILE *fp = NULL;
+    int exitcode = 0;
+    int i;
+
+    if (argc < (2 + input_num)) {
+        printf("Please set valide args: ./model.elf model.params "
+                "[tensor1/image1 ...] [tensor2/image2 ...]\n");
+        return -1;
+    } else {
+        if (argc == 3 && get_file_type(argv[2]) == FILE_TXT) {
+            data_path = read_string_from_file(argv[2], &input_group_num);
+            input_group_num /= input_num;
+        } else {
+            data_path = argv + 2;
+            input_group_num = (argc - 2) / input_num;
+        }
+    }
+
+    parseParams(argc, argv, &params);
+    if (checkParams(&params) != 0)
+    {
+        printUsage(argv[0]);
+        return 0;
+    }
+
+    void *sess = create_graph(argv[1]);
+
+    uint8_t *input[input_num];
+    float *finput[input_num];
+    char filename_prefix[FILE_LENGTH] = {0};
+    void *input_aligned[input_num];
+    for (i = 0; i < input_num; i++) {
+        input_aligned[i] = shl_mem_alloc_aligned(input_size[i], 0);
+    }
+    uint64_t start_time, end_time;
+    uint64_t _start_time, _end_time;
+    
+    //int frames = argc > 1 ? atoi(argv[1]) : 1000;
+    int frames = 1000;
+    //char *dumpname = argc > 2 ? argv[2] : NULL;
+    char *dumpname = NULL;
+    if (dumpname != NULL)
+    {
+        fp = fopen(dumpname, "wb");
+        if (fp == NULL)
+            errExit("fopen");
+    }
+
+    if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+        errExit("Failed to create VMEM.");
+
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem == -1) {
+      printf("ERROR: failed to open: %s\n", "/dev/mem");
+      return -1;
+    }
+
+    int fd_src_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_src_mem < 0) {
+        printf("%s: failed to open /dev/mem", MODULE_NAME);
+        return -1;
+    }
+
+    BBoxOut out[100];
+    int out_num = 0;
+    FeatureBuffer featurebuffers[NUM_OF_BUFFERS];
+    AllocateBuffers(featurebuffers, 100 * 15 * sizeof(float), fd_mem); /* room for up to 100 detections x 15 floats, as written below */
+
+    printf("PLINK begin to create & connect /tmp/plink_npu_rgb.test\n");
+    if (PLINK_create(&plink_npu_sink, "/tmp/plink_npu_rgb.test", PLINK_MODE_CLIENT) != PLINK_STATUS_OK)
+        errExit("Failed to create PLINK.");
+
+    if (PLINK_connect(plink_npu_sink, NULL) != PLINK_STATUS_OK)
+        errExit("Failed to connect to server.");
+
+    printf("PLINK begin to create & connect %s\n", params.plinkname);
+    sts = PLINK_create(&plink_npu_src, params.plinkname, PLINK_MODE_SERVER);
+
+    memset(&channel[0], 0, sizeof(channel[0]));
+    channel[0].available_bufs = NUM_OF_BUFFERS;
+    sts = PLINK_connect(plink_npu_src, &channel[0].id);
+    
+    printf("begin to loops\n");
+    int frmcnt = 0;
+    // while loop to receive shm picture
+    do {
+        sts = PLINK_recv(plink_npu_sink, 0, &recvpkt);
+        memset(&vmem_params, 0, sizeof(vmem_params));
+        if (recvpkt.fd != PLINK_INVALID_FD) { // dmabuf fd
+            vmem_params.fd = recvpkt.fd;
+            if (VMEM_import(vmem, &vmem_params) != VMEM_STATUS_OK)
+                errExit("Failed to import fd.");
+            printf("%s: recvpkt.fd<%d> phy<0x%lx>\n", MODULE_NAME, vmem_params.fd, vmem_params.phy_address);
+        }
+
+        for (i = 0; i < recvpkt.num; i++) {
+            PlinkDescHdr *hdr = (PlinkDescHdr *)(recvpkt.list[i]);
+            if (hdr->type == PLINK_TYPE_2D_RGB) {
+                uint8_t *vaddr = 0;
+                PlinkRGBInfo *pic = (PlinkRGBInfo *)(recvpkt.list[i]);
+                printf("[CLIENT] Received frame %d 0x%010llx: %dx%d, stride = %d\n", 
+                        pic->header.id, pic->bus_address_b, 
+                        pic->img_width, pic->img_height,
+                        pic->stride_b);
+                int size_stride = 3 * pic->stride_b * pic->stride_b;
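+                // NOTE: maps 3 * stride^2 bytes, which assumes a square,
+                // stride-aligned RGB frame (stride_b >= img_height), as with
+                // the 304x304 model input.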
+                if (recvpkt.fd == PLINK_INVALID_FD) { // physical address
+                    vaddr = (uint8_t *) mmap(0, size_stride, PROT_READ | PROT_WRITE,
+                        MAP_SHARED, fd_mem, pic->bus_address_b);
+                }
+
+                // return the buffer to source
+                msg.header.type = PLINK_TYPE_MESSAGE;
+                msg.header.size = DATA_SIZE(PlinkMsg);
+                msg.msg = hdr->id;
+                sendpkt.list[0] = &msg;
+                sendpkt.num = 1;
+                sendpkt.fd = PLINK_INVALID_FD;
+                if (PLINK_send(plink_npu_sink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                    errExit("Failed to send data.");
+
+                _start_time = shl_get_timespec();
+                if (recvpkt.fd == PLINK_INVALID_FD) { // physical address
+                    csinn_run(vaddr, sess);
+                } else {
+                    csinn_run(&vmem_params.fd, sess);
+                }
+                _end_time = shl_get_timespec();
+                printf("Run graph execution time: %.5fms, FPS=%.2f\n", ((float)(_end_time-_start_time))/1000000,
+                    1000000000.0/((float)(_end_time-_start_time)));
+
+                //snprintf(filename_prefix, FILE_LENGTH, "%s", basename(data_path[i * input_num]));
+                _start_time = shl_get_timespec();
+                postprocess(sess, filename_prefix, out, &out_num);
+                _end_time = shl_get_timespec();
+                printf("postProcess execution time: %.5fms\n", ((float)(_end_time-_start_time))/1000000);
+
+                if (recvpkt.fd == PLINK_INVALID_FD) // physical address
+                    munmap(vaddr, size_stride);
+
+                int sendid = channel[0].sendid;
+                int face_cnt = 0;
+                float *ptr = (float *)featurebuffers[sendid].virtual_address;
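+                // Each "person" detection is serialized as 15 floats: score,
+                // bbox (xmin, ymin, xmax, ymax), then 10 fields padded with -1.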
+                for (int i = 0; i < out_num; i++) {
+                   if (strcmp(class_name[out[i].label], "person") == 0) {
+                       // skip boxes that sit too low in the frame or are wider than they are tall
+                       //if ((out[i].ymin > 0.3) || ((out[i].ymax - out[i].ymin) < (out[i].xmax - out[i].xmin)))
+                       //    continue;
+                       face_cnt++;
+                       *ptr++ = out[i].score;
+                       *ptr++ = out[i].xmin;
+                       *ptr++ = out[i].ymin;
+                       *ptr++ = out[i].xmax;
+                       *ptr++ = out[i].ymax;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                       *ptr++ = -1;
+                   }
+                }
+
+                if (face_cnt > 0) {
+                    constructFeatureInfo(&feat_map, &params, featurebuffers[sendid].bus_address, face_cnt, sendid);
+                    printf("[FeatureMap SERVER] Processed frame %d 0x%08x: %d\n",
+                        sendid, feat_map.bus_address, face_cnt);
+
+                    channel[0].pkt.list[0] = &feat_map;
+                    channel[0].pkt.num = 1;
+                    //channel[0].pkt.fd = picbuffers[sendid].fd;
+                    channel[0].pkt.fd = PLINK_INVALID_FD; // physical address
+                    sts = PLINK_send(plink_npu_src, channel[0].id, &channel[0].pkt);
+                    channel[0].sendid = (channel[0].sendid + 1) % NUM_OF_BUFFERS;
+                    channel[0].available_bufs -= 1;
+                    // Notify npu one picture is ready for inference
+                    int timeout = 0;
+                    if (channel[0].available_bufs == 0)
+                        timeout = 60000; // wait up to 60 seconds if buffers are used up
+
+                    if (PLINK_wait(plink_npu_src, channel[0].id, timeout) == PLINK_STATUS_OK)
+                    {
+                        do
+                        {
+                            sts = PLINK_recv(plink_npu_src, channel[0].id, &channel[0].pkt);
+                            int count = getBufferCount(&channel[0].pkt);
+                            if (count < 0)
+                                channel[0].exit = 1;
+                            channel[0].available_bufs += count;
+                        } while (sts == PLINK_STATUS_MORE_DATA);
+                    }
+                }
+            }
+            else if (hdr->type == PLINK_TYPE_MESSAGE)
+            {
+                PlinkMsg *msg = (PlinkMsg *)(recvpkt.list[i]);
+                if (msg->msg == PLINK_EXIT_CODE)
+                {
+                    exitcode = 1;
+                    printf("Exit\n");
+                    break;
+                }
+            }
+        }
+        if (recvpkt.fd != PLINK_INVALID_FD) { // dmabuf fd
+            if (VMEM_release(vmem, &vmem_params) != VMEM_STATUS_OK)
+                errExit("Failed to release buffer.");
+            close(recvpkt.fd);
+        }
+
+        frmcnt++;
+
+#if 0
+        if (frmcnt >= frames)
+        {
+            msg.header.type = PLINK_TYPE_MESSAGE;
+            msg.header.size = DATA_SIZE(PlinkMsg);
+            msg.msg = PLINK_EXIT_CODE;
+            sendpkt.list[0] = &msg;
+            sendpkt.num = 1;
+            sendpkt.fd = PLINK_INVALID_FD;
+            if (PLINK_send(plink_npu_sink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                errExit("Failed to send data.");
+            break;
+        }
+#endif
+    } while (exitcode == 0);
+
+    retreiveSentBuffers(plink_npu_src, &channel[0]);
+
+cleanup:
+    sleep(1); // sleep one second to make sure the server is ready to exit
+    PLINK_close(plink_npu_sink, 0);
+    if (fp != NULL)
+        fclose(fp);
+    
+    for (i = 0; i < input_num; i++) {
+        shl_mem_free(input_aligned[i]);
+    }
+    csinn_session_deinit(sess);
+    csinn_free_session(sess);
+
+    return 0;
+}
+

+ 550 - 0
test/face_detect/npu_sink_test.c

@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto generate by HHB_VERSION "2.0.21" */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <memory.h>
+#include "process_linker_types.h"
+#include "io.h"
+#include "shl_ref.h"
+#include "process.h"
+#include "video_mem.h"
+
+#include "detect.h"
+#include "output_120_out0_nchw_1_2_7668_1.h"
+
+#define MODULE_NAME "npu_sink_test"
+
+#define MIN(x, y)           ((x) < (y) ? (x) : (y))
+#define FILE_LENGTH         1028
+#define SHAPE_LENGTH        128
+
+#ifndef NULL
+#define NULL    ((void *)0)
+#endif
+
+#define NUM_OF_BUFFERS  5
+#define errExit(msg)    do { perror(msg); exit(EXIT_FAILURE); \
+                        } while (0)
+
+void *csinn_(char *params);
+void csinn_run(void *data0,  void *td);
+void *csinn_nbg(const char *nbg_file_name);
+
+#define OUT_SIZE1 (1*7668)
+#define OUT_SIZE2 (1917*21)
+int input_size[] = {1 * 3 * 304 * 304, };
+int output_size[] = {OUT_SIZE1, OUT_SIZE1 * 2, 1 * OUT_SIZE2, };
+const char model_name[] = "network";
+
+#define R_MEAN  127.5
+#define G_MEAN  127.5
+#define B_MEAN  127.5
+#define SCALE   (1.0/127.5)
+
+float mean[] = {B_MEAN, G_MEAN, R_MEAN};
+
+const char class_name[][FILE_LENGTH] = {
+    "background", "aeroplane", "bicycle", "bird", "boat",
+    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
+    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
+    "sofa", "train", "tvmonitor"
+};
+
+const char shm_sink_name[] = "/ispnpu";
+const char shm_src_name[] = "/npu_g2d";
+
+#define BASE_MEMORY 0xD8000000
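+// NOTE: assumed board-specific physical region reserved for feature-map buffers.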
+
+typedef struct _ServerParams
+{
+    char *plinkname;
+    int count;
+    int frames;
+} ServerParams;
+
+typedef struct _PlinkChannel
+{
+    PlinkChannelID id;
+    PlinkHandle plink;
+    PlinkPacket pkt;
+    int sendid;
+    int backid;
+    int exit;
+    int available_bufs;
+} PlinkChannel;
+
+typedef struct _FeatureBuffer {
+    unsigned int bus_address;
+    void *virtual_address;
+    unsigned int size;
+} FeatureBuffer;
+
+
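+// Parses "-l <plink socket path>" and "-n <frame count>"; other arguments are skipped.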
+void parseParams(int argc, char **argv, ServerParams *params)
+{
+    int i = 1;
+    memset(params, 0, sizeof(*params));
+    params->plinkname = "/tmp/plink_npu_featuremap.test";
+    params->frames = 3;
+    while (i < argc)
+    {
+        if (argv[i][0] != '-' || strlen(argv[i]) < 2)
+        {
+            i++;
+            continue;
+        }
+
+        if (argv[i][1] == 'l')
+        {
+            if (++i < argc)
+            {
+                params->plinkname = argv[i++];
+            }
+        }
+        else if (argv[i][1] == 'n')
+        {
+            if (++i < argc)
+            {
+                params->frames = atoi(argv[i++]);
+            }
+        }
+    }
+}
+
+int checkParams(ServerParams *params)
+{
+    if (params->plinkname == NULL)
+        return -1;
+    return 0;
+}
+
+void printUsage(char *name)
+{
+    printf("usage: %s [options]\n"
+           "\n"
+           "  Available options:\n"
+           "    -l      plink file name (default: /tmp/plink.test)\n"
+           "    -i      input YUV file name (mandatory)\n"
+           "    -f      input color format (default: 2)\n"
+           "                2 - I420\n"
+           "                3 - NV12\n"
+           "    -w      video width (mandatory)\n"
+           "    -h      video height (mandatory)\n"
+           "    -s      video buffer stride (default: video width)\n"
+           "    -n      number of frames to send (default: 10)\n"
+           "\n", name);
+}
+
+int getBufferSize(ServerParams *params)
+{
+    return sizeof(PlinkObjectDetect);
+}
+
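+// Fills a PLINK_TYPE_OBJECT descriptor: header ids are 1-based, bus_address points
+// at the shared buffer holding 'face_cnt' serialized detections.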
+void constructFeatureInfo(PlinkObjectInfo *info, ServerParams *params, unsigned int bus_address, int face_cnt, int id)
+{
+    info->header.type = PLINK_TYPE_OBJECT;
+    info->header.size = DATA_SIZE(*info);
+    info->header.id = id + 1;
+
+    info->bus_address = bus_address;
+    info->object_cnt = face_cnt;
+}
+
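+// Counts buffer-return messages in a packet. A PLINK_EXIT_CODE message sets bit 31,
+// making the result negative so callers can treat it as an exit request.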
+int getBufferCount(PlinkPacket *pkt)
+{
+    int ret = 0;
+    for (int i = 0; i < pkt->num; i++)
+    {
+        PlinkDescHdr *hdr = (PlinkDescHdr *)(pkt->list[i]);
+        if (hdr->type == PLINK_TYPE_MESSAGE)
+        {
+            int *data = (int *)(pkt->list[i] + DATA_HEADER_SIZE);
+            if (*data == PLINK_EXIT_CODE)
+            {
+                ret |= 0x80000000; // set bit 31 to 1 to indicate 'exit'
+            }
+            else if (*data >= 0)
+                ret++;
+        }
+    }
+
+    return ret;
+}
+
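+// Blocks until every one of the NUM_OF_BUFFERS sent buffers has been returned by
+// the downstream consumer.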
+void retreiveSentBuffers(PlinkHandle plink, PlinkChannel *channel)
+{
+    PlinkStatus sts = PLINK_STATUS_OK;
+    while (channel->available_bufs < NUM_OF_BUFFERS)
+    {
+        do
+        {
+            sts = PLINK_recv(plink, channel->id, &channel->pkt);
+            int count = getBufferCount(&channel->pkt);
+            if (count > 0)
+            {
+                channel->available_bufs += count;
+            }
+        } while (sts == PLINK_STATUS_MORE_DATA);
+    }
+}
+
+
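+// Carves NUM_OF_BUFFERS contiguous, 4 KiB-aligned buffers out of physical memory
+// starting at BASE_MEMORY, mapping each one through the /dev/mem fd passed in.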
+void AllocateBuffers(FeatureBuffer featurebuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  unsigned int bus_address = BASE_MEMORY;
+  unsigned int buffer_size = (size + 0xFFF) & ~0xFFF;
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    featurebuffers[i].virtual_address = mmap(0, buffer_size, PROT_READ | PROT_WRITE,
+                                        MAP_SHARED, fd_mem,
+                                        bus_address);
+    printf("mmap %p from %x with size %d\n", featurebuffers[i].virtual_address, bus_address, size);
+    featurebuffers[i].bus_address = bus_address;
+    featurebuffers[i].size = buffer_size;
+    bus_address += buffer_size;
+  }
+}
+
+void FreeBuffers(FeatureBuffer featurebuffers[NUM_OF_BUFFERS], unsigned int size, int fd_mem) {
+  for (int i = 0; i < NUM_OF_BUFFERS; i++) {
+    munmap(featurebuffers[i].virtual_address, featurebuffers[i].size);
+  }
+}
+
+static void print_tensor_info(struct csinn_tensor *t) {
+    printf("\n=== tensor info ===\n");
+    printf("shape: ");
+    for (int j = 0; j < t->dim_count; j++) {
+        printf("%d ", t->dim[j]);
+    }
+    printf("\n");
+    if (t->dtype == CSINN_DTYPE_UINT8) {
+        printf("scale: %f\n", t->qinfo->scale);
+        printf("zero point: %d\n", t->qinfo->zero_point);
+    }
+    printf("data pointer: %p\n", t->data);
+}
+
+
+/*
+ * Postprocess: fetch each graph output, convert to float, then decode SSD
+ * boxes against the generated prior boxes via ssdforward().
+ */
+static void postprocess(void *sess, const char *filename_prefix, BBoxOut *out, int *num) {
+    int output_num, input_num;
+    struct csinn_tensor *input = csinn_alloc_tensor(NULL);
+    struct csinn_tensor *output = csinn_alloc_tensor(NULL);
+
+    //input_num = csinn_get_input_number(sess);
+    //for (int i = 0; i < input_num; i++) {
+    //    input->data = NULL;
+    //    csinn_get_input(i, input, sess);
+    //    //print_tensor_info(input);
+    //
+    //    struct csi_tensor *finput = shl_ref_tensor_transform_f32(input);
+    //    char filename[FILE_LENGTH] = {0};
+    //    char shape[SHAPE_LENGTH] = {0};
+    //    shape2string(input->dim, input->dim_count, shape, SHAPE_LENGTH);
+    //    snprintf(filename, FILE_LENGTH, "%s_input%u_%s.txt", filename_prefix, i, shape);
+    //    int input_size = csi_tensor_size(input);
+    //    printf("input_size: %d\n", input_size);
+    //    save_data_to_file(filename, (float*)finput->data, input_size);
+    //}
+
+    float *location = (float*)malloc(OUT_SIZE1*sizeof(float));
+    float *confidence = (float*)malloc(OUT_SIZE2*sizeof(float));
+
+    output_num = csinn_get_output_number(sess);
+    for (int i = 0; i < output_num; i++) {
+        output->data = NULL;
+        csinn_get_output(i, output, sess);
+        //print_tensor_info(output);
+
+        struct csinn_tensor *foutput = shl_ref_tensor_transform_f32(output);
+        //shl_show_top5(foutput, sess);
+        //char filename[FILE_LENGTH] = {0};
+        //char shape[SHAPE_LENGTH] = {0};
+        //shape2string(output->dim, output->dim_count, shape, SHAPE_LENGTH);
+        //snprintf(filename, FILE_LENGTH, "%s_output%u_%s.txt", filename_prefix, i, shape);
+        //int output_size = csinn_tensor_size(foutput);
+        //save_data_to_file(filename, (float*)foutput->data, output_size);
+
+        if (i == 0)
+            memcpy(location, (float *)foutput->data, OUT_SIZE1*sizeof(float));
+        if (i == 1)
+            memcpy(confidence, (float *)foutput->data, OUT_SIZE2*sizeof(float));
+
+        shl_ref_tensor_transform_free_f32(foutput);
+    }
+
+    //BBoxOut out[100];
+    BBox gbboxes[num_prior];
+
+    *num = ssdforward(location, confidence, priorbox, gbboxes, out);
+
+    free(location);
+    free(confidence);
+
+    printf("%d\n", *num);
+    for (int i = 0; i < *num; i++) {
+        fprintf(stderr, "%d, label=%s, score=%f, x1=%f, y1=%f, x2=%f, y2=%f\n", out[i].label, class_name[out[i].label],
+             out[i].score, out[i].xmin, out[i].ymin, out[i].xmax, out[i].ymax);
+    }
+
+    csinn_free_tensor(input);
+    csinn_free_tensor(output);
+}
+
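+// Builds a session from a model file, dispatching on the suffix: ".params" goes
+// straight to csinn_(); for ".bm" the shl_bm_sections table (at a fixed 4128-byte
+// offset) decides between importing a binary model and running csinn_() on the
+// params section (params_offset is counted in 4096-byte pages).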
+void *create_graph(char *params_path) {
+    void *ret;
+    int binary_size;
+    char *params = get_binary_from_file(params_path, &binary_size);
+    if (params == NULL) {
+        return NULL;
+    }
+
+    char *suffix = params_path + (strlen(params_path) - 7);
+    if (strcmp(suffix, ".params") == 0) {
+        // create general graph
+        ret = csinn_(params);
+        free(params);
+        return ret;
+    }
+
+    suffix = params_path + (strlen(params_path) - 3);
+    if (strcmp(suffix, ".bm") == 0) {
+        struct shl_bm_sections *section = (struct shl_bm_sections *)(params + 4128);
+        if (section->graph_offset) {
+            ret = csinn_import_binary_model(params);
+            free(params);
+            return ret;
+        } else {
+            ret = csinn_(params + section->params_offset * 4096);
+            free(params);
+            return ret;
+        }
+    } else {
+        free(params);
+        return NULL;
+    }
+}
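+// Usage sketch (hypothetical path): void *sess = create_graph("shl.hhb.bm");
+// returns NULL on an unrecognized suffix or when the file cannot be read.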
+
+bool is_looping = true;
+
+int main(int argc, char **argv) {
+    char **data_path = NULL;
+    char *params_path = NULL;
+    int input_num = 1;
+    int output_num = 3;
+    int input_group_num = 1;
+    int index = 0;
+    int export_index = 0;
+    PlinkStatus sts = PLINK_STATUS_OK;
+    PlinkPacket sendpkt, recvpkt;
+    PlinkMsg msg;
+    PlinkHandle plink_npu_sink = NULL;
+    ServerParams params;
+    VmemParams vmem_params;
+    void *vmem = NULL;
+    PlinkChannel channel[2];
+    PlinkObjectInfo feat_map;
+    FILE *fp = NULL;
+    int exitcode = 0;
+    int i;
+
+    if (argc < (2 + input_num)) {
+        printf("Please set valide args: ./model.elf model.params "
+                "[tensor1/image1 ...] [tensor2/image2 ...]\n");
+        return -1;
+    } else {
+        if (argc == 3 && get_file_type(argv[2]) == FILE_TXT) {
+            data_path = read_string_from_file(argv[2], &input_group_num);
+            input_group_num /= input_num;
+        } else {
+            data_path = argv + 2;
+            input_group_num = (argc - 2) / input_num;
+        }
+    }
+
+    parseParams(argc, argv, &params);
+    if (checkParams(&params) != 0)
+    {
+        printUsage(argv[0]);
+        return 0;
+    }
+
+    void *sess = create_graph(argv[1]);
+
+    uint8_t *input[input_num];
+    float *finput[input_num];
+    char filename_prefix[FILE_LENGTH] = {0};
+    void *input_aligned[input_num];
+    for (i = 0; i < input_num; i++) {
+        input_aligned[i] = shl_mem_alloc_aligned(input_size[i], 0);
+    }
+    uint64_t start_time, end_time;
+    uint64_t _start_time, _end_time;
+    
+    //int frames = argc > 1 ? atoi(argv[1]) : 1000;
+    int frames = 1000;
+    //char *dumpname = argc > 2 ? argv[2] : NULL;
+    char *dumpname = NULL;
+    if (dumpname != NULL)
+    {
+        fp = fopen(dumpname, "wb");
+        if (fp == NULL)
+            errExit("fopen");
+    }
+
+    if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+        errExit("Failed to create VMEM.");
+
+    int fd_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_mem == -1) {
+      printf("ERROR: failed to open: %s\n", "/dev/mem");
+      return -1;
+    }
+
+    int fd_src_mem = open("/dev/mem", O_RDWR | O_SYNC);
+    if (fd_src_mem < 0) {
+        printf("%s: failed to open /dev/mem", MODULE_NAME);
+        return -1;
+    }
+
+    BBoxOut out[100];
+    int out_num = 0;
+
+    if (PLINK_create(&plink_npu_sink, "/tmp/plink_npu_rgb.test", PLINK_MODE_CLIENT) != PLINK_STATUS_OK)
+        errExit("Failed to create PLINK.");
+
+    if (PLINK_connect(plink_npu_sink, NULL) != PLINK_STATUS_OK)
+        errExit("Failed to connect to server.");
+
+    int frmcnt = 0;
+    // while loop to receive shm picture
+    do {
+        sts = PLINK_recv(plink_npu_sink, 0, &recvpkt);
+        memset(&vmem_params, 0, sizeof(vmem_params));
+
+        if (recvpkt.fd != PLINK_INVALID_FD) { // dmabuf fd
+            vmem_params.fd = recvpkt.fd;
+            if (VMEM_import(vmem, &vmem_params) != VMEM_STATUS_OK)
+                errExit("Failed to import fd.");
+            printf("%s: recvpkt.fd<%d> phy<0x%lx>\n", MODULE_NAME, vmem_params.fd, vmem_params.phy_address);
+        }
+
+        for (i = 0; i < recvpkt.num; i++) {
+            PlinkDescHdr *hdr = (PlinkDescHdr *)(recvpkt.list[i]);
+            if (hdr->type == PLINK_TYPE_2D_RGB) {
+                uint8_t *vaddr = 0;
+                PlinkRGBInfo *pic = (PlinkRGBInfo *)(recvpkt.list[i]);
+                printf("[CLIENT] Received frame %d 0x%010llx: %dx%d, stride = %d\n", 
+                        pic->header.id, pic->bus_address_b, 
+                        pic->img_width, pic->img_height,
+                        pic->stride_b);
+                int size_stride = 3 * pic->stride_b * pic->stride_b;
+                if (recvpkt.fd == PLINK_INVALID_FD) { // physical address
+                    vaddr = (uint8_t *) mmap(0, size_stride, PROT_READ | PROT_WRITE,
+                        MAP_SHARED, fd_mem, pic->bus_address_b);
+                }
+
+                // return the buffer to source
+                msg.header.type = PLINK_TYPE_MESSAGE;
+                msg.header.size = DATA_SIZE(PlinkMsg);
+                msg.msg = hdr->id;
+                sendpkt.list[0] = &msg;
+                sendpkt.num = 1;
+                sendpkt.fd = PLINK_INVALID_FD;
+                if (PLINK_send(plink_npu_sink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                    errExit("Failed to send data.");
+
+                _start_time = shl_get_timespec();
+                if (recvpkt.fd == PLINK_INVALID_FD) { // physical address
+                    csinn_run(vaddr, sess);
+                } else {
+                    csinn_run(&vmem_params.fd, sess);
+                }
+                _end_time = shl_get_timespec();
+                printf("Run graph execution time: %.5fms, FPS=%.2f\n", ((float)(_end_time-_start_time))/1000000,
+                    1000000000.0/((float)(_end_time-_start_time)));
+
+                //snprintf(filename_prefix, FILE_LENGTH, "%s", basename(data_path[i * input_num]));
+                _start_time = shl_get_timespec();
+                postprocess(sess, filename_prefix, out, &out_num);
+                _end_time = shl_get_timespec();
+                printf("postProcess execution time: %.5fms\n", ((float)(_end_time-_start_time))/1000000);
+
+                if (recvpkt.fd == PLINK_INVALID_FD) // physical address
+                    munmap(vaddr, size_stride);
+            }
+            else if (hdr->type == PLINK_TYPE_MESSAGE) {
+                PlinkMsg *msg = (PlinkMsg *)(recvpkt.list[i]);
+                if (msg->msg == PLINK_EXIT_CODE) {
+                    exitcode = 1;
+                    printf("Exit\n");
+                    break;
+                }
+            }
+        }
+        if (recvpkt.fd != PLINK_INVALID_FD) { // dmabuf fd
+            if (VMEM_release(vmem, &vmem_params) != VMEM_STATUS_OK)
+                errExit("Failed to release buffer.");
+            close(recvpkt.fd);
+        }
+
+        frmcnt++;
+#if 0
+        if (frmcnt >= frames)
+        {
+            msg.header.type = PLINK_TYPE_MESSAGE;
+            msg.header.size = DATA_SIZE(PlinkMsg);
+            msg.msg = PLINK_EXIT_CODE;
+            sendpkt.list[0] = &msg;
+            sendpkt.num = 1;
+            sendpkt.fd = PLINK_INVALID_FD;
+            if (PLINK_send(plink_npu_sink, 0, &sendpkt) == PLINK_STATUS_ERROR)
+                errExit("Failed to send data.");
+            break;
+        }
+#endif
+    } while (exitcode == 0);
+
+cleanup:
+    sleep(1); // sleep one second to make sure the server is ready to exit
+    PLINK_close(plink_npu_sink, 0);
+    if (fp != NULL)
+        fclose(fp);
+    
+    for (i = 0; i < input_num; i++) {
+        shl_mem_free(input_aligned[i]);
+    }
+    csinn_session_deinit(sess);
+    csinn_free_session(sess);
+
+    return 0;
+}
+

+ 15338 - 0
test/face_detect/output_120_out0_nchw_1_2_7668_1.h

@@ -0,0 +1,15338 @@
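+/* Auto-generated SSD prior boxes: 7668 floats = 1917 anchors x 4 corner
+ * coordinates, matching OUT_SIZE1 in the tests; the per-anchor order is
+ * presumably (xmin, ymin, xmax, ymax). */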
+float priorbox[] = {
+-0.0736842080950737,
+-0.0736842080950737,
+0.12631578743457794,
+0.12631578743457794,
+-0.11510556936264038,
+-0.04439488798379898,
+0.16773714125156403,
+0.09702646732330322,
+-0.04439488798379898,
+-0.11510556936264038,
+0.09702646732330322,
+0.16773714125156403,
+-0.021052632480859756,
+-0.0736842080950737,
+0.17894737422466278,
+0.12631578743457794,
+-0.06247398629784584,
+-0.04439488798379898,
+0.22036872804164886,
+0.09702646732330322,
+0.008236690424382687,
+-0.11510556936264038,
+0.14965803921222687,
+0.16773714125156403,
+0.031578946858644485,
+-0.0736842080950737,
+0.23157894611358643,
+0.12631578743457794,
+-0.009842408820986748,
+-0.04439488798379898,
+0.2730002999305725,
+0.09702646732330322,
+0.0608682706952095,
+-0.11510556936264038,
+0.2022896260023117,
+0.16773714125156403,
+0.08421052992343903,
+-0.0736842080950737,
+0.28421053290367126,
+0.12631578743457794,
+0.042789168655872345,
+-0.04439488798379898,
+0.32563188672065735,
+0.09702646732330322,
+0.11349985003471375,
+-0.11510556936264038,
+0.25492119789123535,
+0.16773714125156403,
+0.13684210181236267,
+-0.0736842080950737,
+0.3368421196937561,
+0.12631578743457794,
+0.09542074799537659,
+-0.04439488798379898,
+0.3782634735107422,
+0.09702646732330322,
+0.1661314219236374,
+-0.11510556936264038,
+0.3075527846813202,
+0.16773714125156403,
+0.1894736886024475,
+-0.0736842080950737,
+0.38947367668151855,
+0.12631578743457794,
+0.14805233478546143,
+-0.04439488798379898,
+0.43089503049850464,
+0.09702646732330322,
+0.21876300871372223,
+-0.11510556936264038,
+0.36018437147140503,
+0.16773714125156403,
+0.24210526049137115,
+-0.0736842080950737,
+0.4421052634716034,
+0.12631578743457794,
+0.20068390667438507,
+-0.04439488798379898,
+0.4835266172885895,
+0.09702646732330322,
+0.2713945806026459,
+-0.11510556936264038,
+0.4128159284591675,
+0.16773714125156403,
+0.2947368323802948,
+-0.0736842080950737,
+0.49473685026168823,
+0.12631578743457794,
+0.2533154785633087,
+-0.04439488798379898,
+0.5361582040786743,
+0.09702646732330322,
+0.3240261673927307,
+-0.11510556936264038,
+0.4654475152492523,
+0.16773714125156403,
+0.34736841917037964,
+-0.0736842080950737,
+0.5473684072494507,
+0.12631578743457794,
+0.30594706535339355,
+-0.04439488798379898,
+0.5887897610664368,
+0.09702646732330322,
+0.37665775418281555,
+-0.11510556936264038,
+0.5180791020393372,
+0.16773714125156403,
+0.4000000059604645,
+-0.0736842080950737,
+0.6000000238418579,
+0.12631578743457794,
+0.3585786521434784,
+-0.04439488798379898,
+0.641421377658844,
+0.09702646732330322,
+0.429289311170578,
+-0.11510556936264038,
+0.5707106590270996,
+0.16773714125156403,
+0.4526315927505493,
+-0.0736842080950737,
+0.6526315808296204,
+0.12631578743457794,
+0.41121020913124084,
+-0.04439488798379898,
+0.6940529346466064,
+0.09702646732330322,
+0.48192089796066284,
+-0.11510556936264038,
+0.6233422756195068,
+0.16773714125156403,
+0.5052631497383118,
+-0.0736842080950737,
+0.7052631378173828,
+0.12631578743457794,
+0.4638417959213257,
+-0.04439488798379898,
+0.7466844916343689,
+0.09702646732330322,
+0.5345524549484253,
+-0.11510556936264038,
+0.6759738326072693,
+0.16773714125156403,
+0.557894766330719,
+-0.0736842080950737,
+0.75789475440979,
+0.12631578743457794,
+0.5164733529090881,
+-0.04439488798379898,
+0.7993161082267761,
+0.09702646732330322,
+0.5871840715408325,
+-0.11510556936264038,
+0.7286053895950317,
+0.16773714125156403,
+0.6105263233184814,
+-0.0736842080950737,
+0.8105263113975525,
+0.12631578743457794,
+0.5691049695014954,
+-0.04439488798379898,
+0.8519476652145386,
+0.09702646732330322,
+0.639815628528595,
+-0.11510556936264038,
+0.781237006187439,
+0.16773714125156403,
+0.6631578803062439,
+-0.0736842080950737,
+0.8631578683853149,
+0.12631578743457794,
+0.6217365264892578,
+-0.04439488798379898,
+0.904579222202301,
+0.09702646732330322,
+0.6924472451210022,
+-0.11510556936264038,
+0.8338685631752014,
+0.16773714125156403,
+0.7157894968986511,
+-0.0736842080950737,
+0.9157894849777222,
+0.12631578743457794,
+0.674368143081665,
+-0.04439488798379898,
+0.9572108387947083,
+0.09702646732330322,
+0.7450788021087646,
+-0.11510556936264038,
+0.8865001797676086,
+0.16773714125156403,
+0.7684210538864136,
+-0.0736842080950737,
+0.9684210419654846,
+0.12631578743457794,
+0.7269997000694275,
+-0.04439488798379898,
+1.0098423957824707,
+0.09702646732330322,
+0.7977103590965271,
+-0.11510556936264038,
+0.9391317367553711,
+0.16773714125156403,
+0.821052610874176,
+-0.0736842080950737,
+1.021052598953247,
+0.12631578743457794,
+0.7796312570571899,
+-0.04439488798379898,
+1.062474012374878,
+0.09702646732330322,
+0.8503419756889343,
+-0.11510556936264038,
+0.9917632937431335,
+0.16773714125156403,
+0.8736842274665833,
+-0.0736842080950737,
+1.0736842155456543,
+0.12631578743457794,
+0.8322628736495972,
+-0.04439488798379898,
+1.1151055097579956,
+0.09702646732330322,
+0.9029735326766968,
+-0.11510556936264038,
+1.044394850730896,
+0.16773714125156403,
+-0.0736842080950737,
+-0.021052632480859756,
+0.12631578743457794,
+0.17894737422466278,
+-0.11510556936264038,
+0.008236690424382687,
+0.16773714125156403,
+0.14965803921222687,
+-0.04439488798379898,
+-0.06247398629784584,
+0.09702646732330322,
+0.22036872804164886,
+-0.021052632480859756,
+-0.021052632480859756,
+0.17894737422466278,
+0.17894737422466278,
+-0.06247398629784584,
+0.008236690424382687,
+0.22036872804164886,
+0.14965803921222687,
+0.008236690424382687,
+-0.06247398629784584,
+0.14965803921222687,
+0.22036872804164886,
+0.031578946858644485,
+-0.021052632480859756,
+0.23157894611358643,
+0.17894737422466278,
+-0.009842408820986748,
+0.008236690424382687,
+0.2730002999305725,
+0.14965803921222687,
+0.0608682706952095,
+-0.06247398629784584,
+0.2022896260023117,
+0.22036872804164886,
+0.08421052992343903,
+-0.021052632480859756,
+0.28421053290367126,
+0.17894737422466278,
+0.042789168655872345,
+0.008236690424382687,
+0.32563188672065735,
+0.14965803921222687,
+0.11349985003471375,
+-0.06247398629784584,
+0.25492119789123535,
+0.22036872804164886,
+0.13684210181236267,
+-0.021052632480859756,
+0.3368421196937561,
+0.17894737422466278,
+0.09542074799537659,
+0.008236690424382687,
+0.3782634735107422,
+0.14965803921222687,
+0.1661314219236374,
+-0.06247398629784584,
+0.3075527846813202,
+0.22036872804164886,
+0.1894736886024475,
+-0.021052632480859756,
+0.38947367668151855,
+0.17894737422466278,
+0.14805233478546143,
+0.008236690424382687,
+0.43089503049850464,
+0.14965803921222687,
+0.21876300871372223,
+-0.06247398629784584,
+0.36018437147140503,
+0.22036872804164886,
+0.24210526049137115,
+-0.021052632480859756,
+0.4421052634716034,
+0.17894737422466278,
+0.20068390667438507,
+0.008236690424382687,
+0.4835266172885895,
+0.14965803921222687,
+0.2713945806026459,
+-0.06247398629784584,
+0.4128159284591675,
+0.22036872804164886,
+0.2947368323802948,
+-0.021052632480859756,
+0.49473685026168823,
+0.17894737422466278,
+0.2533154785633087,
+0.008236690424382687,
+0.5361582040786743,
+0.14965803921222687,
+0.3240261673927307,
+-0.06247398629784584,
+0.4654475152492523,
+0.22036872804164886,
+0.34736841917037964,
+-0.021052632480859756,
+0.5473684072494507,
+0.17894737422466278,
+0.30594706535339355,
+0.008236690424382687,
+0.5887897610664368,
+0.14965803921222687,
+0.37665775418281555,
+-0.06247398629784584,
+0.5180791020393372,
+0.22036872804164886,
+0.4000000059604645,
+-0.021052632480859756,
+0.6000000238418579,
+0.17894737422466278,
+0.3585786521434784,
+0.008236690424382687,
+0.641421377658844,
+0.14965803921222687,
+0.429289311170578,
+-0.06247398629784584,
+0.5707106590270996,
+0.22036872804164886,
+0.4526315927505493,
+-0.021052632480859756,
+0.6526315808296204,
+0.17894737422466278,
+0.41121020913124084,
+0.008236690424382687,
+0.6940529346466064,
+0.14965803921222687,
+0.48192089796066284,
+-0.06247398629784584,
+0.6233422756195068,
+0.22036872804164886,
+0.5052631497383118,
+-0.021052632480859756,
+0.7052631378173828,
+0.17894737422466278,
+0.4638417959213257,
+0.008236690424382687,
+0.7466844916343689,
+0.14965803921222687,
+0.5345524549484253,
+-0.06247398629784584,
+0.6759738326072693,
+0.22036872804164886,
+0.557894766330719,
+-0.021052632480859756,
+0.75789475440979,
+0.17894737422466278,
+0.5164733529090881,
+0.008236690424382687,
+0.7993161082267761,
+0.14965803921222687,
+0.5871840715408325,
+-0.06247398629784584,
+0.7286053895950317,
+0.22036872804164886,
+0.6105263233184814,
+-0.021052632480859756,
+0.8105263113975525,
+0.17894737422466278,
+0.5691049695014954,
+0.008236690424382687,
+0.8519476652145386,
+0.14965803921222687,
+0.639815628528595,
+-0.06247398629784584,
+0.781237006187439,
+0.22036872804164886,
+0.6631578803062439,
+-0.021052632480859756,
+0.8631578683853149,
+0.17894737422466278,
+0.6217365264892578,
+0.008236690424382687,
+0.904579222202301,
+0.14965803921222687,
+0.6924472451210022,
+-0.06247398629784584,
+0.8338685631752014,
+0.22036872804164886,
+0.7157894968986511,
+-0.021052632480859756,
+0.9157894849777222,
+0.17894737422466278,
+0.674368143081665,
+0.008236690424382687,
+0.9572108387947083,
+0.14965803921222687,
+0.7450788021087646,
+-0.06247398629784584,
+0.8865001797676086,
+0.22036872804164886,
+0.7684210538864136,
+-0.021052632480859756,
+0.9684210419654846,
+0.17894737422466278,
+0.7269997000694275,
+0.008236690424382687,
+1.0098423957824707,
+0.14965803921222687,
+0.7977103590965271,
+-0.06247398629784584,
+0.9391317367553711,
+0.22036872804164886,
+0.821052610874176,
+-0.021052632480859756,
+1.021052598953247,
+0.17894737422466278,
+0.7796312570571899,
+0.008236690424382687,
+1.062474012374878,
+0.14965803921222687,
+0.8503419756889343,
+-0.06247398629784584,
+0.9917632937431335,
+0.22036872804164886,
+0.8736842274665833,
+-0.021052632480859756,
+1.0736842155456543,
+0.17894737422466278,
+0.8322628736495972,
+0.008236690424382687,
+1.1151055097579956,
+0.14965803921222687,
+0.9029735326766968,
+-0.06247398629784584,
+1.044394850730896,
+0.22036872804164886,
+-0.0736842080950737,
+0.031578946858644485,
+0.12631578743457794,
+0.23157894611358643,
+-0.11510556936264038,
+0.0608682706952095,
+0.16773714125156403,
+0.2022896260023117,
+-0.04439488798379898,
+-0.009842408820986748,
+0.09702646732330322,
+0.2730002999305725,
+-0.021052632480859756,
+0.031578946858644485,
+0.17894737422466278,
+0.23157894611358643,
+-0.06247398629784584,
+0.0608682706952095,
+0.22036872804164886,
+0.2022896260023117,
+0.008236690424382687,
+-0.009842408820986748,
+0.14965803921222687,
+0.2730002999305725,
+0.031578946858644485,
+0.031578946858644485,
+0.23157894611358643,
+0.23157894611358643,
+-0.009842408820986748,
+0.0608682706952095,
+0.2730002999305725,
+0.2022896260023117,
+0.0608682706952095,
+-0.009842408820986748,
+0.2022896260023117,
+0.2730002999305725,
+0.08421052992343903,
+0.031578946858644485,
+0.28421053290367126,
+0.23157894611358643,
+0.042789168655872345,
+0.0608682706952095,
+0.32563188672065735,
+0.2022896260023117,
+0.11349985003471375,
+-0.009842408820986748,
+0.25492119789123535,
+0.2730002999305725,
+0.13684210181236267,
+0.031578946858644485,
+0.3368421196937561,
+0.23157894611358643,
+0.09542074799537659,
+0.0608682706952095,
+0.3782634735107422,
+0.2022896260023117,
+0.1661314219236374,
+-0.009842408820986748,
+0.3075527846813202,
+0.2730002999305725,
+0.1894736886024475,
+0.031578946858644485,
+0.38947367668151855,
+0.23157894611358643,
+0.14805233478546143,
+0.0608682706952095,
+0.43089503049850464,
+0.2022896260023117,
+0.21876300871372223,
+-0.009842408820986748,
+0.36018437147140503,
+0.2730002999305725,
+0.24210526049137115,
+0.031578946858644485,
+0.4421052634716034,
+0.23157894611358643,
+0.20068390667438507,
+0.0608682706952095,
+0.4835266172885895,
+0.2022896260023117,
+0.2713945806026459,
+-0.009842408820986748,
+0.4128159284591675,
+0.2730002999305725,
+0.2947368323802948,
+0.031578946858644485,
+0.49473685026168823,
+0.23157894611358643,
+0.2533154785633087,
+0.0608682706952095,
+0.5361582040786743,
+0.2022896260023117,
+0.3240261673927307,
+-0.009842408820986748,
+0.4654475152492523,
+0.2730002999305725,
+0.34736841917037964,
+0.031578946858644485,
+0.5473684072494507,
+0.23157894611358643,
+0.30594706535339355,
+0.0608682706952095,
+0.5887897610664368,
+0.2022896260023117,
+0.37665775418281555,
+-0.009842408820986748,
+0.5180791020393372,
+0.2730002999305725,
+0.4000000059604645,
+0.031578946858644485,
+0.6000000238418579,
+0.23157894611358643,
+0.3585786521434784,
+0.0608682706952095,
+0.641421377658844,
+0.2022896260023117,
+0.429289311170578,
+-0.009842408820986748,
+0.5707106590270996,
+0.2730002999305725,
+0.4526315927505493,
+0.031578946858644485,
+0.6526315808296204,
+0.23157894611358643,
+0.41121020913124084,
+0.0608682706952095,
+0.6940529346466064,
+0.2022896260023117,
+0.48192089796066284,
+-0.009842408820986748,
+0.6233422756195068,
+0.2730002999305725,
+0.5052631497383118,
+0.031578946858644485,
+0.7052631378173828,
+0.23157894611358643,
+0.4638417959213257,
+0.0608682706952095,
+0.7466844916343689,
+0.2022896260023117,
+0.5345524549484253,
+-0.009842408820986748,
+0.6759738326072693,
+0.2730002999305725,
+0.557894766330719,
+0.031578946858644485,
+0.75789475440979,
+0.23157894611358643,
+0.5164733529090881,
+0.0608682706952095,
+0.7993161082267761,
+0.2022896260023117,
+0.5871840715408325,
+-0.009842408820986748,
+0.7286053895950317,
+0.2730002999305725,
+0.6105263233184814,
+0.031578946858644485,
+0.8105263113975525,
+0.23157894611358643,
+0.5691049695014954,
+0.0608682706952095,
+0.8519476652145386,
+0.2022896260023117,
+0.639815628528595,
+-0.009842408820986748,
+0.781237006187439,
+0.2730002999305725,
+0.6631578803062439,
+0.031578946858644485,
+0.8631578683853149,
+0.23157894611358643,
+0.6217365264892578,
+0.0608682706952095,
+0.904579222202301,
+0.2022896260023117,
+0.6924472451210022,
+-0.009842408820986748,
+0.8338685631752014,
+0.2730002999305725,
+0.7157894968986511,
+0.031578946858644485,
+0.9157894849777222,
+0.23157894611358643,
+0.674368143081665,
+0.0608682706952095,
+0.9572108387947083,
+0.2022896260023117,
+0.7450788021087646,
+-0.009842408820986748,
+0.8865001797676086,
+0.2730002999305725,
+0.7684210538864136,
+0.031578946858644485,
+0.9684210419654846,
+0.23157894611358643,
+0.7269997000694275,
+0.0608682706952095,
+1.0098423957824707,
+0.2022896260023117,
+0.7977103590965271,
+-0.009842408820986748,
+0.9391317367553711,
+0.2730002999305725,
+0.821052610874176,
+0.031578946858644485,
+1.021052598953247,
+0.23157894611358643,
+0.7796312570571899,
+0.0608682706952095,
+1.062474012374878,
+0.2022896260023117,
+0.8503419756889343,
+-0.009842408820986748,
+0.9917632937431335,
+0.2730002999305725,
+0.8736842274665833,
+0.031578946858644485,
+1.0736842155456543,
+0.23157894611358643,
+0.8322628736495972,
+0.0608682706952095,
+1.1151055097579956,
+0.2022896260023117,
+0.9029735326766968,
+-0.009842408820986748,
+1.044394850730896,
+0.2730002999305725,
+-0.0736842080950737,
+0.08421052992343903,
+0.12631578743457794,
+0.28421053290367126,
+-0.11510556936264038,
+0.11349985003471375,
+0.16773714125156403,
+0.25492119789123535,
+-0.04439488798379898,
+0.042789168655872345,
+0.09702646732330322,
+0.32563188672065735,
+-0.021052632480859756,
+0.08421052992343903,
+0.17894737422466278,
+0.28421053290367126,
+-0.06247398629784584,
+0.11349985003471375,
+0.22036872804164886,
+0.25492119789123535,
+0.008236690424382687,
+0.042789168655872345,
+0.14965803921222687,
+0.32563188672065735,
+0.031578946858644485,
+0.08421052992343903,
+0.23157894611358643,
+0.28421053290367126,
+-0.009842408820986748,
+0.11349985003471375,
+0.2730002999305725,
+0.25492119789123535,
+0.0608682706952095,
+0.042789168655872345,
+0.2022896260023117,
+0.32563188672065735,
+0.08421052992343903,
+0.08421052992343903,
+0.28421053290367126,
+0.28421053290367126,
+0.042789168655872345,
+0.11349985003471375,
+0.32563188672065735,
+0.25492119789123535,
+0.11349985003471375,
+0.042789168655872345,
+0.25492119789123535,
+0.32563188672065735,
+0.13684210181236267,
+0.08421052992343903,
+0.3368421196937561,
+0.28421053290367126,
+0.09542074799537659,
+0.11349985003471375,
+0.3782634735107422,
+0.25492119789123535,
+0.1661314219236374,
+0.042789168655872345,
+0.3075527846813202,
+0.32563188672065735,
+0.1894736886024475,
+0.08421052992343903,
+0.38947367668151855,
+0.28421053290367126,
+0.14805233478546143,
+0.11349985003471375,
+0.43089503049850464,
+0.25492119789123535,
+0.21876300871372223,
+0.042789168655872345,
+0.36018437147140503,
+0.32563188672065735,
+0.24210526049137115,
+0.08421052992343903,
+0.4421052634716034,
+0.28421053290367126,
+0.20068390667438507,
+0.11349985003471375,
+0.4835266172885895,
+0.25492119789123535,
+0.2713945806026459,
+0.042789168655872345,
+0.4128159284591675,
+0.32563188672065735,
+0.2947368323802948,
+0.08421052992343903,
+0.49473685026168823,
+0.28421053290367126,
+0.2533154785633087,
+0.11349985003471375,
+0.5361582040786743,
+0.25492119789123535,
+0.3240261673927307,
+0.042789168655872345,
+0.4654475152492523,
+0.32563188672065735,
+0.34736841917037964,
+0.08421052992343903,
+0.5473684072494507,
+0.28421053290367126,
+0.30594706535339355,
+0.11349985003471375,
+0.5887897610664368,
+0.25492119789123535,
+0.37665775418281555,
+0.042789168655872345,
+0.5180791020393372,
+0.32563188672065735,
+0.4000000059604645,
+0.08421052992343903,
+0.6000000238418579,
+0.28421053290367126,
+0.3585786521434784,
+0.11349985003471375,
+0.641421377658844,
+0.25492119789123535,
+0.429289311170578,
+0.042789168655872345,
+0.5707106590270996,
+0.32563188672065735,
+0.4526315927505493,
+0.08421052992343903,
+0.6526315808296204,
+0.28421053290367126,
+0.41121020913124084,
+0.11349985003471375,
+0.6940529346466064,
+0.25492119789123535,
+0.48192089796066284,
+0.042789168655872345,
+0.6233422756195068,
+0.32563188672065735,
+0.5052631497383118,
+0.08421052992343903,
+0.7052631378173828,
+0.28421053290367126,
+0.4638417959213257,
+0.11349985003471375,
+0.7466844916343689,
+0.25492119789123535,
+0.5345524549484253,
+0.042789168655872345,
+0.6759738326072693,
+0.32563188672065735,
+0.557894766330719,
+0.08421052992343903,
+0.75789475440979,
+0.28421053290367126,
+0.5164733529090881,
+0.11349985003471375,
+0.7993161082267761,
+0.25492119789123535,
+0.5871840715408325,
+0.042789168655872345,
+0.7286053895950317,
+0.32563188672065735,
+0.6105263233184814,
+0.08421052992343903,
+0.8105263113975525,
+0.28421053290367126,
+0.5691049695014954,
+0.11349985003471375,
+0.8519476652145386,
+0.25492119789123535,
+0.639815628528595,
+0.042789168655872345,
+0.781237006187439,
+0.32563188672065735,
+0.6631578803062439,
+0.08421052992343903,
+0.8631578683853149,
+0.28421053290367126,
+0.6217365264892578,
+0.11349985003471375,
+0.904579222202301,
+0.25492119789123535,
+0.6924472451210022,
+0.042789168655872345,
+0.8338685631752014,
+0.32563188672065735,
+0.7157894968986511,
+0.08421052992343903,
+0.9157894849777222,
+0.28421053290367126,
+0.674368143081665,
+0.11349985003471375,
+0.9572108387947083,
+0.25492119789123535,
+0.7450788021087646,
+0.042789168655872345,
+0.8865001797676086,
+0.32563188672065735,
+0.7684210538864136,
+0.08421052992343903,
+0.9684210419654846,
+0.28421053290367126,
+0.7269997000694275,
+0.11349985003471375,
+1.0098423957824707,
+0.25492119789123535,
+0.7977103590965271,
+0.042789168655872345,
+0.9391317367553711,
+0.32563188672065735,
+0.821052610874176,
+0.08421052992343903,
+1.021052598953247,
+0.28421053290367126,
+0.7796312570571899,
+0.11349985003471375,
+1.062474012374878,
+0.25492119789123535,
+0.8503419756889343,
+0.042789168655872345,
+0.9917632937431335,
+0.32563188672065735,
+0.8736842274665833,
+0.08421052992343903,
+1.0736842155456543,
+0.28421053290367126,
+0.8322628736495972,
+0.11349985003471375,
+1.1151055097579956,
+0.25492119789123535,
+0.9029735326766968,
+0.042789168655872345,
+1.044394850730896,
+0.32563188672065735,
+-0.0736842080950737,
+0.13684210181236267,
+0.12631578743457794,
+0.3368421196937561,
+-0.11510556936264038,
+0.1661314219236374,
+0.16773714125156403,
+0.3075527846813202,
+-0.04439488798379898,
+0.09542074799537659,
+0.09702646732330322,
+0.3782634735107422,
+-0.021052632480859756,
+0.13684210181236267,
+0.17894737422466278,
+0.3368421196937561,
+-0.06247398629784584,
+0.1661314219236374,
+0.22036872804164886,
+0.3075527846813202,
+0.008236690424382687,
+0.09542074799537659,
+0.14965803921222687,
+0.3782634735107422,
+0.031578946858644485,
+0.13684210181236267,
+0.23157894611358643,
+0.3368421196937561,
+-0.009842408820986748,
+0.1661314219236374,
+0.2730002999305725,
+0.3075527846813202,
+0.0608682706952095,
+0.09542074799537659,
+0.2022896260023117,
+0.3782634735107422,
+0.08421052992343903,
+0.13684210181236267,
+0.28421053290367126,
+0.3368421196937561,
+0.042789168655872345,
+0.1661314219236374,
+0.32563188672065735,
+0.3075527846813202,
+0.11349985003471375,
+0.09542074799537659,
+0.25492119789123535,
+0.3782634735107422,
+0.13684210181236267,
+0.13684210181236267,
+0.3368421196937561,
+0.3368421196937561,
+0.09542074799537659,
+0.1661314219236374,
+0.3782634735107422,
+0.3075527846813202,
+0.1661314219236374,
+0.09542074799537659,
+0.3075527846813202,
+0.3782634735107422,
+0.1894736886024475,
+0.13684210181236267,
+0.38947367668151855,
+0.3368421196937561,
+0.14805233478546143,
+0.1661314219236374,
+0.43089503049850464,
+0.3075527846813202,
+0.21876300871372223,
+0.09542074799537659,
+0.36018437147140503,
+0.3782634735107422,
+0.24210526049137115,
+0.13684210181236267,
+0.4421052634716034,
+0.3368421196937561,
+0.20068390667438507,
+0.1661314219236374,
+0.4835266172885895,
+0.3075527846813202,
+0.2713945806026459,
+0.09542074799537659,
+0.4128159284591675,
+0.3782634735107422,
+0.2947368323802948,
+0.13684210181236267,
+0.49473685026168823,
+0.3368421196937561,
+0.2533154785633087,
+0.1661314219236374,
+0.5361582040786743,
+0.3075527846813202,
+0.3240261673927307,
+0.09542074799537659,
+0.4654475152492523,
+0.3782634735107422,
+0.34736841917037964,
+0.13684210181236267,
+0.5473684072494507,
+0.3368421196937561,
+0.30594706535339355,
+0.1661314219236374,
+0.5887897610664368,
+0.3075527846813202,
+0.37665775418281555,
+0.09542074799537659,
+0.5180791020393372,
+0.3782634735107422,
+0.4000000059604645,
+0.13684210181236267,
+0.6000000238418579,
+0.3368421196937561,
+0.3585786521434784,
+0.1661314219236374,
+0.641421377658844,
+0.3075527846813202,
+0.429289311170578,
+0.09542074799537659,
+0.5707106590270996,
+0.3782634735107422,
+0.4526315927505493,
+0.13684210181236267,
+0.6526315808296204,
+0.3368421196937561,
+0.41121020913124084,
+0.1661314219236374,
+0.6940529346466064,
+0.3075527846813202,
+0.48192089796066284,
+0.09542074799537659,
+0.6233422756195068,
+0.3782634735107422,
+0.5052631497383118,
+0.13684210181236267,
+0.7052631378173828,
+0.3368421196937561,
+0.4638417959213257,
+0.1661314219236374,
+0.7466844916343689,
+0.3075527846813202,
+0.5345524549484253,
+0.09542074799537659,
+0.6759738326072693,
+0.3782634735107422,
+0.557894766330719,
+0.13684210181236267,
+0.75789475440979,
+0.3368421196937561,
+0.5164733529090881,
+0.1661314219236374,
+0.7993161082267761,
+0.3075527846813202,
+0.5871840715408325,
+0.09542074799537659,
+0.7286053895950317,
+0.3782634735107422,
+0.6105263233184814,
+0.13684210181236267,
+0.8105263113975525,
+0.3368421196937561,
+0.5691049695014954,
+0.1661314219236374,
+0.8519476652145386,
+0.3075527846813202,
+0.639815628528595,
+0.09542074799537659,
+0.781237006187439,
+0.3782634735107422,
+0.6631578803062439,
+0.13684210181236267,
+0.8631578683853149,
+0.3368421196937561,
+0.6217365264892578,
+0.1661314219236374,
+0.904579222202301,
+0.3075527846813202,
+0.6924472451210022,
+0.09542074799537659,
+0.8338685631752014,
+0.3782634735107422,
+0.7157894968986511,
+0.13684210181236267,
+0.9157894849777222,
+0.3368421196937561,
+0.674368143081665,
+0.1661314219236374,
+0.9572108387947083,
+0.3075527846813202,
+0.7450788021087646,
+0.09542074799537659,
+0.8865001797676086,
+0.3782634735107422,
+0.7684210538864136,
+0.13684210181236267,
+0.9684210419654846,
+0.3368421196937561,
+0.7269997000694275,
+0.1661314219236374,
+1.0098423957824707,
+0.3075527846813202,
+0.7977103590965271,
+0.09542074799537659,
+0.9391317367553711,
+0.3782634735107422,
+0.821052610874176,
+0.13684210181236267,
+1.021052598953247,
+0.3368421196937561,
+0.7796312570571899,
+0.1661314219236374,
+1.062474012374878,
+0.3075527846813202,
+0.8503419756889343,
+0.09542074799537659,
+0.9917632937431335,
+0.3782634735107422,
+0.8736842274665833,
+0.13684210181236267,
+1.0736842155456543,
+0.3368421196937561,
+0.8322628736495972,
+0.1661314219236374,
+1.1151055097579956,
+0.3075527846813202,
+0.9029735326766968,
+0.09542074799537659,
+1.044394850730896,
+0.3782634735107422,
+-0.0736842080950737,
+0.1894736886024475,
+0.12631578743457794,
+0.38947367668151855,
+-0.11510556936264038,
+0.21876300871372223,
+0.16773714125156403,
+0.36018437147140503,
+-0.04439488798379898,
+0.14805233478546143,
+0.09702646732330322,
+0.43089503049850464,
+-0.021052632480859756,
+0.1894736886024475,
+0.17894737422466278,
+0.38947367668151855,
+-0.06247398629784584,
+0.21876300871372223,
+0.22036872804164886,
+0.36018437147140503,
+0.008236690424382687,
+0.14805233478546143,
+0.14965803921222687,
+0.43089503049850464,
+0.031578946858644485,
+0.1894736886024475,
+0.23157894611358643,
+0.38947367668151855,
+-0.009842408820986748,
+0.21876300871372223,
+0.2730002999305725,
+0.36018437147140503,
+0.0608682706952095,
+0.14805233478546143,
+0.2022896260023117,
+0.43089503049850464,
+0.08421052992343903,
+0.1894736886024475,
+0.28421053290367126,
+0.38947367668151855,
+0.042789168655872345,
+0.21876300871372223,
+0.32563188672065735,
+0.36018437147140503,
+0.11349985003471375,
+0.14805233478546143,
+0.25492119789123535,
+0.43089503049850464,
+0.13684210181236267,
+0.1894736886024475,
+0.3368421196937561,
+0.38947367668151855,
+0.09542074799537659,
+0.21876300871372223,
+0.3782634735107422,
+0.36018437147140503,
+0.1661314219236374,
+0.14805233478546143,
+0.3075527846813202,
+0.43089503049850464,
+0.1894736886024475,
+0.1894736886024475,
+0.38947367668151855,
+0.38947367668151855,
+0.14805233478546143,
+0.21876300871372223,
+0.43089503049850464,
+0.36018437147140503,
+0.21876300871372223,
+0.14805233478546143,
+0.36018437147140503,
+0.43089503049850464,
+0.24210526049137115,
+0.1894736886024475,
+0.4421052634716034,
+0.38947367668151855,
+0.20068390667438507,
+0.21876300871372223,
+0.4835266172885895,
+0.36018437147140503,
+0.2713945806026459,
+0.14805233478546143,
+0.4128159284591675,
+0.43089503049850464,
+0.2947368323802948,
+0.1894736886024475,
+0.49473685026168823,
+0.38947367668151855,
+0.2533154785633087,
+0.21876300871372223,
+0.5361582040786743,
+0.36018437147140503,
+0.3240261673927307,
+0.14805233478546143,
+0.4654475152492523,
+0.43089503049850464,
+0.34736841917037964,
+0.1894736886024475,
+0.5473684072494507,
+0.38947367668151855,
+0.30594706535339355,
+0.21876300871372223,
+0.5887897610664368,
+0.36018437147140503,
+0.37665775418281555,
+0.14805233478546143,
+0.5180791020393372,
+0.43089503049850464,
+0.4000000059604645,
+0.1894736886024475,
+0.6000000238418579,
+0.38947367668151855,
+0.3585786521434784,
+0.21876300871372223,
+0.641421377658844,
+0.36018437147140503,
+0.429289311170578,
+0.14805233478546143,
+0.5707106590270996,
+0.43089503049850464,
+0.4526315927505493,
+0.1894736886024475,
+0.6526315808296204,
+0.38947367668151855,
+0.41121020913124084,
+0.21876300871372223,
+0.6940529346466064,
+0.36018437147140503,
+0.48192089796066284,
+0.14805233478546143,
+0.6233422756195068,
+0.43089503049850464,
+0.5052631497383118,
+0.1894736886024475,
+0.7052631378173828,
+0.38947367668151855,
+0.4638417959213257,
+0.21876300871372223,
+0.7466844916343689,
+0.36018437147140503,
+0.5345524549484253,
+0.14805233478546143,
+0.6759738326072693,
+0.43089503049850464,
+0.557894766330719,
+0.1894736886024475,
+0.75789475440979,
+0.38947367668151855,
+0.5164733529090881,
+0.21876300871372223,
+0.7993161082267761,
+0.36018437147140503,
+0.5871840715408325,
+0.14805233478546143,
+0.7286053895950317,
+0.43089503049850464,
+0.6105263233184814,
+0.1894736886024475,
+0.8105263113975525,
+0.38947367668151855,
+0.5691049695014954,
+0.21876300871372223,
+0.8519476652145386,
+0.36018437147140503,
+0.639815628528595,
+0.14805233478546143,
+0.781237006187439,
+0.43089503049850464,
+0.6631578803062439,
+0.1894736886024475,
+0.8631578683853149,
+0.38947367668151855,
+0.6217365264892578,
+0.21876300871372223,
+0.904579222202301,
+0.36018437147140503,
+0.6924472451210022,
+0.14805233478546143,
+0.8338685631752014,
+0.43089503049850464,
+0.7157894968986511,
+0.1894736886024475,
+0.9157894849777222,
+0.38947367668151855,
+0.674368143081665,
+0.21876300871372223,
+0.9572108387947083,
+0.36018437147140503,
+0.7450788021087646,
+0.14805233478546143,
+0.8865001797676086,
+0.43089503049850464,
+0.7684210538864136,
+0.1894736886024475,
+0.9684210419654846,
+0.38947367668151855,
+0.7269997000694275,
+0.21876300871372223,
+1.0098423957824707,
+0.36018437147140503,
+0.7977103590965271,
+0.14805233478546143,
+0.9391317367553711,
+0.43089503049850464,
+0.821052610874176,
+0.1894736886024475,
+1.021052598953247,
+0.38947367668151855,
+0.7796312570571899,
+0.21876300871372223,
+1.062474012374878,
+0.36018437147140503,
+0.8503419756889343,
+0.14805233478546143,
+0.9917632937431335,
+0.43089503049850464,
+0.8736842274665833,
+0.1894736886024475,
+1.0736842155456543,
+0.38947367668151855,
+0.8322628736495972,
+0.21876300871372223,
+1.1151055097579956,
+0.36018437147140503,
+0.9029735326766968,
+0.14805233478546143,
+1.044394850730896,
+0.43089503049850464,
+-0.0736842080950737,
+0.24210526049137115,
+0.12631578743457794,
+0.4421052634716034,
+-0.11510556936264038,
+0.2713945806026459,
+0.16773714125156403,
+0.4128159284591675,
+-0.04439488798379898,
+0.20068390667438507,
+0.09702646732330322,
+0.4835266172885895,
+-0.021052632480859756,
+0.24210526049137115,
+0.17894737422466278,
+0.4421052634716034,
+-0.06247398629784584,
+0.2713945806026459,
+0.22036872804164886,
+0.4128159284591675,
+0.008236690424382687,
+0.20068390667438507,
+0.14965803921222687,
+0.4835266172885895,
+0.031578946858644485,
+0.24210526049137115,
+0.23157894611358643,
+0.4421052634716034,
+-0.009842408820986748,
+0.2713945806026459,
+0.2730002999305725,
+0.4128159284591675,
+0.0608682706952095,
+0.20068390667438507,
+0.2022896260023117,
+0.4835266172885895,
+0.08421052992343903,
+0.24210526049137115,
+0.28421053290367126,
+0.4421052634716034,
+0.042789168655872345,
+0.2713945806026459,
+0.32563188672065735,
+0.4128159284591675,
+0.11349985003471375,
+0.20068390667438507,
+0.25492119789123535,
+0.4835266172885895,
+0.13684210181236267,
+0.24210526049137115,
+0.3368421196937561,
+0.4421052634716034,
+0.09542074799537659,
+0.2713945806026459,
+0.3782634735107422,
+0.4128159284591675,
+0.1661314219236374,
+0.20068390667438507,
+0.3075527846813202,
+0.4835266172885895,
+0.1894736886024475,
+0.24210526049137115,
+0.38947367668151855,
+0.4421052634716034,
+0.14805233478546143,
+0.2713945806026459,
+0.43089503049850464,
+0.4128159284591675,
+0.21876300871372223,
+0.20068390667438507,
+0.36018437147140503,
+0.4835266172885895,
+0.24210526049137115,
+0.24210526049137115,
+0.4421052634716034,
+0.4421052634716034,
+0.20068390667438507,
+0.2713945806026459,
+0.4835266172885895,
+0.4128159284591675,
+0.2713945806026459,
+0.20068390667438507,
+0.4128159284591675,
+0.4835266172885895,
+0.2947368323802948,
+0.24210526049137115,
+0.49473685026168823,
+0.4421052634716034,
+0.2533154785633087,
+0.2713945806026459,
+0.5361582040786743,
+0.4128159284591675,
+0.3240261673927307,
+0.20068390667438507,
+0.4654475152492523,
+0.4835266172885895,
+0.34736841917037964,
+0.24210526049137115,
+0.5473684072494507,
+0.4421052634716034,
+0.30594706535339355,
+0.2713945806026459,
+0.5887897610664368,
+0.4128159284591675,
+0.37665775418281555,
+0.20068390667438507,
+0.5180791020393372,
+0.4835266172885895,
+0.4000000059604645,
+0.24210526049137115,
+0.6000000238418579,
+0.4421052634716034,
+0.3585786521434784,
+0.2713945806026459,
+0.641421377658844,
+0.4128159284591675,
+0.429289311170578,
+0.20068390667438507,
+0.5707106590270996,
+0.4835266172885895,
+0.4526315927505493,
+0.24210526049137115,
+0.6526315808296204,
+0.4421052634716034,
+0.41121020913124084,
+0.2713945806026459,
+0.6940529346466064,
+0.4128159284591675,
+0.48192089796066284,
+0.20068390667438507,
+0.6233422756195068,
+0.4835266172885895,
+0.5052631497383118,
+0.24210526049137115,
+0.7052631378173828,
+0.4421052634716034,
+0.4638417959213257,
+0.2713945806026459,
+0.7466844916343689,
+0.4128159284591675,
+0.5345524549484253,
+0.20068390667438507,
+0.6759738326072693,
+0.4835266172885895,
+0.557894766330719,
+0.24210526049137115,
+0.75789475440979,
+0.4421052634716034,
+0.5164733529090881,
+0.2713945806026459,
+0.7993161082267761,
+0.4128159284591675,
+0.5871840715408325,
+0.20068390667438507,
+0.7286053895950317,
+0.4835266172885895,
+0.6105263233184814,
+0.24210526049137115,
+0.8105263113975525,
+0.4421052634716034,
+0.5691049695014954,
+0.2713945806026459,
+0.8519476652145386,
+0.4128159284591675,
+0.639815628528595,
+0.20068390667438507,
+0.781237006187439,
+0.4835266172885895,
+0.6631578803062439,
+0.24210526049137115,
+0.8631578683853149,
+0.4421052634716034,
+0.6217365264892578,
+0.2713945806026459,
+0.904579222202301,
+0.4128159284591675,
+0.6924472451210022,
+0.20068390667438507,
+0.8338685631752014,
+0.4835266172885895,
+0.7157894968986511,
+0.24210526049137115,
+0.9157894849777222,
+0.4421052634716034,
+0.674368143081665,
+0.2713945806026459,
+0.9572108387947083,
+0.4128159284591675,
+0.7450788021087646,
+0.20068390667438507,
+0.8865001797676086,
+0.4835266172885895,
+0.7684210538864136,
+0.24210526049137115,
+0.9684210419654846,
+0.4421052634716034,
+0.7269997000694275,
+0.2713945806026459,
+1.0098423957824707,
+0.4128159284591675,
+0.7977103590965271,
+0.20068390667438507,
+0.9391317367553711,
+0.4835266172885895,
+0.821052610874176,
+0.24210526049137115,
+1.021052598953247,
+0.4421052634716034,
+0.7796312570571899,
+0.2713945806026459,
+1.062474012374878,
+0.4128159284591675,
+0.8503419756889343,
+0.20068390667438507,
+0.9917632937431335,
+0.4835266172885895,
+0.8736842274665833,
+0.24210526049137115,
+1.0736842155456543,
+0.4421052634716034,
+0.8322628736495972,
+0.2713945806026459,
+1.1151055097579956,
+0.4128159284591675,
+0.9029735326766968,
+0.20068390667438507,
+1.044394850730896,
+0.4835266172885895,
+-0.0736842080950737,
+0.2947368323802948,
+0.12631578743457794,
+0.49473685026168823,
+-0.11510556936264038,
+0.3240261673927307,
+0.16773714125156403,
+0.4654475152492523,
+-0.04439488798379898,
+0.2533154785633087,
+0.09702646732330322,
+0.5361582040786743,
+-0.021052632480859756,
+0.2947368323802948,
+0.17894737422466278,
+0.49473685026168823,
+-0.06247398629784584,
+0.3240261673927307,
+0.22036872804164886,
+0.4654475152492523,
+0.008236690424382687,
+0.2533154785633087,
+0.14965803921222687,
+0.5361582040786743,
+0.031578946858644485,
+0.2947368323802948,
+0.23157894611358643,
+0.49473685026168823,
+-0.009842408820986748,
+0.3240261673927307,
+0.2730002999305725,
+0.4654475152492523,
+0.0608682706952095,
+0.2533154785633087,
+0.2022896260023117,
+0.5361582040786743,
+0.08421052992343903,
+0.2947368323802948,
+0.28421053290367126,
+0.49473685026168823,
+0.042789168655872345,
+0.3240261673927307,
+0.32563188672065735,
+0.4654475152492523,
+0.11349985003471375,
+0.2533154785633087,
+0.25492119789123535,
+0.5361582040786743,
+0.13684210181236267,
+0.2947368323802948,
+0.3368421196937561,
+0.49473685026168823,
+0.09542074799537659,
+0.3240261673927307,
+0.3782634735107422,
+0.4654475152492523,
+0.1661314219236374,
+0.2533154785633087,
+0.3075527846813202,
+0.5361582040786743,
+0.1894736886024475,
+0.2947368323802948,
+0.38947367668151855,
+0.49473685026168823,
+0.14805233478546143,
+0.3240261673927307,
+0.43089503049850464,
+0.4654475152492523,
+0.21876300871372223,
+0.2533154785633087,
+0.36018437147140503,
+0.5361582040786743,
+0.24210526049137115,
+0.2947368323802948,
+0.4421052634716034,
+0.49473685026168823,
+0.20068390667438507,
+0.3240261673927307,
+0.4835266172885895,
+0.4654475152492523,
+0.2713945806026459,
+0.2533154785633087,
+0.4128159284591675,
+0.5361582040786743,
+0.2947368323802948,
+0.2947368323802948,
+0.49473685026168823,
+0.49473685026168823,
+0.2533154785633087,
+0.3240261673927307,
+0.5361582040786743,
+0.4654475152492523,
+0.3240261673927307,
+0.2533154785633087,
+0.4654475152492523,
+0.5361582040786743,
+0.34736841917037964,
+0.2947368323802948,
+0.5473684072494507,
+0.49473685026168823,
+0.30594706535339355,
+0.3240261673927307,
+0.5887897610664368,
+0.4654475152492523,
+0.37665775418281555,
+0.2533154785633087,
+0.5180791020393372,
+0.5361582040786743,
+0.4000000059604645,
+0.2947368323802948,
+0.6000000238418579,
+0.49473685026168823,
+0.3585786521434784,
+0.3240261673927307,
+0.641421377658844,
+0.4654475152492523,
+0.429289311170578,
+0.2533154785633087,
+0.5707106590270996,
+0.5361582040786743,
+0.4526315927505493,
+0.2947368323802948,
+0.6526315808296204,
+0.49473685026168823,
+0.41121020913124084,
+0.3240261673927307,
+0.6940529346466064,
+0.4654475152492523,
+0.48192089796066284,
+0.2533154785633087,
+0.6233422756195068,
+0.5361582040786743,
+0.5052631497383118,
+0.2947368323802948,
+0.7052631378173828,
+0.49473685026168823,
+0.4638417959213257,
+0.3240261673927307,
+0.7466844916343689,
+0.4654475152492523,
+0.5345524549484253,
+0.2533154785633087,
+0.6759738326072693,
+0.5361582040786743,
+0.557894766330719,
+0.2947368323802948,
+0.75789475440979,
+0.49473685026168823,
+0.5164733529090881,
+0.3240261673927307,
+0.7993161082267761,
+0.4654475152492523,
+0.5871840715408325,
+0.2533154785633087,
+0.7286053895950317,
+0.5361582040786743,
+0.6105263233184814,
+0.2947368323802948,
+0.8105263113975525,
+0.49473685026168823,
+0.5691049695014954,
+0.3240261673927307,
+0.8519476652145386,
+0.4654475152492523,
+0.639815628528595,
+0.2533154785633087,
+0.781237006187439,
+0.5361582040786743,
+0.6631578803062439,
+0.2947368323802948,
+0.8631578683853149,
+0.49473685026168823,
+0.6217365264892578,
+0.3240261673927307,
+0.904579222202301,
+0.4654475152492523,
+0.6924472451210022,
+0.2533154785633087,
+0.8338685631752014,
+0.5361582040786743,
+0.7157894968986511,
+0.2947368323802948,
+0.9157894849777222,
+0.49473685026168823,
+0.674368143081665,
+0.3240261673927307,
+0.9572108387947083,
+0.4654475152492523,
+0.7450788021087646,
+0.2533154785633087,
+0.8865001797676086,
+0.5361582040786743,
+0.7684210538864136,
+0.2947368323802948,
+0.9684210419654846,
+0.49473685026168823,
+0.7269997000694275,
+0.3240261673927307,
+1.0098423957824707,
+0.4654475152492523,
+0.7977103590965271,
+0.2533154785633087,
+0.9391317367553711,
+0.5361582040786743,
+0.821052610874176,
+0.2947368323802948,
+1.021052598953247,
+0.49473685026168823,
+0.7796312570571899,
+0.3240261673927307,
+1.062474012374878,
+0.4654475152492523,
+0.8503419756889343,
+0.2533154785633087,
+0.9917632937431335,
+0.5361582040786743,
+0.8736842274665833,
+0.2947368323802948,
+1.0736842155456543,
+0.49473685026168823,
+0.8322628736495972,
+0.3240261673927307,
+1.1151055097579956,
+0.4654475152492523,
+0.9029735326766968,
+0.2533154785633087,
+1.044394850730896,
+0.5361582040786743,
+-0.0736842080950737,
+0.34736841917037964,
+0.12631578743457794,
+0.5473684072494507,
+-0.11510556936264038,
+0.37665775418281555,
+0.16773714125156403,
+0.5180791020393372,
+-0.04439488798379898,
+0.30594706535339355,
+0.09702646732330322,
+0.5887897610664368,
+-0.021052632480859756,
+0.34736841917037964,
+0.17894737422466278,
+0.5473684072494507,
+-0.06247398629784584,
+0.37665775418281555,
+0.22036872804164886,
+0.5180791020393372,
+0.008236690424382687,
+0.30594706535339355,
+0.14965803921222687,
+0.5887897610664368,
+0.031578946858644485,
+0.34736841917037964,
+0.23157894611358643,
+0.5473684072494507,
+-0.009842408820986748,
+0.37665775418281555,
+0.2730002999305725,
+0.5180791020393372,
+0.0608682706952095,
+0.30594706535339355,
+0.2022896260023117,
+0.5887897610664368,
+0.08421052992343903,
+0.34736841917037964,
+0.28421053290367126,
+0.5473684072494507,
+0.042789168655872345,
+0.37665775418281555,
+0.32563188672065735,
+0.5180791020393372,
+0.11349985003471375,
+0.30594706535339355,
+0.25492119789123535,
+0.5887897610664368,
+0.13684210181236267,
+0.34736841917037964,
+0.3368421196937561,
+0.5473684072494507,
+0.09542074799537659,
+0.37665775418281555,
+0.3782634735107422,
+0.5180791020393372,
+0.1661314219236374,
+0.30594706535339355,
+0.3075527846813202,
+0.5887897610664368,
+0.1894736886024475,
+0.34736841917037964,
+0.38947367668151855,
+0.5473684072494507,
+0.14805233478546143,
+0.37665775418281555,
+0.43089503049850464,
+0.5180791020393372,
+0.21876300871372223,
+0.30594706535339355,
+0.36018437147140503,
+0.5887897610664368,
+0.24210526049137115,
+0.34736841917037964,
+0.4421052634716034,
+0.5473684072494507,
+0.20068390667438507,
+0.37665775418281555,
+0.4835266172885895,
+0.5180791020393372,
+0.2713945806026459,
+0.30594706535339355,
+0.4128159284591675,
+0.5887897610664368,
+0.2947368323802948,
+0.34736841917037964,
+0.49473685026168823,
+0.5473684072494507,
+0.2533154785633087,
+0.37665775418281555,
+0.5361582040786743,
+0.5180791020393372,
+0.3240261673927307,
+0.30594706535339355,
+0.4654475152492523,
+0.5887897610664368,
+0.34736841917037964,
+0.34736841917037964,
+0.5473684072494507,
+0.5473684072494507,
+0.30594706535339355,
+0.37665775418281555,
+0.5887897610664368,
+0.5180791020393372,
+0.37665775418281555,
+0.30594706535339355,
+0.5180791020393372,
+0.5887897610664368,
+0.4000000059604645,
+0.34736841917037964,
+0.6000000238418579,
+0.5473684072494507,
+0.3585786521434784,
+0.37665775418281555,
+0.641421377658844,
+0.5180791020393372,
+0.429289311170578,
+0.30594706535339355,
+0.5707106590270996,
+0.5887897610664368,
+0.4526315927505493,
+0.34736841917037964,
+0.6526315808296204,
+0.5473684072494507,
+0.41121020913124084,
+0.37665775418281555,
+0.6940529346466064,
+0.5180791020393372,
+0.48192089796066284,
+0.30594706535339355,
+0.6233422756195068,
+0.5887897610664368,
+0.5052631497383118,
+0.34736841917037964,
+0.7052631378173828,
+0.5473684072494507,
+0.4638417959213257,
+0.37665775418281555,
+0.7466844916343689,
+0.5180791020393372,
+0.5345524549484253,
+0.30594706535339355,
+0.6759738326072693,
+0.5887897610664368,
+0.557894766330719,
+0.34736841917037964,
+0.75789475440979,
+0.5473684072494507,
+0.5164733529090881,
+0.37665775418281555,
+0.7993161082267761,
+0.5180791020393372,
+0.5871840715408325,
+0.30594706535339355,
+0.7286053895950317,
+0.5887897610664368,
+0.6105263233184814,
+0.34736841917037964,
+0.8105263113975525,
+0.5473684072494507,
+0.5691049695014954,
+0.37665775418281555,
+0.8519476652145386,
+0.5180791020393372,
+0.639815628528595,
+0.30594706535339355,
+0.781237006187439,
+0.5887897610664368,
+0.6631578803062439,
+0.34736841917037964,
+0.8631578683853149,
+0.5473684072494507,
+0.6217365264892578,
+0.37665775418281555,
+0.904579222202301,
+0.5180791020393372,
+0.6924472451210022,
+0.30594706535339355,
+0.8338685631752014,
+0.5887897610664368,
+0.7157894968986511,
+0.34736841917037964,
+0.9157894849777222,
+0.5473684072494507,
+0.674368143081665,
+0.37665775418281555,
+0.9572108387947083,
+0.5180791020393372,
+0.7450788021087646,
+0.30594706535339355,
+0.8865001797676086,
+0.5887897610664368,
+0.7684210538864136,
+0.34736841917037964,
+0.9684210419654846,
+0.5473684072494507,
+0.7269997000694275,
+0.37665775418281555,
+1.0098423957824707,
+0.5180791020393372,
+0.7977103590965271,
+0.30594706535339355,
+0.9391317367553711,
+0.5887897610664368,
+0.821052610874176,
+0.34736841917037964,
+1.021052598953247,
+0.5473684072494507,
+0.7796312570571899,
+0.37665775418281555,
+1.062474012374878,
+0.5180791020393372,
+0.8503419756889343,
+0.30594706535339355,
+0.9917632937431335,
+0.5887897610664368,
+0.8736842274665833,
+0.34736841917037964,
+1.0736842155456543,
+0.5473684072494507,
+0.8322628736495972,
+0.37665775418281555,
+1.1151055097579956,
+0.5180791020393372,
+0.9029735326766968,
+0.30594706535339355,
+1.044394850730896,
+0.5887897610664368,
+-0.0736842080950737,
+0.4000000059604645,
+0.12631578743457794,
+0.6000000238418579,
+-0.11510556936264038,
+0.429289311170578,
+0.16773714125156403,
+0.5707106590270996,
+-0.04439488798379898,
+0.3585786521434784,
+0.09702646732330322,
+0.641421377658844,
+-0.021052632480859756,
+0.4000000059604645,
+0.17894737422466278,
+0.6000000238418579,
+-0.06247398629784584,
+0.429289311170578,
+0.22036872804164886,
+0.5707106590270996,
+0.008236690424382687,
+0.3585786521434784,
+0.14965803921222687,
+0.641421377658844,
+0.031578946858644485,
+0.4000000059604645,
+0.23157894611358643,
+0.6000000238418579,
+-0.009842408820986748,
+0.429289311170578,
+0.2730002999305725,
+0.5707106590270996,
+0.0608682706952095,
+0.3585786521434784,
+0.2022896260023117,
+0.641421377658844,
+0.08421052992343903,
+0.4000000059604645,
+0.28421053290367126,
+0.6000000238418579,
+0.042789168655872345,
+0.429289311170578,
+0.32563188672065735,
+0.5707106590270996,
+0.11349985003471375,
+0.3585786521434784,
+0.25492119789123535,
+0.641421377658844,
+0.13684210181236267,
+0.4000000059604645,
+0.3368421196937561,
+0.6000000238418579,
+0.09542074799537659,
+0.429289311170578,
+0.3782634735107422,
+0.5707106590270996,
+0.1661314219236374,
+0.3585786521434784,
+0.3075527846813202,
+0.641421377658844,
+0.1894736886024475,
+0.4000000059604645,
+0.38947367668151855,
+0.6000000238418579,
+0.14805233478546143,
+0.429289311170578,
+0.43089503049850464,
+0.5707106590270996,
+0.21876300871372223,
+0.3585786521434784,
+0.36018437147140503,
+0.641421377658844,
+0.24210526049137115,
+0.4000000059604645,
+0.4421052634716034,
+0.6000000238418579,
+0.20068390667438507,
+0.429289311170578,
+0.4835266172885895,
+0.5707106590270996,
+0.2713945806026459,
+0.3585786521434784,
+0.4128159284591675,
+0.641421377658844,
+0.2947368323802948,
+0.4000000059604645,
+0.49473685026168823,
+0.6000000238418579,
+0.2533154785633087,
+0.429289311170578,
+0.5361582040786743,
+0.5707106590270996,
+0.3240261673927307,
+0.3585786521434784,
+0.4654475152492523,
+0.641421377658844,
+0.34736841917037964,
+0.4000000059604645,
+0.5473684072494507,
+0.6000000238418579,
+0.30594706535339355,
+0.429289311170578,
+0.5887897610664368,
+0.5707106590270996,
+0.37665775418281555,
+0.3585786521434784,
+0.5180791020393372,
+0.641421377658844,
+0.4000000059604645,
+0.4000000059604645,
+0.6000000238418579,
+0.6000000238418579,
+0.3585786521434784,
+0.429289311170578,
+0.641421377658844,
+0.5707106590270996,
+0.429289311170578,
+0.3585786521434784,
+0.5707106590270996,
+0.641421377658844,
+0.4526315927505493,
+0.4000000059604645,
+0.6526315808296204,
+0.6000000238418579,
+0.41121020913124084,
+0.429289311170578,
+0.6940529346466064,
+0.5707106590270996,
+0.48192089796066284,
+0.3585786521434784,
+0.6233422756195068,
+0.641421377658844,
+0.5052631497383118,
+0.4000000059604645,
+0.7052631378173828,
+0.6000000238418579,
+0.4638417959213257,
+0.429289311170578,
+0.7466844916343689,
+0.5707106590270996,
+0.5345524549484253,
+0.3585786521434784,
+0.6759738326072693,
+0.641421377658844,
+0.557894766330719,
+0.4000000059604645,
+0.75789475440979,
+0.6000000238418579,
+0.5164733529090881,
+0.429289311170578,
+0.7993161082267761,
+0.5707106590270996,
+0.5871840715408325,
+0.3585786521434784,
+0.7286053895950317,
+0.641421377658844,
+0.6105263233184814,
+0.4000000059604645,
+0.8105263113975525,
+0.6000000238418579,
+0.5691049695014954,
+0.429289311170578,
+0.8519476652145386,
+0.5707106590270996,
+0.639815628528595,
+0.3585786521434784,
+0.781237006187439,
+0.641421377658844,
+0.6631578803062439,
+0.4000000059604645,
+0.8631578683853149,
+0.6000000238418579,
+0.6217365264892578,
+0.429289311170578,
+0.904579222202301,
+0.5707106590270996,
+0.6924472451210022,
+0.3585786521434784,
+0.8338685631752014,
+0.641421377658844,
+0.7157894968986511,
+0.4000000059604645,
+0.9157894849777222,
+0.6000000238418579,
+0.674368143081665,
+0.429289311170578,
+0.9572108387947083,
+0.5707106590270996,
+0.7450788021087646,
+0.3585786521434784,
+0.8865001797676086,
+0.641421377658844,
+0.7684210538864136,
+0.4000000059604645,
+0.9684210419654846,
+0.6000000238418579,
+0.7269997000694275,
+0.429289311170578,
+1.0098423957824707,
+0.5707106590270996,
+0.7977103590965271,
+0.3585786521434784,
+0.9391317367553711,
+0.641421377658844,
+0.821052610874176,
+0.4000000059604645,
+1.021052598953247,
+0.6000000238418579,
+0.7796312570571899,
+0.429289311170578,
+1.062474012374878,
+0.5707106590270996,
+0.8503419756889343,
+0.3585786521434784,
+0.9917632937431335,
+0.641421377658844,
+0.8736842274665833,
+0.4000000059604645,
+1.0736842155456543,
+0.6000000238418579,
+0.8322628736495972,
+0.429289311170578,
+1.1151055097579956,
+0.5707106590270996,
+0.9029735326766968,
+0.3585786521434784,
+1.044394850730896,
+0.641421377658844,
+-0.0736842080950737,
+0.4526315927505493,
+0.12631578743457794,
+0.6526315808296204,
+-0.11510556936264038,
+0.48192089796066284,
+0.16773714125156403,
+0.6233422756195068,
+-0.04439488798379898,
+0.41121020913124084,
+0.09702646732330322,
+0.6940529346466064,
+-0.021052632480859756,
+0.4526315927505493,
+0.17894737422466278,
+0.6526315808296204,
+-0.06247398629784584,
+0.48192089796066284,
+0.22036872804164886,
+0.6233422756195068,
+0.008236690424382687,
+0.41121020913124084,
+0.14965803921222687,
+0.6940529346466064,
+0.031578946858644485,
+0.4526315927505493,
+0.23157894611358643,
+0.6526315808296204,
+-0.009842408820986748,
+0.48192089796066284,
+0.2730002999305725,
+0.6233422756195068,
+0.0608682706952095,
+0.41121020913124084,
+0.2022896260023117,
+0.6940529346466064,
+0.08421052992343903,
+0.4526315927505493,
+0.28421053290367126,
+0.6526315808296204,
+0.042789168655872345,
+0.48192089796066284,
+0.32563188672065735,
+0.6233422756195068,
+0.11349985003471375,
+0.41121020913124084,
+0.25492119789123535,
+0.6940529346466064,
+0.13684210181236267,
+0.4526315927505493,
+0.3368421196937561,
+0.6526315808296204,
+0.09542074799537659,
+0.48192089796066284,
+0.3782634735107422,
+0.6233422756195068,
+0.1661314219236374,
+0.41121020913124084,
+0.3075527846813202,
+0.6940529346466064,
+0.1894736886024475,
+0.4526315927505493,
+0.38947367668151855,
+0.6526315808296204,
+0.14805233478546143,
+0.48192089796066284,
+0.43089503049850464,
+0.6233422756195068,
+0.21876300871372223,
+0.41121020913124084,
+0.36018437147140503,
+0.6940529346466064,
+0.24210526049137115,
+0.4526315927505493,
+0.4421052634716034,
+0.6526315808296204,
+0.20068390667438507,
+0.48192089796066284,
+0.4835266172885895,
+0.6233422756195068,
+0.2713945806026459,
+0.41121020913124084,
+0.4128159284591675,
+0.6940529346466064,
+0.2947368323802948,
+0.4526315927505493,
+0.49473685026168823,
+0.6526315808296204,
+0.2533154785633087,
+0.48192089796066284,
+0.5361582040786743,
+0.6233422756195068,
+0.3240261673927307,
+0.41121020913124084,
+0.4654475152492523,
+0.6940529346466064,
+0.34736841917037964,
+0.4526315927505493,
+0.5473684072494507,
+0.6526315808296204,
+0.30594706535339355,
+0.48192089796066284,
+0.5887897610664368,
+0.6233422756195068,
+0.37665775418281555,
+0.41121020913124084,
+0.5180791020393372,
+0.6940529346466064,
+0.4000000059604645,
+0.4526315927505493,
+0.6000000238418579,
+0.6526315808296204,
+0.3585786521434784,
+0.48192089796066284,
+0.641421377658844,
+0.6233422756195068,
+0.429289311170578,
+0.41121020913124084,
+0.5707106590270996,
+0.6940529346466064,
+0.4526315927505493,
+0.4526315927505493,
+0.6526315808296204,
+0.6526315808296204,
+0.41121020913124084,
+0.48192089796066284,
+0.6940529346466064,
+0.6233422756195068,
+0.48192089796066284,
+0.41121020913124084,
+0.6233422756195068,
+0.6940529346466064,
+0.5052631497383118,
+0.4526315927505493,
+0.7052631378173828,
+0.6526315808296204,
+0.4638417959213257,
+0.48192089796066284,
+0.7466844916343689,
+0.6233422756195068,
+0.5345524549484253,
+0.41121020913124084,
+0.6759738326072693,
+0.6940529346466064,
+0.557894766330719,
+0.4526315927505493,
+0.75789475440979,
+0.6526315808296204,
+0.5164733529090881,
+0.48192089796066284,
+0.7993161082267761,
+0.6233422756195068,
+0.5871840715408325,
+0.41121020913124084,
+0.7286053895950317,
+0.6940529346466064,
+0.6105263233184814,
+0.4526315927505493,
+0.8105263113975525,
+0.6526315808296204,
+0.5691049695014954,
+0.48192089796066284,
+0.8519476652145386,
+0.6233422756195068,
+0.639815628528595,
+0.41121020913124084,
+0.781237006187439,
+0.6940529346466064,
+0.6631578803062439,
+0.4526315927505493,
+0.8631578683853149,
+0.6526315808296204,
+0.6217365264892578,
+0.48192089796066284,
+0.904579222202301,
+0.6233422756195068,
+0.6924472451210022,
+0.41121020913124084,
+0.8338685631752014,
+0.6940529346466064,
+0.7157894968986511,
+0.4526315927505493,
+0.9157894849777222,
+0.6526315808296204,
+0.674368143081665,
+0.48192089796066284,
+0.9572108387947083,
+0.6233422756195068,
+0.7450788021087646,
+0.41121020913124084,
+0.8865001797676086,
+0.6940529346466064,
+0.7684210538864136,
+0.4526315927505493,
+0.9684210419654846,
+0.6526315808296204,
+0.7269997000694275,
+0.48192089796066284,
+1.0098423957824707,
+0.6233422756195068,
+0.7977103590965271,
+0.41121020913124084,
+0.9391317367553711,
+0.6940529346466064,
+0.821052610874176,
+0.4526315927505493,
+1.021052598953247,
+0.6526315808296204,
+0.7796312570571899,
+0.48192089796066284,
+1.062474012374878,
+0.6233422756195068,
+0.8503419756889343,
+0.41121020913124084,
+0.9917632937431335,
+0.6940529346466064,
+0.8736842274665833,
+0.4526315927505493,
+1.0736842155456543,
+0.6526315808296204,
+0.8322628736495972,
+0.48192089796066284,
+1.1151055097579956,
+0.6233422756195068,
+0.9029735326766968,
+0.41121020913124084,
+1.044394850730896,
+0.6940529346466064,
+-0.0736842080950737,
+0.5052631497383118,
+0.12631578743457794,
+0.7052631378173828,
+-0.11510556936264038,
+0.5345524549484253,
+0.16773714125156403,
+0.6759738326072693,
+-0.04439488798379898,
+0.4638417959213257,
+0.09702646732330322,
+0.7466844916343689,
+-0.021052632480859756,
+0.5052631497383118,
+0.17894737422466278,
+0.7052631378173828,
+-0.06247398629784584,
+0.5345524549484253,
+0.22036872804164886,
+0.6759738326072693,
+0.008236690424382687,
+0.4638417959213257,
+0.14965803921222687,
+0.7466844916343689,
+0.031578946858644485,
+0.5052631497383118,
+0.23157894611358643,
+0.7052631378173828,
+-0.009842408820986748,
+0.5345524549484253,
+0.2730002999305725,
+0.6759738326072693,
+0.0608682706952095,
+0.4638417959213257,
+0.2022896260023117,
+0.7466844916343689,
+0.08421052992343903,
+0.5052631497383118,
+0.28421053290367126,
+0.7052631378173828,
+0.042789168655872345,
+0.5345524549484253,
+0.32563188672065735,
+0.6759738326072693,
+0.11349985003471375,
+0.4638417959213257,
+0.25492119789123535,
+0.7466844916343689,
+0.13684210181236267,
+0.5052631497383118,
+0.3368421196937561,
+0.7052631378173828,
+0.09542074799537659,
+0.5345524549484253,
+0.3782634735107422,
+0.6759738326072693,
+0.1661314219236374,
+0.4638417959213257,
+0.3075527846813202,
+0.7466844916343689,
+0.1894736886024475,
+0.5052631497383118,
+0.38947367668151855,
+0.7052631378173828,
+0.14805233478546143,
+0.5345524549484253,
+0.43089503049850464,
+0.6759738326072693,
+0.21876300871372223,
+0.4638417959213257,
+0.36018437147140503,
+0.7466844916343689,
+0.24210526049137115,
+0.5052631497383118,
+0.4421052634716034,
+0.7052631378173828,
+0.20068390667438507,
+0.5345524549484253,
+0.4835266172885895,
+0.6759738326072693,
+0.2713945806026459,
+0.4638417959213257,
+0.4128159284591675,
+0.7466844916343689,
+0.2947368323802948,
+0.5052631497383118,
+0.49473685026168823,
+0.7052631378173828,
+0.2533154785633087,
+0.5345524549484253,
+0.5361582040786743,
+0.6759738326072693,
+0.3240261673927307,
+0.4638417959213257,
+0.4654475152492523,
+0.7466844916343689,
+0.34736841917037964,
+0.5052631497383118,
+0.5473684072494507,
+0.7052631378173828,
+0.30594706535339355,
+0.5345524549484253,
+0.5887897610664368,
+0.6759738326072693,
+0.37665775418281555,
+0.4638417959213257,
+0.5180791020393372,
+0.7466844916343689,
+0.4000000059604645,
+0.5052631497383118,
+0.6000000238418579,
+0.7052631378173828,
+0.3585786521434784,
+0.5345524549484253,
+0.641421377658844,
+0.6759738326072693,
+0.429289311170578,
+0.4638417959213257,
+0.5707106590270996,
+0.7466844916343689,
+0.4526315927505493,
+0.5052631497383118,
+0.6526315808296204,
+0.7052631378173828,
+0.41121020913124084,
+0.5345524549484253,
+0.6940529346466064,
+0.6759738326072693,
+0.48192089796066284,
+0.4638417959213257,
+0.6233422756195068,
+0.7466844916343689,
+0.5052631497383118,
+0.5052631497383118,
+0.7052631378173828,
+0.7052631378173828,
+0.4638417959213257,
+0.5345524549484253,
+0.7466844916343689,
+0.6759738326072693,
+0.5345524549484253,
+0.4638417959213257,
+0.6759738326072693,
+0.7466844916343689,
+0.557894766330719,
+0.5052631497383118,
+0.75789475440979,
+0.7052631378173828,
+0.5164733529090881,
+0.5345524549484253,
+0.7993161082267761,
+0.6759738326072693,
+0.5871840715408325,
+0.4638417959213257,
+0.7286053895950317,
+0.7466844916343689,
+0.6105263233184814,
+0.5052631497383118,
+0.8105263113975525,
+0.7052631378173828,
+0.5691049695014954,
+0.5345524549484253,
+0.8519476652145386,
+0.6759738326072693,
+0.639815628528595,
+0.4638417959213257,
+0.781237006187439,
+0.7466844916343689,
+0.6631578803062439,
+0.5052631497383118,
+0.8631578683853149,
+0.7052631378173828,
+0.6217365264892578,
+0.5345524549484253,
+0.904579222202301,
+0.6759738326072693,
+0.6924472451210022,
+0.4638417959213257,
+0.8338685631752014,
+0.7466844916343689,
+0.7157894968986511,
+0.5052631497383118,
+0.9157894849777222,
+0.7052631378173828,
+0.674368143081665,
+0.5345524549484253,
+0.9572108387947083,
+0.6759738326072693,
+0.7450788021087646,
+0.4638417959213257,
+0.8865001797676086,
+0.7466844916343689,
+0.7684210538864136,
+0.5052631497383118,
+0.9684210419654846,
+0.7052631378173828,
+0.7269997000694275,
+0.5345524549484253,
+1.0098423957824707,
+0.6759738326072693,
+0.7977103590965271,
+0.4638417959213257,
+0.9391317367553711,
+0.7466844916343689,
+0.821052610874176,
+0.5052631497383118,
+1.021052598953247,
+0.7052631378173828,
+0.7796312570571899,
+0.5345524549484253,
+1.062474012374878,
+0.6759738326072693,
+0.8503419756889343,
+0.4638417959213257,
+0.9917632937431335,
+0.7466844916343689,
+0.8736842274665833,
+0.5052631497383118,
+1.0736842155456543,
+0.7052631378173828,
+0.8322628736495972,
+0.5345524549484253,
+1.1151055097579956,
+0.6759738326072693,
+0.9029735326766968,
+0.4638417959213257,
+1.044394850730896,
+0.7466844916343689,
+-0.0736842080950737,
+0.557894766330719,
+0.12631578743457794,
+0.75789475440979,
+-0.11510556936264038,
+0.5871840715408325,
+0.16773714125156403,
+0.7286053895950317,
+-0.04439488798379898,
+0.5164733529090881,
+0.09702646732330322,
+0.7993161082267761,
+-0.021052632480859756,
+0.557894766330719,
+0.17894737422466278,
+0.75789475440979,
+-0.06247398629784584,
+0.5871840715408325,
+0.22036872804164886,
+0.7286053895950317,
+0.008236690424382687,
+0.5164733529090881,
+0.14965803921222687,
+0.7993161082267761,
+0.031578946858644485,
+0.557894766330719,
+0.23157894611358643,
+0.75789475440979,
+-0.009842408820986748,
+0.5871840715408325,
+0.2730002999305725,
+0.7286053895950317,
+0.0608682706952095,
+0.5164733529090881,
+0.2022896260023117,
+0.7993161082267761,
+0.08421052992343903,
+0.557894766330719,
+0.28421053290367126,
+0.75789475440979,
+0.042789168655872345,
+0.5871840715408325,
+0.32563188672065735,
+0.7286053895950317,
+0.11349985003471375,
+0.5164733529090881,
+0.25492119789123535,
+0.7993161082267761,
+0.13684210181236267,
+0.557894766330719,
+0.3368421196937561,
+0.75789475440979,
+0.09542074799537659,
+0.5871840715408325,
+0.3782634735107422,
+0.7286053895950317,
+0.1661314219236374,
+0.5164733529090881,
+0.3075527846813202,
+0.7993161082267761,
+0.1894736886024475,
+0.557894766330719,
+0.38947367668151855,
+0.75789475440979,
+0.14805233478546143,
+0.5871840715408325,
+0.43089503049850464,
+0.7286053895950317,
+0.21876300871372223,
+0.5164733529090881,
+0.36018437147140503,
+0.7993161082267761,
+0.24210526049137115,
+0.557894766330719,
+0.4421052634716034,
+0.75789475440979,
+0.20068390667438507,
+0.5871840715408325,
+0.4835266172885895,
+0.7286053895950317,
+0.2713945806026459,
+0.5164733529090881,
+0.4128159284591675,
+0.7993161082267761,
+0.2947368323802948,
+0.557894766330719,
+0.49473685026168823,
+0.75789475440979,
+0.2533154785633087,
+0.5871840715408325,
+0.5361582040786743,
+0.7286053895950317,
+0.3240261673927307,
+0.5164733529090881,
+0.4654475152492523,
+0.7993161082267761,
+0.34736841917037964,
+0.557894766330719,
+0.5473684072494507,
+0.75789475440979,
+0.30594706535339355,
+0.5871840715408325,
+0.5887897610664368,
+0.7286053895950317,
+0.37665775418281555,
+0.5164733529090881,
+0.5180791020393372,
+0.7993161082267761,
+0.4000000059604645,
+0.557894766330719,
+0.6000000238418579,
+0.75789475440979,
+0.3585786521434784,
+0.5871840715408325,
+0.641421377658844,
+0.7286053895950317,
+0.429289311170578,
+0.5164733529090881,
+0.5707106590270996,
+0.7993161082267761,
+0.4526315927505493,
+0.557894766330719,
+0.6526315808296204,
+0.75789475440979,
+0.41121020913124084,
+0.5871840715408325,
+0.6940529346466064,
+0.7286053895950317,
+0.48192089796066284,
+0.5164733529090881,
+0.6233422756195068,
+0.7993161082267761,
+0.5052631497383118,
+0.557894766330719,
+0.7052631378173828,
+0.75789475440979,
+0.4638417959213257,
+0.5871840715408325,
+0.7466844916343689,
+0.7286053895950317,
+0.5345524549484253,
+0.5164733529090881,
+0.6759738326072693,
+0.7993161082267761,
+0.557894766330719,
+0.557894766330719,
+0.75789475440979,
+0.75789475440979,
+0.5164733529090881,
+0.5871840715408325,
+0.7993161082267761,
+0.7286053895950317,
+0.5871840715408325,
+0.5164733529090881,
+0.7286053895950317,
+0.7993161082267761,
+0.6105263233184814,
+0.557894766330719,
+0.8105263113975525,
+0.75789475440979,
+0.5691049695014954,
+0.5871840715408325,
+0.8519476652145386,
+0.7286053895950317,
+0.639815628528595,
+0.5164733529090881,
+0.781237006187439,
+0.7993161082267761,
+0.6631578803062439,
+0.557894766330719,
+0.8631578683853149,
+0.75789475440979,
+0.6217365264892578,
+0.5871840715408325,
+0.904579222202301,
+0.7286053895950317,
+0.6924472451210022,
+0.5164733529090881,
+0.8338685631752014,
+0.7993161082267761,
+0.7157894968986511,
+0.557894766330719,
+0.9157894849777222,
+0.75789475440979,
+0.674368143081665,
+0.5871840715408325,
+0.9572108387947083,
+0.7286053895950317,
+0.7450788021087646,
+0.5164733529090881,
+0.8865001797676086,
+0.7993161082267761,
+0.7684210538864136,
+0.557894766330719,
+0.9684210419654846,
+0.75789475440979,
+0.7269997000694275,
+0.5871840715408325,
+1.0098423957824707,
+0.7286053895950317,
+0.7977103590965271,
+0.5164733529090881,
+0.9391317367553711,
+0.7993161082267761,
+0.821052610874176,
+0.557894766330719,
+1.021052598953247,
+0.75789475440979,
+0.7796312570571899,
+0.5871840715408325,
+1.062474012374878,
+0.7286053895950317,
+0.8503419756889343,
+0.5164733529090881,
+0.9917632937431335,
+0.7993161082267761,
+0.8736842274665833,
+0.557894766330719,
+1.0736842155456543,
+0.75789475440979,
+0.8322628736495972,
+0.5871840715408325,
+1.1151055097579956,
+0.7286053895950317,
+0.9029735326766968,
+0.5164733529090881,
+1.044394850730896,
+0.7993161082267761,
+-0.0736842080950737,
+0.6105263233184814,
+0.12631578743457794,
+0.8105263113975525,
+-0.11510556936264038,
+0.639815628528595,
+0.16773714125156403,
+0.781237006187439,
+-0.04439488798379898,
+0.5691049695014954,
+0.09702646732330322,
+0.8519476652145386,
+-0.021052632480859756,
+0.6105263233184814,
+0.17894737422466278,
+0.8105263113975525,
+-0.06247398629784584,
+0.639815628528595,
+0.22036872804164886,
+0.781237006187439,
+0.008236690424382687,
+0.5691049695014954,
+0.14965803921222687,
+0.8519476652145386,
+0.031578946858644485,
+0.6105263233184814,
+0.23157894611358643,
+0.8105263113975525,
+-0.009842408820986748,
+0.639815628528595,
+0.2730002999305725,
+0.781237006187439,
+0.0608682706952095,
+0.5691049695014954,
+0.2022896260023117,
+0.8519476652145386,
+0.08421052992343903,
+0.6105263233184814,
+0.28421053290367126,
+0.8105263113975525,
+0.042789168655872345,
+0.639815628528595,
+0.32563188672065735,
+0.781237006187439,
+0.11349985003471375,
+0.5691049695014954,
+0.25492119789123535,
+0.8519476652145386,
+0.13684210181236267,
+0.6105263233184814,
+0.3368421196937561,
+0.8105263113975525,
+0.09542074799537659,
+0.639815628528595,
+0.3782634735107422,
+0.781237006187439,
+0.1661314219236374,
+0.5691049695014954,
+0.3075527846813202,
+0.8519476652145386,
+0.1894736886024475,
+0.6105263233184814,
+0.38947367668151855,
+0.8105263113975525,
+0.14805233478546143,
+0.639815628528595,
+0.43089503049850464,
+0.781237006187439,
+0.21876300871372223,
+0.5691049695014954,
+0.36018437147140503,
+0.8519476652145386,
+0.24210526049137115,
+0.6105263233184814,
+0.4421052634716034,
+0.8105263113975525,
+0.20068390667438507,
+0.639815628528595,
+0.4835266172885895,
+0.781237006187439,
+0.2713945806026459,
+0.5691049695014954,
+0.4128159284591675,
+0.8519476652145386,
+0.2947368323802948,
+0.6105263233184814,
+0.49473685026168823,
+0.8105263113975525,
+0.2533154785633087,
+0.639815628528595,
+0.5361582040786743,
+0.781237006187439,
+0.3240261673927307,
+0.5691049695014954,
+0.4654475152492523,
+0.8519476652145386,
+0.34736841917037964,
+0.6105263233184814,
+0.5473684072494507,
+0.8105263113975525,
+0.30594706535339355,
+0.639815628528595,
+0.5887897610664368,
+0.781237006187439,
+0.37665775418281555,
+0.5691049695014954,
+0.5180791020393372,
+0.8519476652145386,
+0.4000000059604645,
+0.6105263233184814,
+0.6000000238418579,
+0.8105263113975525,
+0.3585786521434784,
+0.639815628528595,
+0.641421377658844,
+0.781237006187439,
+0.429289311170578,
+0.5691049695014954,
+0.5707106590270996,
+0.8519476652145386,
+0.4526315927505493,
+0.6105263233184814,
+0.6526315808296204,
+0.8105263113975525,
+0.41121020913124084,
+0.639815628528595,
+0.6940529346466064,
+0.781237006187439,
+0.48192089796066284,
+0.5691049695014954,
+0.6233422756195068,
+0.8519476652145386,
+0.5052631497383118,
+0.6105263233184814,
+0.7052631378173828,
+0.8105263113975525,
+0.4638417959213257,
+0.639815628528595,
+0.7466844916343689,
+0.781237006187439,
+0.5345524549484253,
+0.5691049695014954,
+0.6759738326072693,
+0.8519476652145386,
+0.557894766330719,
+0.6105263233184814,
+0.75789475440979,
+0.8105263113975525,
+0.5164733529090881,
+0.639815628528595,
+0.7993161082267761,
+0.781237006187439,
+0.5871840715408325,
+0.5691049695014954,
+0.7286053895950317,
+0.8519476652145386,
+0.6105263233184814,
+0.6105263233184814,
+0.8105263113975525,
+0.8105263113975525,
+0.5691049695014954,
+0.639815628528595,
+0.8519476652145386,
+0.781237006187439,
+0.639815628528595,
+0.5691049695014954,
+0.781237006187439,
+0.8519476652145386,
+0.6631578803062439,
+0.6105263233184814,
+0.8631578683853149,
+0.8105263113975525,
+0.6217365264892578,
+0.639815628528595,
+0.904579222202301,
+0.781237006187439,
+0.6924472451210022,
+0.5691049695014954,
+0.8338685631752014,
+0.8519476652145386,
+0.7157894968986511,
+0.6105263233184814,
+0.9157894849777222,
+0.8105263113975525,
+0.674368143081665,
+0.639815628528595,
+0.9572108387947083,
+0.781237006187439,
+0.7450788021087646,
+0.5691049695014954,
+0.8865001797676086,
+0.8519476652145386,
+0.7684210538864136,
+0.6105263233184814,
+0.9684210419654846,
+0.8105263113975525,
+0.7269997000694275,
+0.639815628528595,
+1.0098423957824707,
+0.781237006187439,
+0.7977103590965271,
+0.5691049695014954,
+0.9391317367553711,
+0.8519476652145386,
+0.821052610874176,
+0.6105263233184814,
+1.021052598953247,
+0.8105263113975525,
+0.7796312570571899,
+0.639815628528595,
+1.062474012374878,
+0.781237006187439,
+0.8503419756889343,
+0.5691049695014954,
+0.9917632937431335,
+0.8519476652145386,
+0.8736842274665833,
+0.6105263233184814,
+1.0736842155456543,
+0.8105263113975525,
+0.8322628736495972,
+0.639815628528595,
+1.1151055097579956,
+0.781237006187439,
+0.9029735326766968,
+0.5691049695014954,
+1.044394850730896,
+0.8519476652145386,
+-0.0736842080950737,
+0.6631578803062439,
+0.12631578743457794,
+0.8631578683853149,
+-0.11510556936264038,
+0.6924472451210022,
+0.16773714125156403,
+0.8338685631752014,
+-0.04439488798379898,
+0.6217365264892578,
+0.09702646732330322,
+0.904579222202301,
+-0.021052632480859756,
+0.6631578803062439,
+0.17894737422466278,
+0.8631578683853149,
+-0.06247398629784584,
+0.6924472451210022,
+0.22036872804164886,
+0.8338685631752014,
+0.008236690424382687,
+0.6217365264892578,
+0.14965803921222687,
+0.904579222202301,
+0.031578946858644485,
+0.6631578803062439,
+0.23157894611358643,
+0.8631578683853149,
+-0.009842408820986748,
+0.6924472451210022,
+0.2730002999305725,
+0.8338685631752014,
+0.0608682706952095,
+0.6217365264892578,
+0.2022896260023117,
+0.904579222202301,
+0.08421052992343903,
+0.6631578803062439,
+0.28421053290367126,
+0.8631578683853149,
+0.042789168655872345,
+0.6924472451210022,
+0.32563188672065735,
+0.8338685631752014,
+0.11349985003471375,
+0.6217365264892578,
+0.25492119789123535,
+0.904579222202301,
+0.13684210181236267,
+0.6631578803062439,
+0.3368421196937561,
+0.8631578683853149,
+0.09542074799537659,
+0.6924472451210022,
+0.3782634735107422,
+0.8338685631752014,
+0.1661314219236374,
+0.6217365264892578,
+0.3075527846813202,
+0.904579222202301,
+0.1894736886024475,
+0.6631578803062439,
+0.38947367668151855,
+0.8631578683853149,
+0.14805233478546143,
+0.6924472451210022,
+0.43089503049850464,
+0.8338685631752014,
+0.21876300871372223,
+0.6217365264892578,
+0.36018437147140503,
+0.904579222202301,
+0.24210526049137115,
+0.6631578803062439,
+0.4421052634716034,
+0.8631578683853149,
+0.20068390667438507,
+0.6924472451210022,
+0.4835266172885895,
+0.8338685631752014,
+0.2713945806026459,
+0.6217365264892578,
+0.4128159284591675,
+0.904579222202301,
+0.2947368323802948,
+0.6631578803062439,
+0.49473685026168823,
+0.8631578683853149,
+0.2533154785633087,
+0.6924472451210022,
+0.5361582040786743,
+0.8338685631752014,
+0.3240261673927307,
+0.6217365264892578,
+0.4654475152492523,
+0.904579222202301,
+0.34736841917037964,
+0.6631578803062439,
+0.5473684072494507,
+0.8631578683853149,
+0.30594706535339355,
+0.6924472451210022,
+0.5887897610664368,
+0.8338685631752014,
+0.37665775418281555,
+0.6217365264892578,
+0.5180791020393372,
+0.904579222202301,
+0.4000000059604645,
+0.6631578803062439,
+0.6000000238418579,
+0.8631578683853149,
+0.3585786521434784,
+0.6924472451210022,
+0.641421377658844,
+0.8338685631752014,
+0.429289311170578,
+0.6217365264892578,
+0.5707106590270996,
+0.904579222202301,
+0.4526315927505493,
+0.6631578803062439,
+0.6526315808296204,
+0.8631578683853149,
+0.41121020913124084,
+0.6924472451210022,
+0.6940529346466064,
+0.8338685631752014,
+0.48192089796066284,
+0.6217365264892578,
+0.6233422756195068,
+0.904579222202301,
+0.5052631497383118,
+0.6631578803062439,
+0.7052631378173828,
+0.8631578683853149,
+0.4638417959213257,
+0.6924472451210022,
+0.7466844916343689,
+0.8338685631752014,
+0.5345524549484253,
+0.6217365264892578,
+0.6759738326072693,
+0.904579222202301,
+0.557894766330719,
+0.6631578803062439,
+0.75789475440979,
+0.8631578683853149,
+0.5164733529090881,
+0.6924472451210022,
+0.7993161082267761,
+0.8338685631752014,
+0.5871840715408325,
+0.6217365264892578,
+0.7286053895950317,
+0.904579222202301,
+0.6105263233184814,
+0.6631578803062439,
+0.8105263113975525,
+0.8631578683853149,
+0.5691049695014954,
+0.6924472451210022,
+0.8519476652145386,
+0.8338685631752014,
+0.639815628528595,
+0.6217365264892578,
+0.781237006187439,
+0.904579222202301,
+0.6631578803062439,
+0.6631578803062439,
+0.8631578683853149,
+0.8631578683853149,
+0.6217365264892578,
+0.6924472451210022,
+0.904579222202301,
+0.8338685631752014,
+0.6924472451210022,
+0.6217365264892578,
+0.8338685631752014,
+0.904579222202301,
+0.7157894968986511,
+0.6631578803062439,
+0.9157894849777222,
+0.8631578683853149,
+0.674368143081665,
+0.6924472451210022,
+0.9572108387947083,
+0.8338685631752014,
+0.7450788021087646,
+0.6217365264892578,
+0.8865001797676086,
+0.904579222202301,
+0.7684210538864136,
+0.6631578803062439,
+0.9684210419654846,
+0.8631578683853149,
+0.7269997000694275,
+0.6924472451210022,
+1.0098423957824707,
+0.8338685631752014,
+0.7977103590965271,
+0.6217365264892578,
+0.9391317367553711,
+0.904579222202301,
+0.821052610874176,
+0.6631578803062439,
+1.021052598953247,
+0.8631578683853149,
+0.7796312570571899,
+0.6924472451210022,
+1.062474012374878,
+0.8338685631752014,
+0.8503419756889343,
+0.6217365264892578,
+0.9917632937431335,
+0.904579222202301,
+0.8736842274665833,
+0.6631578803062439,
+1.0736842155456543,
+0.8631578683853149,
+0.8322628736495972,
+0.6924472451210022,
+1.1151055097579956,
+0.8338685631752014,
+0.9029735326766968,
+0.6217365264892578,
+1.044394850730896,
+0.904579222202301,
+-0.0736842080950737,
+0.7157894968986511,
+0.12631578743457794,
+0.9157894849777222,
+-0.11510556936264038,
+0.7450788021087646,
+0.16773714125156403,
+0.8865001797676086,
+-0.04439488798379898,
+0.674368143081665,
+0.09702646732330322,
+0.9572108387947083,
+-0.021052632480859756,
+0.7157894968986511,
+0.17894737422466278,
+0.9157894849777222,
+-0.06247398629784584,
+0.7450788021087646,
+0.22036872804164886,
+0.8865001797676086,
+0.008236690424382687,
+0.674368143081665,
+0.14965803921222687,
+0.9572108387947083,
+0.031578946858644485,
+0.7157894968986511,
+0.23157894611358643,
+0.9157894849777222,
+-0.009842408820986748,
+0.7450788021087646,
+0.2730002999305725,
+0.8865001797676086,
+0.0608682706952095,
+0.674368143081665,
+0.2022896260023117,
+0.9572108387947083,
+0.08421052992343903,
+0.7157894968986511,
+0.28421053290367126,
+0.9157894849777222,
+0.042789168655872345,
+0.7450788021087646,
+0.32563188672065735,
+0.8865001797676086,
+0.11349985003471375,
+0.674368143081665,
+0.25492119789123535,
+0.9572108387947083,
+0.13684210181236267,
+0.7157894968986511,
+0.3368421196937561,
+0.9157894849777222,
+0.09542074799537659,
+0.7450788021087646,
+0.3782634735107422,
+0.8865001797676086,
+0.1661314219236374,
+0.674368143081665,
+0.3075527846813202,
+0.9572108387947083,
+0.1894736886024475,
+0.7157894968986511,
+0.38947367668151855,
+0.9157894849777222,
+0.14805233478546143,
+0.7450788021087646,
+0.43089503049850464,
+0.8865001797676086,
+0.21876300871372223,
+0.674368143081665,
+0.36018437147140503,
+0.9572108387947083,
+0.24210526049137115,
+0.7157894968986511,
+0.4421052634716034,
+0.9157894849777222,
+0.20068390667438507,
+0.7450788021087646,
+0.4835266172885895,
+0.8865001797676086,
+0.2713945806026459,
+0.674368143081665,
+0.4128159284591675,
+0.9572108387947083,
+0.2947368323802948,
+0.7157894968986511,
+0.49473685026168823,
+0.9157894849777222,
+0.2533154785633087,
+0.7450788021087646,
+0.5361582040786743,
+0.8865001797676086,
+0.3240261673927307,
+0.674368143081665,
+0.4654475152492523,
+0.9572108387947083,
+0.34736841917037964,
+0.7157894968986511,
+0.5473684072494507,
+0.9157894849777222,
+0.30594706535339355,
+0.7450788021087646,
+0.5887897610664368,
+0.8865001797676086,
+0.37665775418281555,
+0.674368143081665,
+0.5180791020393372,
+0.9572108387947083,
+0.4000000059604645,
+0.7157894968986511,
+0.6000000238418579,
+0.9157894849777222,
+0.3585786521434784,
+0.7450788021087646,
+0.641421377658844,
+0.8865001797676086,
+0.429289311170578,
+0.674368143081665,
+0.5707106590270996,
+0.9572108387947083,
+0.4526315927505493,
+0.7157894968986511,
+0.6526315808296204,
+0.9157894849777222,
+0.41121020913124084,
+0.7450788021087646,
+0.6940529346466064,
+0.8865001797676086,
+0.48192089796066284,
+0.674368143081665,
+0.6233422756195068,
+0.9572108387947083,
+0.5052631497383118,
+0.7157894968986511,
+0.7052631378173828,
+0.9157894849777222,
+0.4638417959213257,
+0.7450788021087646,
+0.7466844916343689,
+0.8865001797676086,
+0.5345524549484253,
+0.674368143081665,
+0.6759738326072693,
+0.9572108387947083,
+0.557894766330719,
+0.7157894968986511,
+0.75789475440979,
+0.9157894849777222,
+0.5164733529090881,
+0.7450788021087646,
+0.7993161082267761,
+0.8865001797676086,
+0.5871840715408325,
+0.674368143081665,
+0.7286053895950317,
+0.9572108387947083,
+0.6105263233184814,
+0.7157894968986511,
+0.8105263113975525,
+0.9157894849777222,
+0.5691049695014954,
+0.7450788021087646,
+0.8519476652145386,
+0.8865001797676086,
+0.639815628528595,
+0.674368143081665,
+0.781237006187439,
+0.9572108387947083,
+0.6631578803062439,
+0.7157894968986511,
+0.8631578683853149,
+0.9157894849777222,
+0.6217365264892578,
+0.7450788021087646,
+0.904579222202301,
+0.8865001797676086,
+0.6924472451210022,
+0.674368143081665,
+0.8338685631752014,
+0.9572108387947083,
+0.7157894968986511,
+0.7157894968986511,
+0.9157894849777222,
+0.9157894849777222,
+0.674368143081665,
+0.7450788021087646,
+0.9572108387947083,
+0.8865001797676086,
+0.7450788021087646,
+0.674368143081665,
+0.8865001797676086,
+0.9572108387947083,
+0.7684210538864136,
+0.7157894968986511,
+0.9684210419654846,
+0.9157894849777222,
+0.7269997000694275,
+0.7450788021087646,
+1.0098423957824707,
+0.8865001797676086,
+0.7977103590965271,
+0.674368143081665,
+0.9391317367553711,
+0.9572108387947083,
+0.821052610874176,
+0.7157894968986511,
+1.021052598953247,
+0.9157894849777222,
+0.7796312570571899,
+0.7450788021087646,
+1.062474012374878,
+0.8865001797676086,
+0.8503419756889343,
+0.674368143081665,
+0.9917632937431335,
+0.9572108387947083,
+0.8736842274665833,
+0.7157894968986511,
+1.0736842155456543,
+0.9157894849777222,
+0.8322628736495972,
+0.7450788021087646,
+1.1151055097579956,
+0.8865001797676086,
+0.9029735326766968,
+0.674368143081665,
+1.044394850730896,
+0.9572108387947083,
+-0.0736842080950737,
+0.7684210538864136,
+0.12631578743457794,
+0.9684210419654846,
+-0.11510556936264038,
+0.7977103590965271,
+0.16773714125156403,
+0.9391317367553711,
+-0.04439488798379898,
+0.7269997000694275,
+0.09702646732330322,
+1.0098423957824707,
+-0.021052632480859756,
+0.7684210538864136,
+0.17894737422466278,
+0.9684210419654846,
+-0.06247398629784584,
+0.7977103590965271,
+0.22036872804164886,
+0.9391317367553711,
+0.008236690424382687,
+0.7269997000694275,
+0.14965803921222687,
+1.0098423957824707,
+0.031578946858644485,
+0.7684210538864136,
+0.23157894611358643,
+0.9684210419654846,
+-0.009842408820986748,
+0.7977103590965271,
+0.2730002999305725,
+0.9391317367553711,
+0.0608682706952095,
+0.7269997000694275,
+0.2022896260023117,
+1.0098423957824707,
+0.08421052992343903,
+0.7684210538864136,
+0.28421053290367126,
+0.9684210419654846,
+0.042789168655872345,
+0.7977103590965271,
+0.32563188672065735,
+0.9391317367553711,
+0.11349985003471375,
+0.7269997000694275,
+0.25492119789123535,
+1.0098423957824707,
+0.13684210181236267,
+0.7684210538864136,
+0.3368421196937561,
+0.9684210419654846,
+0.09542074799537659,
+0.7977103590965271,
+0.3782634735107422,
+0.9391317367553711,
+0.1661314219236374,
+0.7269997000694275,
+0.3075527846813202,
+1.0098423957824707,
+0.1894736886024475,
+0.7684210538864136,
+0.38947367668151855,
+0.9684210419654846,
+0.14805233478546143,
+0.7977103590965271,
+0.43089503049850464,
+0.9391317367553711,
+0.21876300871372223,
+0.7269997000694275,
+0.36018437147140503,
+1.0098423957824707,
+0.24210526049137115,
+0.7684210538864136,
+0.4421052634716034,
+0.9684210419654846,
+0.20068390667438507,
+0.7977103590965271,
+0.4835266172885895,
+0.9391317367553711,
+0.2713945806026459,
+0.7269997000694275,
+0.4128159284591675,
+1.0098423957824707,
+0.2947368323802948,
+0.7684210538864136,
+0.49473685026168823,
+0.9684210419654846,
+0.2533154785633087,
+0.7977103590965271,
+0.5361582040786743,
+0.9391317367553711,
+0.3240261673927307,
+0.7269997000694275,
+0.4654475152492523,
+1.0098423957824707,
+0.34736841917037964,
+0.7684210538864136,
+0.5473684072494507,
+0.9684210419654846,
+0.30594706535339355,
+0.7977103590965271,
+0.5887897610664368,
+0.9391317367553711,
+0.37665775418281555,
+0.7269997000694275,
+0.5180791020393372,
+1.0098423957824707,
+0.4000000059604645,
+0.7684210538864136,
+0.6000000238418579,
+0.9684210419654846,
+0.3585786521434784,
+0.7977103590965271,
+0.641421377658844,
+0.9391317367553711,
+0.429289311170578,
+0.7269997000694275,
+0.5707106590270996,
+1.0098423957824707,
+0.4526315927505493,
+0.7684210538864136,
+0.6526315808296204,
+0.9684210419654846,
+0.41121020913124084,
+0.7977103590965271,
+0.6940529346466064,
+0.9391317367553711,
+0.48192089796066284,
+0.7269997000694275,
+0.6233422756195068,
+1.0098423957824707,
+0.5052631497383118,
+0.7684210538864136,
+0.7052631378173828,
+0.9684210419654846,
+0.4638417959213257,
+0.7977103590965271,
+0.7466844916343689,
+0.9391317367553711,
+0.5345524549484253,
+0.7269997000694275,
+0.6759738326072693,
+1.0098423957824707,
+0.557894766330719,
+0.7684210538864136,
+0.75789475440979,
+0.9684210419654846,
+0.5164733529090881,
+0.7977103590965271,
+0.7993161082267761,
+0.9391317367553711,
+0.5871840715408325,
+0.7269997000694275,
+0.7286053895950317,
+1.0098423957824707,
+0.6105263233184814,
+0.7684210538864136,
+0.8105263113975525,
+0.9684210419654846,
+0.5691049695014954,
+0.7977103590965271,
+0.8519476652145386,
+0.9391317367553711,
+0.639815628528595,
+0.7269997000694275,
+0.781237006187439,
+1.0098423957824707,
+0.6631578803062439,
+0.7684210538864136,
+0.8631578683853149,
+0.9684210419654846,
+0.6217365264892578,
+0.7977103590965271,
+0.904579222202301,
+0.9391317367553711,
+0.6924472451210022,
+0.7269997000694275,
+0.8338685631752014,
+1.0098423957824707,
+0.7157894968986511,
+0.7684210538864136,
+0.9157894849777222,
+0.9684210419654846,
+0.674368143081665,
+0.7977103590965271,
+0.9572108387947083,
+0.9391317367553711,
+0.7450788021087646,
+0.7269997000694275,
+0.8865001797676086,
+1.0098423957824707,
+0.7684210538864136,
+0.7684210538864136,
+0.9684210419654846,
+0.9684210419654846,
+0.7269997000694275,
+0.7977103590965271,
+1.0098423957824707,
+0.9391317367553711,
+0.7977103590965271,
+0.7269997000694275,
+0.9391317367553711,
+1.0098423957824707,
+0.821052610874176,
+0.7684210538864136,
+1.021052598953247,
+0.9684210419654846,
+0.7796312570571899,
+0.7977103590965271,
+1.062474012374878,
+0.9391317367553711,
+0.8503419756889343,
+0.7269997000694275,
+0.9917632937431335,
+1.0098423957824707,
+0.8736842274665833,
+0.7684210538864136,
+1.0736842155456543,
+0.9684210419654846,
+0.8322628736495972,
+0.7977103590965271,
+1.1151055097579956,
+0.9391317367553711,
+0.9029735326766968,
+0.7269997000694275,
+1.044394850730896,
+1.0098423957824707,
+-0.0736842080950737,
+0.821052610874176,
+0.12631578743457794,
+1.021052598953247,
+-0.11510556936264038,
+0.8503419756889343,
+0.16773714125156403,
+0.9917632937431335,
+-0.04439488798379898,
+0.7796312570571899,
+0.09702646732330322,
+1.062474012374878,
+-0.021052632480859756,
+0.821052610874176,
+0.17894737422466278,
+1.021052598953247,
+-0.06247398629784584,
+0.8503419756889343,
+0.22036872804164886,
+0.9917632937431335,
+0.008236690424382687,
+0.7796312570571899,
+0.14965803921222687,
+1.062474012374878,
+0.031578946858644485,
+0.821052610874176,
+0.23157894611358643,
+1.021052598953247,
+-0.009842408820986748,
+0.8503419756889343,
+0.2730002999305725,
+0.9917632937431335,
+0.0608682706952095,
+0.7796312570571899,
+0.2022896260023117,
+1.062474012374878,
+0.08421052992343903,
+0.821052610874176,
+0.28421053290367126,
+1.021052598953247,
+0.042789168655872345,
+0.8503419756889343,
+0.32563188672065735,
+0.9917632937431335,
+0.11349985003471375,
+0.7796312570571899,
+0.25492119789123535,
+1.062474012374878,
+0.13684210181236267,
+0.821052610874176,
+0.3368421196937561,
+1.021052598953247,
+0.09542074799537659,
+0.8503419756889343,
+0.3782634735107422,
+0.9917632937431335,
+0.1661314219236374,
+0.7796312570571899,
+0.3075527846813202,
+1.062474012374878,
+0.1894736886024475,
+0.821052610874176,
+0.38947367668151855,
+1.021052598953247,
+0.14805233478546143,
+0.8503419756889343,
+0.43089503049850464,
+0.9917632937431335,
+0.21876300871372223,
+0.7796312570571899,
+0.36018437147140503,
+1.062474012374878,
+0.24210526049137115,
+0.821052610874176,
+0.4421052634716034,
+1.021052598953247,
+0.20068390667438507,
+0.8503419756889343,
+0.4835266172885895,
+0.9917632937431335,
+0.2713945806026459,
+0.7796312570571899,
+0.4128159284591675,
+1.062474012374878,
+0.2947368323802948,
+0.821052610874176,
+0.49473685026168823,
+1.021052598953247,
+0.2533154785633087,
+0.8503419756889343,
+0.5361582040786743,
+0.9917632937431335,
+0.3240261673927307,
+0.7796312570571899,
+0.4654475152492523,
+1.062474012374878,
+0.34736841917037964,
+0.821052610874176,
+0.5473684072494507,
+1.021052598953247,
+0.30594706535339355,
+0.8503419756889343,
+0.5887897610664368,
+0.9917632937431335,
+0.37665775418281555,
+0.7796312570571899,
+0.5180791020393372,
+1.062474012374878,
+0.4000000059604645,
+0.821052610874176,
+0.6000000238418579,
+1.021052598953247,
+0.3585786521434784,
+0.8503419756889343,
+0.641421377658844,
+0.9917632937431335,
+0.429289311170578,
+0.7796312570571899,
+0.5707106590270996,
+1.062474012374878,
+0.4526315927505493,
+0.821052610874176,
+0.6526315808296204,
+1.021052598953247,
+0.41121020913124084,
+0.8503419756889343,
+0.6940529346466064,
+0.9917632937431335,
+0.48192089796066284,
+0.7796312570571899,
+0.6233422756195068,
+1.062474012374878,
+0.5052631497383118,
+0.821052610874176,
+0.7052631378173828,
+1.021052598953247,
+0.4638417959213257,
+0.8503419756889343,
+0.7466844916343689,
+0.9917632937431335,
+0.5345524549484253,
+0.7796312570571899,
+0.6759738326072693,
+1.062474012374878,
+0.557894766330719,
+0.821052610874176,
+0.75789475440979,
+1.021052598953247,
+0.5164733529090881,
+0.8503419756889343,
+0.7993161082267761,
+0.9917632937431335,
+0.5871840715408325,
+0.7796312570571899,
+0.7286053895950317,
+1.062474012374878,
+0.6105263233184814,
+0.821052610874176,
+0.8105263113975525,
+1.021052598953247,
+0.5691049695014954,
+0.8503419756889343,
+0.8519476652145386,
+0.9917632937431335,
+0.639815628528595,
+0.7796312570571899,
+0.781237006187439,
+1.062474012374878,
+0.6631578803062439,
+0.821052610874176,
+0.8631578683853149,
+1.021052598953247,
+0.6217365264892578,
+0.8503419756889343,
+0.904579222202301,
+0.9917632937431335,
+0.6924472451210022,
+0.7796312570571899,
+0.8338685631752014,
+1.062474012374878,
+0.7157894968986511,
+0.821052610874176,
+0.9157894849777222,
+1.021052598953247,
+0.674368143081665,
+0.8503419756889343,
+0.9572108387947083,
+0.9917632937431335,
+0.7450788021087646,
+0.7796312570571899,
+0.8865001797676086,
+1.062474012374878,
+0.7684210538864136,
+0.821052610874176,
+0.9684210419654846,
+1.021052598953247,
+0.7269997000694275,
+0.8503419756889343,
+1.0098423957824707,
+0.9917632937431335,
+0.7977103590965271,
+0.7796312570571899,
+0.9391317367553711,
+1.062474012374878,
+0.821052610874176,
+0.821052610874176,
+1.021052598953247,
+1.021052598953247,
+0.7796312570571899,
+0.8503419756889343,
+1.062474012374878,
+0.9917632937431335,
+0.8503419756889343,
+0.7796312570571899,
+0.9917632937431335,
+1.062474012374878,
+0.8736842274665833,
+0.821052610874176,
+1.0736842155456543,
+1.021052598953247,
+0.8322628736495972,
+0.8503419756889343,
+1.1151055097579956,
+0.9917632937431335,
+0.9029735326766968,
+0.7796312570571899,
+1.044394850730896,
+1.062474012374878,
+-0.0736842080950737,
+0.8736842274665833,
+0.12631578743457794,
+1.0736842155456543,
+-0.11510556936264038,
+0.9029735326766968,
+0.16773714125156403,
+1.044394850730896,
+-0.04439488798379898,
+0.8322628736495972,
+0.09702646732330322,
+1.1151055097579956,
+-0.021052632480859756,
+0.8736842274665833,
+0.17894737422466278,
+1.0736842155456543,
+-0.06247398629784584,
+0.9029735326766968,
+0.22036872804164886,
+1.044394850730896,
+0.008236690424382687,
+0.8322628736495972,
+0.14965803921222687,
+1.1151055097579956,
+0.031578946858644485,
+0.8736842274665833,
+0.23157894611358643,
+1.0736842155456543,
+-0.009842408820986748,
+0.9029735326766968,
+0.2730002999305725,
+1.044394850730896,
+0.0608682706952095,
+0.8322628736495972,
+0.2022896260023117,
+1.1151055097579956,
+0.08421052992343903,
+0.8736842274665833,
+0.28421053290367126,
+1.0736842155456543,
+0.042789168655872345,
+0.9029735326766968,
+0.32563188672065735,
+1.044394850730896,
+0.11349985003471375,
+0.8322628736495972,
+0.25492119789123535,
+1.1151055097579956,
+0.13684210181236267,
+0.8736842274665833,
+0.3368421196937561,
+1.0736842155456543,
+0.09542074799537659,
+0.9029735326766968,
+0.3782634735107422,
+1.044394850730896,
+0.1661314219236374,
+0.8322628736495972,
+0.3075527846813202,
+1.1151055097579956,
+0.1894736886024475,
+0.8736842274665833,
+0.38947367668151855,
+1.0736842155456543,
+0.14805233478546143,
+0.9029735326766968,
+0.43089503049850464,
+1.044394850730896,
+0.21876300871372223,
+0.8322628736495972,
+0.36018437147140503,
+1.1151055097579956,
+0.24210526049137115,
+0.8736842274665833,
+0.4421052634716034,
+1.0736842155456543,
+0.20068390667438507,
+0.9029735326766968,
+0.4835266172885895,
+1.044394850730896,
+0.2713945806026459,
+0.8322628736495972,
+0.4128159284591675,
+1.1151055097579956,
+0.2947368323802948,
+0.8736842274665833,
+0.49473685026168823,
+1.0736842155456543,
+0.2533154785633087,
+0.9029735326766968,
+0.5361582040786743,
+1.044394850730896,
+0.3240261673927307,
+0.8322628736495972,
+0.4654475152492523,
+1.1151055097579956,
+0.34736841917037964,
+0.8736842274665833,
+0.5473684072494507,
+1.0736842155456543,
+0.30594706535339355,
+0.9029735326766968,
+0.5887897610664368,
+1.044394850730896,
+0.37665775418281555,
+0.8322628736495972,
+0.5180791020393372,
+1.1151055097579956,
+0.4000000059604645,
+0.8736842274665833,
+0.6000000238418579,
+1.0736842155456543,
+0.3585786521434784,
+0.9029735326766968,
+0.641421377658844,
+1.044394850730896,
+0.429289311170578,
+0.8322628736495972,
+0.5707106590270996,
+1.1151055097579956,
+0.4526315927505493,
+0.8736842274665833,
+0.6526315808296204,
+1.0736842155456543,
+0.41121020913124084,
+0.9029735326766968,
+0.6940529346466064,
+1.044394850730896,
+0.48192089796066284,
+0.8322628736495972,
+0.6233422756195068,
+1.1151055097579956,
+0.5052631497383118,
+0.8736842274665833,
+0.7052631378173828,
+1.0736842155456543,
+0.4638417959213257,
+0.9029735326766968,
+0.7466844916343689,
+1.044394850730896,
+0.5345524549484253,
+0.8322628736495972,
+0.6759738326072693,
+1.1151055097579956,
+0.557894766330719,
+0.8736842274665833,
+0.75789475440979,
+1.0736842155456543,
+0.5164733529090881,
+0.9029735326766968,
+0.7993161082267761,
+1.044394850730896,
+0.5871840715408325,
+0.8322628736495972,
+0.7286053895950317,
+1.1151055097579956,
+0.6105263233184814,
+0.8736842274665833,
+0.8105263113975525,
+1.0736842155456543,
+0.5691049695014954,
+0.9029735326766968,
+0.8519476652145386,
+1.044394850730896,
+0.639815628528595,
+0.8322628736495972,
+0.781237006187439,
+1.1151055097579956,
+0.6631578803062439,
+0.8736842274665833,
+0.8631578683853149,
+1.0736842155456543,
+0.6217365264892578,
+0.9029735326766968,
+0.904579222202301,
+1.044394850730896,
+0.6924472451210022,
+0.8322628736495972,
+0.8338685631752014,
+1.1151055097579956,
+0.7157894968986511,
+0.8736842274665833,
+0.9157894849777222,
+1.0736842155456543,
+0.674368143081665,
+0.9029735326766968,
+0.9572108387947083,
+1.044394850730896,
+0.7450788021087646,
+0.8322628736495972,
+0.8865001797676086,
+1.1151055097579956,
+0.7684210538864136,
+0.8736842274665833,
+0.9684210419654846,
+1.0736842155456543,
+0.7269997000694275,
+0.9029735326766968,
+1.0098423957824707,
+1.044394850730896,
+0.7977103590965271,
+0.8322628736495972,
+0.9391317367553711,
+1.1151055097579956,
+0.821052610874176,
+0.8736842274665833,
+1.021052598953247,
+1.0736842155456543,
+0.7796312570571899,
+0.9029735326766968,
+1.062474012374878,
+1.044394850730896,
+0.8503419756889343,
+0.8322628736495972,
+0.9917632937431335,
+1.1151055097579956,
+0.8736842274665833,
+0.8736842274665833,
+1.0736842155456543,
+1.0736842155456543,
+0.8322628736495972,
+0.9029735326766968,
+1.1151055097579956,
+1.044394850730896,
+0.9029735326766968,
+0.8322628736495972,
+1.044394850730896,
+1.1151055097579956,
+-0.125,
+-0.125,
+0.22499999403953552,
+0.22499999403953552,
+-0.15916500985622406,
+-0.15916500985622406,
+0.2591650187969208,
+0.2591650187969208,
+-0.19748736917972565,
+-0.07374368607997894,
+0.29748737812042236,
+0.17374368011951447,
+-0.07374368607997894,
+-0.19748736917972565,
+0.17374368011951447,
+0.29748737812042236,
+-0.2531088888645172,
+-0.051036298274993896,
+0.35310888290405273,
+0.15103629231452942,
+-0.051036298274993896,
+-0.2531088888645172,
+0.15103629231452942,
+0.35310888290405273,
+-0.02500000037252903,
+-0.125,
+0.32499998807907104,
+0.22499999403953552,
+-0.05916500836610794,
+-0.15916500985622406,
+0.3591650128364563,
+0.2591650187969208,
+-0.09748737514019012,
+-0.07374368607997894,
+0.3974873721599579,
+0.17374368011951447,
+0.026256313547492027,
+-0.19748736917972565,
+0.2737436890602112,
+0.29748737812042236,
+-0.1531088948249817,
+-0.051036298274993896,
+0.45310887694358826,
+0.15103629231452942,
+0.04896370321512222,
+-0.2531088888645172,
+0.25103628635406494,
+0.35310888290405273,
+0.07500000298023224,
+-0.125,
+0.42500001192092896,
+0.22499999403953552,
+0.04083499312400818,
+-0.15916500985622406,
+0.4591650068759918,
+0.2591650187969208,
+0.0025126265827566385,
+-0.07374368607997894,
+0.4974873661994934,
+0.17374368011951447,
+0.1262563169002533,
+-0.19748736917972565,
+0.3737436830997467,
+0.29748737812042236,
+-0.05310888960957527,
+-0.051036298274993896,
+0.5531088709831238,
+0.15103629231452942,
+0.14896370470523834,
+-0.2531088888645172,
+0.35103631019592285,
+0.35310888290405273,
+0.17499999701976776,
+-0.125,
+0.5249999761581421,
+0.22499999403953552,
+0.1408349871635437,
+-0.15916500985622406,
+0.5591650009155273,
+0.2591650187969208,
+0.10251262784004211,
+-0.07374368607997894,
+0.5974873900413513,
+0.17374368011951447,
+0.22625631093978882,
+-0.19748736917972565,
+0.4737436771392822,
+0.29748737812042236,
+0.04689110815525055,
+-0.051036298274993896,
+0.6531088948249817,
+0.15103629231452942,
+0.24896369874477386,
+-0.2531088888645172,
+0.4510363042354584,
+0.35310888290405273,
+0.2750000059604645,
+-0.125,
+0.625,
+0.22499999403953552,
+0.24083499610424042,
+-0.15916500985622406,
+0.6591650247573853,
+0.2591650187969208,
+0.20251262187957764,
+-0.07374368607997894,
+0.6974873542785645,
+0.17374368011951447,
+0.32625630497932434,
+-0.19748736917972565,
+0.5737437009811401,
+0.29748737812042236,
+0.14689110219478607,
+-0.051036298274993896,
+0.7531089186668396,
+0.15103629231452942,
+0.3489637076854706,
+-0.2531088888645172,
+0.5510362982749939,
+0.35310888290405273,
+0.375,
+-0.125,
+0.7250000238418579,
+0.22499999403953552,
+0.34083500504493713,
+-0.15916500985622406,
+0.7591649889945984,
+0.2591650187969208,
+0.30251261591911316,
+-0.07374368607997894,
+0.7974873781204224,
+0.17374368011951447,
+0.42625629901885986,
+-0.19748736917972565,
+0.6737436652183533,
+0.29748737812042236,
+0.2468911111354828,
+-0.051036298274993896,
+0.8531088829040527,
+0.15103629231452942,
+0.4489637017250061,
+-0.2531088888645172,
+0.6510363221168518,
+0.35310888290405273,
+0.4749999940395355,
+-0.125,
+0.824999988079071,
+0.22499999403953552,
+0.44083499908447266,
+-0.15916500985622406,
+0.8591650128364563,
+0.2591650187969208,
+0.40251263976097107,
+-0.07374368607997894,
+0.8974874019622803,
+0.17374368011951447,
+0.5262563228607178,
+-0.19748736917972565,
+0.7737436890602112,
+0.29748737812042236,
+0.3468911051750183,
+-0.051036298274993896,
+0.9531089067459106,
+0.15103629231452942,
+0.548963725566864,
+-0.2531088888645172,
+0.7510362863540649,
+0.35310888290405273,
+0.574999988079071,
+-0.125,
+0.925000011920929,
+0.22499999403953552,
+0.5408350229263306,
+-0.15916500985622406,
+0.9591649770736694,
+0.2591650187969208,
+0.5025126338005066,
+-0.07374368607997894,
+0.9974873661994934,
+0.17374368011951447,
+0.6262562870979309,
+-0.19748736917972565,
+0.8737437129020691,
+0.29748737812042236,
+0.44689109921455383,
+-0.051036298274993896,
+1.0531089305877686,
+0.15103629231452942,
+0.6489636898040771,
+-0.2531088888645172,
+0.8510363101959229,
+0.35310888290405273,
+0.675000011920929,
+-0.125,
+1.024999976158142,
+0.22499999403953552,
+0.6408349871635437,
+-0.15916500985622406,
+1.0591650009155273,
+0.2591650187969208,
+0.6025125980377197,
+-0.07374368607997894,
+1.0974873304367065,
+0.17374368011951447,
+0.7262563109397888,
+-0.19748736917972565,
+0.9737436771392822,
+0.29748737812042236,
+0.5468910932540894,
+-0.051036298274993896,
+1.153108835220337,
+0.15103629231452942,
+0.7489637136459351,
+-0.2531088888645172,
+0.951036274433136,
+0.35310888290405273,
+0.7749999761581421,
+-0.125,
+1.125,
+0.22499999403953552,
+0.7408350110054016,
+-0.15916500985622406,
+1.1591650247573853,
+0.2591650187969208,
+0.7025126218795776,
+-0.07374368607997894,
+1.1974873542785645,
+0.17374368011951447,
+0.8262563347816467,
+-0.19748736917972565,
+1.0737437009811401,
+0.29748737812042236,
+0.6468911170959473,
+-0.051036298274993896,
+1.2531088590621948,
+0.15103629231452942,
+0.8489636778831482,
+-0.2531088888645172,
+1.0510362386703491,
+0.35310888290405273,
+-0.125,
+-0.02500000037252903,
+0.22499999403953552,
+0.32499998807907104,
+-0.15916500985622406,
+-0.05916500836610794,
+0.2591650187969208,
+0.3591650128364563,
+-0.19748736917972565,
+0.026256313547492027,
+0.29748737812042236,
+0.2737436890602112,
+-0.07374368607997894,
+-0.09748737514019012,
+0.17374368011951447,
+0.3974873721599579,
+-0.2531088888645172,
+0.04896370321512222,
+0.35310888290405273,
+0.25103628635406494,
+-0.051036298274993896,
+-0.1531088948249817,
+0.15103629231452942,
+0.45310887694358826,
+-0.02500000037252903,
+-0.02500000037252903,
+0.32499998807907104,
+0.32499998807907104,
+-0.05916500836610794,
+-0.05916500836610794,
+0.3591650128364563,
+0.3591650128364563,
+-0.09748737514019012,
+0.026256313547492027,
+0.3974873721599579,
+0.2737436890602112,
+0.026256313547492027,
+-0.09748737514019012,
+0.2737436890602112,
+0.3974873721599579,
+-0.1531088948249817,
+0.04896370321512222,
+0.45310887694358826,
+0.25103628635406494,
+0.04896370321512222,
+-0.1531088948249817,
+0.25103628635406494,
+0.45310887694358826,
+0.07500000298023224,
+-0.02500000037252903,
+0.42500001192092896,
+0.32499998807907104,
+0.04083499312400818,
+-0.05916500836610794,
+0.4591650068759918,
+0.3591650128364563,
+0.0025126265827566385,
+0.026256313547492027,
+0.4974873661994934,
+0.2737436890602112,
+0.1262563169002533,
+-0.09748737514019012,
+0.3737436830997467,
+0.3974873721599579,
+-0.05310888960957527,
+0.04896370321512222,
+0.5531088709831238,
+0.25103628635406494,
+0.14896370470523834,
+-0.1531088948249817,
+0.35103631019592285,
+0.45310887694358826,
+0.17499999701976776,
+-0.02500000037252903,
+0.5249999761581421,
+0.32499998807907104,
+0.1408349871635437,
+-0.05916500836610794,
+0.5591650009155273,
+0.3591650128364563,
+0.10251262784004211,
+0.026256313547492027,
+0.5974873900413513,
+0.2737436890602112,
+0.22625631093978882,
+-0.09748737514019012,
+0.4737436771392822,
+0.3974873721599579,
+0.04689110815525055,
+0.04896370321512222,
+0.6531088948249817,
+0.25103628635406494,
+0.24896369874477386,
+-0.1531088948249817,
+0.4510363042354584,
+0.45310887694358826,
+0.2750000059604645,
+-0.02500000037252903,
+0.625,
+0.32499998807907104,
+0.24083499610424042,
+-0.05916500836610794,
+0.6591650247573853,
+0.3591650128364563,
+0.20251262187957764,
+0.026256313547492027,
+0.6974873542785645,
+0.2737436890602112,
+0.32625630497932434,
+-0.09748737514019012,
+0.5737437009811401,
+0.3974873721599579,
+0.14689110219478607,
+0.04896370321512222,
+0.7531089186668396,
+0.25103628635406494,
+0.3489637076854706,
+-0.1531088948249817,
+0.5510362982749939,
+0.45310887694358826,
+0.375,
+-0.02500000037252903,
+0.7250000238418579,
+0.32499998807907104,
+0.34083500504493713,
+-0.05916500836610794,
+0.7591649889945984,
+0.3591650128364563,
+0.30251261591911316,
+0.026256313547492027,
+0.7974873781204224,
+0.2737436890602112,
+0.42625629901885986,
+-0.09748737514019012,
+0.6737436652183533,
+0.3974873721599579,
+0.2468911111354828,
+0.04896370321512222,
+0.8531088829040527,
+0.25103628635406494,
+0.4489637017250061,
+-0.1531088948249817,
+0.6510363221168518,
+0.45310887694358826,
+0.4749999940395355,
+-0.02500000037252903,
+0.824999988079071,
+0.32499998807907104,
+0.44083499908447266,
+-0.05916500836610794,
+0.8591650128364563,
+0.3591650128364563,
+0.40251263976097107,
+0.026256313547492027,
+0.8974874019622803,
+0.2737436890602112,
+0.5262563228607178,
+-0.09748737514019012,
+0.7737436890602112,
+0.3974873721599579,
+0.3468911051750183,
+0.04896370321512222,
+0.9531089067459106,
+0.25103628635406494,
+0.548963725566864,
+-0.1531088948249817,
+0.7510362863540649,
+0.45310887694358826,
+0.574999988079071,
+-0.02500000037252903,
+0.925000011920929,
+0.32499998807907104,
+0.5408350229263306,
+-0.05916500836610794,
+0.9591649770736694,
+0.3591650128364563,
+0.5025126338005066,
+0.026256313547492027,
+0.9974873661994934,
+0.2737436890602112,
+0.6262562870979309,
+-0.09748737514019012,
+0.8737437129020691,
+0.3974873721599579,
+0.44689109921455383,
+0.04896370321512222,
+1.0531089305877686,
+0.25103628635406494,
+0.6489636898040771,
+-0.1531088948249817,
+0.8510363101959229,
+0.45310887694358826,
+0.675000011920929,
+-0.02500000037252903,
+1.024999976158142,
+0.32499998807907104,
+0.6408349871635437,
+-0.05916500836610794,
+1.0591650009155273,
+0.3591650128364563,
+0.6025125980377197,
+0.026256313547492027,
+1.0974873304367065,
+0.2737436890602112,
+0.7262563109397888,
+-0.09748737514019012,
+0.9737436771392822,
+0.3974873721599579,
+0.5468910932540894,
+0.04896370321512222,
+1.153108835220337,
+0.25103628635406494,
+0.7489637136459351,
+-0.1531088948249817,
+0.951036274433136,
+0.45310887694358826,
+0.7749999761581421,
+-0.02500000037252903,
+1.125,
+0.32499998807907104,
+0.7408350110054016,
+-0.05916500836610794,
+1.1591650247573853,
+0.3591650128364563,
+0.7025126218795776,
+0.026256313547492027,
+1.1974873542785645,
+0.2737436890602112,
+0.8262563347816467,
+-0.09748737514019012,
+1.0737437009811401,
+0.3974873721599579,
+0.6468911170959473,
+0.04896370321512222,
+1.2531088590621948,
+0.25103628635406494,
+0.8489636778831482,
+-0.1531088948249817,
+1.0510362386703491,
+0.45310887694358826,
+-0.125,
+0.07500000298023224,
+0.22499999403953552,
+0.42500001192092896,
+-0.15916500985622406,
+0.04083499312400818,
+0.2591650187969208,
+0.4591650068759918,
+-0.19748736917972565,
+0.1262563169002533,
+0.29748737812042236,
+0.3737436830997467,
+-0.07374368607997894,
+0.0025126265827566385,
+0.17374368011951447,
+0.4974873661994934,
+-0.2531088888645172,
+0.14896370470523834,
+0.35310888290405273,
+0.35103631019592285,
+-0.051036298274993896,
+-0.05310888960957527,
+0.15103629231452942,
+0.5531088709831238,
+-0.02500000037252903,
+0.07500000298023224,
+0.32499998807907104,
+0.42500001192092896,
+-0.05916500836610794,
+0.04083499312400818,
+0.3591650128364563,
+0.4591650068759918,
+-0.09748737514019012,
+0.1262563169002533,
+0.3974873721599579,
+0.3737436830997467,
+0.026256313547492027,
+0.0025126265827566385,
+0.2737436890602112,
+0.4974873661994934,
+-0.1531088948249817,
+0.14896370470523834,
+0.45310887694358826,
+0.35103631019592285,
+0.04896370321512222,
+-0.05310888960957527,
+0.25103628635406494,
+0.5531088709831238,
+0.07500000298023224,
+0.07500000298023224,
+0.42500001192092896,
+0.42500001192092896,
+0.04083499312400818,
+0.04083499312400818,
+0.4591650068759918,
+0.4591650068759918,
+0.0025126265827566385,
+0.1262563169002533,
+0.4974873661994934,
+0.3737436830997467,
+0.1262563169002533,
+0.0025126265827566385,
+0.3737436830997467,
+0.4974873661994934,
+-0.05310888960957527,
+0.14896370470523834,
+0.5531088709831238,
+0.35103631019592285,
+0.14896370470523834,
+-0.05310888960957527,
+0.35103631019592285,
+0.5531088709831238,
+0.17499999701976776,
+0.07500000298023224,
+0.5249999761581421,
+0.42500001192092896,
+0.1408349871635437,
+0.04083499312400818,
+0.5591650009155273,
+0.4591650068759918,
+0.10251262784004211,
+0.1262563169002533,
+0.5974873900413513,
+0.3737436830997467,
+0.22625631093978882,
+0.0025126265827566385,
+0.4737436771392822,
+0.4974873661994934,
+0.04689110815525055,
+0.14896370470523834,
+0.6531088948249817,
+0.35103631019592285,
+0.24896369874477386,
+-0.05310888960957527,
+0.4510363042354584,
+0.5531088709831238,
+0.2750000059604645,
+0.07500000298023224,
+0.625,
+0.42500001192092896,
+0.24083499610424042,
+0.04083499312400818,
+0.6591650247573853,
+0.4591650068759918,
+0.20251262187957764,
+0.1262563169002533,
+0.6974873542785645,
+0.3737436830997467,
+0.32625630497932434,
+0.0025126265827566385,
+0.5737437009811401,
+0.4974873661994934,
+0.14689110219478607,
+0.14896370470523834,
+0.7531089186668396,
+0.35103631019592285,
+0.3489637076854706,
+-0.05310888960957527,
+0.5510362982749939,
+0.5531088709831238,
+0.375,
+0.07500000298023224,
+0.7250000238418579,
+0.42500001192092896,
+0.34083500504493713,
+0.04083499312400818,
+0.7591649889945984,
+0.4591650068759918,
+0.30251261591911316,
+0.1262563169002533,
+0.7974873781204224,
+0.3737436830997467,
+0.42625629901885986,
+0.0025126265827566385,
+0.6737436652183533,
+0.4974873661994934,
+0.2468911111354828,
+0.14896370470523834,
+0.8531088829040527,
+0.35103631019592285,
+0.4489637017250061,
+-0.05310888960957527,
+0.6510363221168518,
+0.5531088709831238,
+0.4749999940395355,
+0.07500000298023224,
+0.824999988079071,
+0.42500001192092896,
+0.44083499908447266,
+0.04083499312400818,
+0.8591650128364563,
+0.4591650068759918,
+0.40251263976097107,
+0.1262563169002533,
+0.8974874019622803,
+0.3737436830997467,
+0.5262563228607178,
+0.0025126265827566385,
+0.7737436890602112,
+0.4974873661994934,
+0.3468911051750183,
+0.14896370470523834,
+0.9531089067459106,
+0.35103631019592285,
+0.548963725566864,
+-0.05310888960957527,
+0.7510362863540649,
+0.5531088709831238,
+0.574999988079071,
+0.07500000298023224,
+0.925000011920929,
+0.42500001192092896,
+0.5408350229263306,
+0.04083499312400818,
+0.9591649770736694,
+0.4591650068759918,
+0.5025126338005066,
+0.1262563169002533,
+0.9974873661994934,
+0.3737436830997467,
+0.6262562870979309,
+0.0025126265827566385,
+0.8737437129020691,
+0.4974873661994934,
+0.44689109921455383,
+0.14896370470523834,
+1.0531089305877686,
+0.35103631019592285,
+0.6489636898040771,
+-0.05310888960957527,
+0.8510363101959229,
+0.5531088709831238,
+0.675000011920929,
+0.07500000298023224,
+1.024999976158142,
+0.42500001192092896,
+0.6408349871635437,
+0.04083499312400818,
+1.0591650009155273,
+0.4591650068759918,
+0.6025125980377197,
+0.1262563169002533,
+1.0974873304367065,
+0.3737436830997467,
+0.7262563109397888,
+0.0025126265827566385,
+0.9737436771392822,
+0.4974873661994934,
+0.5468910932540894,
+0.14896370470523834,
+1.153108835220337,
+0.35103631019592285,
+0.7489637136459351,
+-0.05310888960957527,
+0.951036274433136,
+0.5531088709831238,
+0.7749999761581421,
+0.07500000298023224,
+1.125,
+0.42500001192092896,
+0.7408350110054016,
+0.04083499312400818,
+1.1591650247573853,
+0.4591650068759918,
+0.7025126218795776,
+0.1262563169002533,
+1.1974873542785645,
+0.3737436830997467,
+0.8262563347816467,
+0.0025126265827566385,
+1.0737437009811401,
+0.4974873661994934,
+0.6468911170959473,
+0.14896370470523834,
+1.2531088590621948,
+0.35103631019592285,
+0.8489636778831482,
+-0.05310888960957527,
+1.0510362386703491,
+0.5531088709831238,
+-0.125,
+0.17499999701976776,
+0.22499999403953552,
+0.5249999761581421,
+-0.15916500985622406,
+0.1408349871635437,
+0.2591650187969208,
+0.5591650009155273,
+-0.19748736917972565,
+0.22625631093978882,
+0.29748737812042236,
+0.4737436771392822,
+-0.07374368607997894,
+0.10251262784004211,
+0.17374368011951447,
+0.5974873900413513,
+-0.2531088888645172,
+0.24896369874477386,
+0.35310888290405273,
+0.4510363042354584,
+-0.051036298274993896,
+0.04689110815525055,
+0.15103629231452942,
+0.6531088948249817,
+-0.02500000037252903,
+0.17499999701976776,
+0.32499998807907104,
+0.5249999761581421,
+-0.05916500836610794,
+0.1408349871635437,
+0.3591650128364563,
+0.5591650009155273,
+-0.09748737514019012,
+0.22625631093978882,
+0.3974873721599579,
+0.4737436771392822,
+0.026256313547492027,
+0.10251262784004211,
+0.2737436890602112,
+0.5974873900413513,
+-0.1531088948249817,
+0.24896369874477386,
+0.45310887694358826,
+0.4510363042354584,
+0.04896370321512222,
+0.04689110815525055,
+0.25103628635406494,
+0.6531088948249817,
+0.07500000298023224,
+0.17499999701976776,
+0.42500001192092896,
+0.5249999761581421,
+0.04083499312400818,
+0.1408349871635437,
+0.4591650068759918,
+0.5591650009155273,
+0.0025126265827566385,
+0.22625631093978882,
+0.4974873661994934,
+0.4737436771392822,
+0.1262563169002533,
+0.10251262784004211,
+0.3737436830997467,
+0.5974873900413513,
+-0.05310888960957527,
+0.24896369874477386,
+0.5531088709831238,
+0.4510363042354584,
+0.14896370470523834,
+0.04689110815525055,
+0.35103631019592285,
+0.6531088948249817,
+0.17499999701976776,
+0.17499999701976776,
+0.5249999761581421,
+0.5249999761581421,
+0.1408349871635437,
+0.1408349871635437,
+0.5591650009155273,
+0.5591650009155273,
+0.10251262784004211,
+0.22625631093978882,
+0.5974873900413513,
+0.4737436771392822,
+0.22625631093978882,
+0.10251262784004211,
+0.4737436771392822,
+0.5974873900413513,
+0.04689110815525055,
+0.24896369874477386,
+0.6531088948249817,
+0.4510363042354584,
+0.24896369874477386,
+0.04689110815525055,
+0.4510363042354584,
+0.6531088948249817,
+0.2750000059604645,
+0.17499999701976776,
+0.625,
+0.5249999761581421,
+0.24083499610424042,
+0.1408349871635437,
+0.6591650247573853,
+0.5591650009155273,
+0.20251262187957764,
+0.22625631093978882,
+0.6974873542785645,
+0.4737436771392822,
+0.32625630497932434,
+0.10251262784004211,
+0.5737437009811401,
+0.5974873900413513,
+0.14689110219478607,
+0.24896369874477386,
+0.7531089186668396,
+0.4510363042354584,
+0.3489637076854706,
+0.04689110815525055,
+0.5510362982749939,
+0.6531088948249817,
+0.375,
+0.17499999701976776,
+0.7250000238418579,
+0.5249999761581421,
+0.34083500504493713,
+0.1408349871635437,
+0.7591649889945984,
+0.5591650009155273,
+0.30251261591911316,
+0.22625631093978882,
+0.7974873781204224,
+0.4737436771392822,
+0.42625629901885986,
+0.10251262784004211,
+0.6737436652183533,
+0.5974873900413513,
+0.2468911111354828,
+0.24896369874477386,
+0.8531088829040527,
+0.4510363042354584,
+0.4489637017250061,
+0.04689110815525055,
+0.6510363221168518,
+0.6531088948249817,
+0.4749999940395355,
+0.17499999701976776,
+0.824999988079071,
+0.5249999761581421,
+0.44083499908447266,
+0.1408349871635437,
+0.8591650128364563,
+0.5591650009155273,
+0.40251263976097107,
+0.22625631093978882,
+0.8974874019622803,
+0.4737436771392822,
+0.5262563228607178,
+0.10251262784004211,
+0.7737436890602112,
+0.5974873900413513,
+0.3468911051750183,
+0.24896369874477386,
+0.9531089067459106,
+0.4510363042354584,
+0.548963725566864,
+0.04689110815525055,
+0.7510362863540649,
+0.6531088948249817,
+0.574999988079071,
+0.17499999701976776,
+0.925000011920929,
+0.5249999761581421,
+0.5408350229263306,
+0.1408349871635437,
+0.9591649770736694,
+0.5591650009155273,
+0.5025126338005066,
+0.22625631093978882,
+0.9974873661994934,
+0.4737436771392822,
+0.6262562870979309,
+0.10251262784004211,
+0.8737437129020691,
+0.5974873900413513,
+0.44689109921455383,
+0.24896369874477386,
+1.0531089305877686,
+0.4510363042354584,
+0.6489636898040771,
+0.04689110815525055,
+0.8510363101959229,
+0.6531088948249817,
+0.675000011920929,
+0.17499999701976776,
+1.024999976158142,
+0.5249999761581421,
+0.6408349871635437,
+0.1408349871635437,
+1.0591650009155273,
+0.5591650009155273,
+0.6025125980377197,
+0.22625631093978882,
+1.0974873304367065,
+0.4737436771392822,
+0.7262563109397888,
+0.10251262784004211,
+0.9737436771392822,
+0.5974873900413513,
+0.5468910932540894,
+0.24896369874477386,
+1.153108835220337,
+0.4510363042354584,
+0.7489637136459351,
+0.04689110815525055,
+0.951036274433136,
+0.6531088948249817,
+0.7749999761581421,
+0.17499999701976776,
+1.125,
+0.5249999761581421,
+0.7408350110054016,
+0.1408349871635437,
+1.1591650247573853,
+0.5591650009155273,
+0.7025126218795776,
+0.22625631093978882,
+1.1974873542785645,
+0.4737436771392822,
+0.8262563347816467,
+0.10251262784004211,
+1.0737437009811401,
+0.5974873900413513,
+0.6468911170959473,
+0.24896369874477386,
+1.2531088590621948,
+0.4510363042354584,
+0.8489636778831482,
+0.04689110815525055,
+1.0510362386703491,
+0.6531088948249817,
+-0.125,
+0.2750000059604645,
+0.22499999403953552,
+0.625,
+-0.15916500985622406,
+0.24083499610424042,
+0.2591650187969208,
+0.6591650247573853,
+-0.19748736917972565,
+0.32625630497932434,
+0.29748737812042236,
+0.5737437009811401,
+-0.07374368607997894,
+0.20251262187957764,
+0.17374368011951447,
+0.6974873542785645,
+-0.2531088888645172,
+0.3489637076854706,
+0.35310888290405273,
+0.5510362982749939,
+-0.051036298274993896,
+0.14689110219478607,
+0.15103629231452942,
+0.7531089186668396,
+-0.02500000037252903,
+0.2750000059604645,
+0.32499998807907104,
+0.625,
+-0.05916500836610794,
+0.24083499610424042,
+0.3591650128364563,
+0.6591650247573853,
+-0.09748737514019012,
+0.32625630497932434,
+0.3974873721599579,
+0.5737437009811401,
+0.026256313547492027,
+0.20251262187957764,
+0.2737436890602112,
+0.6974873542785645,
+-0.1531088948249817,
+0.3489637076854706,
+0.45310887694358826,
+0.5510362982749939,
+0.04896370321512222,
+0.14689110219478607,
+0.25103628635406494,
+0.7531089186668396,
+0.07500000298023224,
+0.2750000059604645,
+0.42500001192092896,
+0.625,
+0.04083499312400818,
+0.24083499610424042,
+0.4591650068759918,
+0.6591650247573853,
+0.0025126265827566385,
+0.32625630497932434,
+0.4974873661994934,
+0.5737437009811401,
+0.1262563169002533,
+0.20251262187957764,
+0.3737436830997467,
+0.6974873542785645,
+-0.05310888960957527,
+0.3489637076854706,
+0.5531088709831238,
+0.5510362982749939,
+0.14896370470523834,
+0.14689110219478607,
+0.35103631019592285,
+0.7531089186668396,
+0.17499999701976776,
+0.2750000059604645,
+0.5249999761581421,
+0.625,
+0.1408349871635437,
+0.24083499610424042,
+0.5591650009155273,
+0.6591650247573853,
+0.10251262784004211,
+0.32625630497932434,
+0.5974873900413513,
+0.5737437009811401,
+0.22625631093978882,
+0.20251262187957764,
+0.4737436771392822,
+0.6974873542785645,
+0.04689110815525055,
+0.3489637076854706,
+0.6531088948249817,
+0.5510362982749939,
+0.24896369874477386,
+0.14689110219478607,
+0.4510363042354584,
+0.7531089186668396,
+0.2750000059604645,
+0.2750000059604645,
+0.625,
+0.625,
+0.24083499610424042,
+0.24083499610424042,
+0.6591650247573853,
+0.6591650247573853,
+0.20251262187957764,
+0.32625630497932434,
+0.6974873542785645,
+0.5737437009811401,
+0.32625630497932434,
+0.20251262187957764,
+0.5737437009811401,
+0.6974873542785645,
+0.14689110219478607,
+0.3489637076854706,
+0.7531089186668396,
+0.5510362982749939,
+0.3489637076854706,
+0.14689110219478607,
+0.5510362982749939,
+0.7531089186668396,
+0.375,
+0.2750000059604645,
+0.7250000238418579,
+0.625,
+0.34083500504493713,
+0.24083499610424042,
+0.7591649889945984,
+0.6591650247573853,
+0.30251261591911316,
+0.32625630497932434,
+0.7974873781204224,
+0.5737437009811401,
+0.42625629901885986,
+0.20251262187957764,
+0.6737436652183533,
+0.6974873542785645,
+0.2468911111354828,
+0.3489637076854706,
+0.8531088829040527,
+0.5510362982749939,
+0.4489637017250061,
+0.14689110219478607,
+0.6510363221168518,
+0.7531089186668396,
+0.4749999940395355,
+0.2750000059604645,
+0.824999988079071,
+0.625,
+0.44083499908447266,
+0.24083499610424042,
+0.8591650128364563,
+0.6591650247573853,
+0.40251263976097107,
+0.32625630497932434,
+0.8974874019622803,
+0.5737437009811401,
+0.5262563228607178,
+0.20251262187957764,
+0.7737436890602112,
+0.6974873542785645,
+0.3468911051750183,
+0.3489637076854706,
+0.9531089067459106,
+0.5510362982749939,
+0.548963725566864,
+0.14689110219478607,
+0.7510362863540649,
+0.7531089186668396,
+0.574999988079071,
+0.2750000059604645,
+0.925000011920929,
+0.625,
+0.5408350229263306,
+0.24083499610424042,
+0.9591649770736694,
+0.6591650247573853,
+0.5025126338005066,
+0.32625630497932434,
+0.9974873661994934,
+0.5737437009811401,
+0.6262562870979309,
+0.20251262187957764,
+0.8737437129020691,
+0.6974873542785645,
+0.44689109921455383,
+0.3489637076854706,
+1.0531089305877686,
+0.5510362982749939,
+0.6489636898040771,
+0.14689110219478607,
+0.8510363101959229,
+0.7531089186668396,
+0.675000011920929,
+0.2750000059604645,
+1.024999976158142,
+0.625,
+0.6408349871635437,
+0.24083499610424042,
+1.0591650009155273,
+0.6591650247573853,
+0.6025125980377197,
+0.32625630497932434,
+1.0974873304367065,
+0.5737437009811401,
+0.7262563109397888,
+0.20251262187957764,
+0.9737436771392822,
+0.6974873542785645,
+0.5468910932540894,
+0.3489637076854706,
+1.153108835220337,
+0.5510362982749939,
+0.7489637136459351,
+0.14689110219478607,
+0.951036274433136,
+0.7531089186668396,
+0.7749999761581421,
+0.2750000059604645,
+1.125,
+0.625,
+0.7408350110054016,
+0.24083499610424042,
+1.1591650247573853,
+0.6591650247573853,
+0.7025126218795776,
+0.32625630497932434,
+1.1974873542785645,
+0.5737437009811401,
+0.8262563347816467,
+0.20251262187957764,
+1.0737437009811401,
+0.6974873542785645,
+0.6468911170959473,
+0.3489637076854706,
+1.2531088590621948,
+0.5510362982749939,
+0.8489636778831482,
+0.14689110219478607,
+1.0510362386703491,
+0.7531089186668396,
+-0.125,
+0.375,
+0.22499999403953552,
+0.7250000238418579,
+-0.15916500985622406,
+0.34083500504493713,
+0.2591650187969208,
+0.7591649889945984,
+-0.19748736917972565,
+0.42625629901885986,
+0.29748737812042236,
+0.6737436652183533,
+-0.07374368607997894,
+0.30251261591911316,
+0.17374368011951447,
+0.7974873781204224,
+-0.2531088888645172,
+0.4489637017250061,
+0.35310888290405273,
+0.6510363221168518,
+-0.051036298274993896,
+0.2468911111354828,
+0.15103629231452942,
+0.8531088829040527,
+-0.02500000037252903,
+0.375,
+0.32499998807907104,
+0.7250000238418579,
+-0.05916500836610794,
+0.34083500504493713,
+0.3591650128364563,
+0.7591649889945984,
+-0.09748737514019012,
+0.42625629901885986,
+0.3974873721599579,
+0.6737436652183533,
+0.026256313547492027,
+0.30251261591911316,
+0.2737436890602112,
+0.7974873781204224,
+-0.1531088948249817,
+0.4489637017250061,
+0.45310887694358826,
+0.6510363221168518,
+0.04896370321512222,
+0.2468911111354828,
+0.25103628635406494,
+0.8531088829040527,
+0.07500000298023224,
+0.375,
+0.42500001192092896,
+0.7250000238418579,
+0.04083499312400818,
+0.34083500504493713,
+0.4591650068759918,
+0.7591649889945984,
+0.0025126265827566385,
+0.42625629901885986,
+0.4974873661994934,
+0.6737436652183533,
+0.1262563169002533,
+0.30251261591911316,
+0.3737436830997467,
+0.7974873781204224,
+-0.05310888960957527,
+0.4489637017250061,
+0.5531088709831238,
+0.6510363221168518,
+0.14896370470523834,
+0.2468911111354828,
+0.35103631019592285,
+0.8531088829040527,
+0.17499999701976776,
+0.375,
+0.5249999761581421,
+0.7250000238418579,
+0.1408349871635437,
+0.34083500504493713,
+0.5591650009155273,
+0.7591649889945984,
+0.10251262784004211,
+0.42625629901885986,
+0.5974873900413513,
+0.6737436652183533,
+0.22625631093978882,
+0.30251261591911316,
+0.4737436771392822,
+0.7974873781204224,
+0.04689110815525055,
+0.4489637017250061,
+0.6531088948249817,
+0.6510363221168518,
+0.24896369874477386,
+0.2468911111354828,
+0.4510363042354584,
+0.8531088829040527,
+0.2750000059604645,
+0.375,
+0.625,
+0.7250000238418579,
+0.24083499610424042,
+0.34083500504493713,
+0.6591650247573853,
+0.7591649889945984,
+0.20251262187957764,
+0.42625629901885986,
+0.6974873542785645,
+0.6737436652183533,
+0.32625630497932434,
+0.30251261591911316,
+0.5737437009811401,
+0.7974873781204224,
+0.14689110219478607,
+0.4489637017250061,
+0.7531089186668396,
+0.6510363221168518,
+0.3489637076854706,
+0.2468911111354828,
+0.5510362982749939,
+0.8531088829040527,
+0.375,
+0.375,
+0.7250000238418579,
+0.7250000238418579,
+0.34083500504493713,
+0.34083500504493713,
+0.7591649889945984,
+0.7591649889945984,
+0.30251261591911316,
+0.42625629901885986,
+0.7974873781204224,
+0.6737436652183533,
+0.42625629901885986,
+0.30251261591911316,
+0.6737436652183533,
+0.7974873781204224,
+0.2468911111354828,
+0.4489637017250061,
+0.8531088829040527,
+0.6510363221168518,
+0.4489637017250061,
+0.2468911111354828,
+0.6510363221168518,
+0.8531088829040527,
+0.4749999940395355,
+0.375,
+0.824999988079071,
+0.7250000238418579,
+0.44083499908447266,
+0.34083500504493713,
+0.8591650128364563,
+0.7591649889945984,
+0.40251263976097107,
+0.42625629901885986,
+0.8974874019622803,
+0.6737436652183533,
+0.5262563228607178,
+0.30251261591911316,
+0.7737436890602112,
+0.7974873781204224,
+0.3468911051750183,
+0.4489637017250061,
+0.9531089067459106,
+0.6510363221168518,
+0.548963725566864,
+0.2468911111354828,
+0.7510362863540649,
+0.8531088829040527,
+0.574999988079071,
+0.375,
+0.925000011920929,
+0.7250000238418579,
+0.5408350229263306,
+0.34083500504493713,
+0.9591649770736694,
+0.7591649889945984,
+0.5025126338005066,
+0.42625629901885986,
+0.9974873661994934,
+0.6737436652183533,
+0.6262562870979309,
+0.30251261591911316,
+0.8737437129020691,
+0.7974873781204224,
+0.44689109921455383,
+0.4489637017250061,
+1.0531089305877686,
+0.6510363221168518,
+0.6489636898040771,
+0.2468911111354828,
+0.8510363101959229,
+0.8531088829040527,
+0.675000011920929,
+0.375,
+1.024999976158142,
+0.7250000238418579,
+0.6408349871635437,
+0.34083500504493713,
+1.0591650009155273,
+0.7591649889945984,
+0.6025125980377197,
+0.42625629901885986,
+1.0974873304367065,
+0.6737436652183533,
+0.7262563109397888,
+0.30251261591911316,
+0.9737436771392822,
+0.7974873781204224,
+0.5468910932540894,
+0.4489637017250061,
+1.153108835220337,
+0.6510363221168518,
+0.7489637136459351,
+0.2468911111354828,
+0.951036274433136,
+0.8531088829040527,
+0.7749999761581421,
+0.375,
+1.125,
+0.7250000238418579,
+0.7408350110054016,
+0.34083500504493713,
+1.1591650247573853,
+0.7591649889945984,
+0.7025126218795776,
+0.42625629901885986,
+1.1974873542785645,
+0.6737436652183533,
+0.8262563347816467,
+0.30251261591911316,
+1.0737437009811401,
+0.7974873781204224,
+0.6468911170959473,
+0.4489637017250061,
+1.2531088590621948,
+0.6510363221168518,
+0.8489636778831482,
+0.2468911111354828,
+1.0510362386703491,
+0.8531088829040527,
+-0.125,
+0.4749999940395355,
+0.22499999403953552,
+0.824999988079071,
+-0.15916500985622406,
+0.44083499908447266,
+0.2591650187969208,
+0.8591650128364563,
+-0.19748736917972565,
+0.5262563228607178,
+0.29748737812042236,
+0.7737436890602112,
+-0.07374368607997894,
+0.40251263976097107,
+0.17374368011951447,
+0.8974874019622803,
+-0.2531088888645172,
+0.548963725566864,
+0.35310888290405273,
+0.7510362863540649,
+-0.051036298274993896,
+0.3468911051750183,
+0.15103629231452942,
+0.9531089067459106,
+-0.02500000037252903,
+0.4749999940395355,
+0.32499998807907104,
+0.824999988079071,
+-0.05916500836610794,
+0.44083499908447266,
+0.3591650128364563,
+0.8591650128364563,
+-0.09748737514019012,
+0.5262563228607178,
+0.3974873721599579,
+0.7737436890602112,
+0.026256313547492027,
+0.40251263976097107,
+0.2737436890602112,
+0.8974874019622803,
+-0.1531088948249817,
+0.548963725566864,
+0.45310887694358826,
+0.7510362863540649,
+0.04896370321512222,
+0.3468911051750183,
+0.25103628635406494,
+0.9531089067459106,
+0.07500000298023224,
+0.4749999940395355,
+0.42500001192092896,
+0.824999988079071,
+0.04083499312400818,
+0.44083499908447266,
+0.4591650068759918,
+0.8591650128364563,
+0.0025126265827566385,
+0.5262563228607178,
+0.4974873661994934,
+0.7737436890602112,
+0.1262563169002533,
+0.40251263976097107,
+0.3737436830997467,
+0.8974874019622803,
+-0.05310888960957527,
+0.548963725566864,
+0.5531088709831238,
+0.7510362863540649,
+0.14896370470523834,
+0.3468911051750183,
+0.35103631019592285,
+0.9531089067459106,
+0.17499999701976776,
+0.4749999940395355,
+0.5249999761581421,
+0.824999988079071,
+0.1408349871635437,
+0.44083499908447266,
+0.5591650009155273,
+0.8591650128364563,
+0.10251262784004211,
+0.5262563228607178,
+0.5974873900413513,
+0.7737436890602112,
+0.22625631093978882,
+0.40251263976097107,
+0.4737436771392822,
+0.8974874019622803,
+0.04689110815525055,
+0.548963725566864,
+0.6531088948249817,
+0.7510362863540649,
+0.24896369874477386,
+0.3468911051750183,
+0.4510363042354584,
+0.9531089067459106,
+0.2750000059604645,
+0.4749999940395355,
+0.625,
+0.824999988079071,
+0.24083499610424042,
+0.44083499908447266,
+0.6591650247573853,
+0.8591650128364563,
+0.20251262187957764,
+0.5262563228607178,
+0.6974873542785645,
+0.7737436890602112,
+0.32625630497932434,
+0.40251263976097107,
+0.5737437009811401,
+0.8974874019622803,
+0.14689110219478607,
+0.548963725566864,
+0.7531089186668396,
+0.7510362863540649,
+0.3489637076854706,
+0.3468911051750183,
+0.5510362982749939,
+0.9531089067459106,
+0.375,
+0.4749999940395355,
+0.7250000238418579,
+0.824999988079071,
+0.34083500504493713,
+0.44083499908447266,
+0.7591649889945984,
+0.8591650128364563,
+0.30251261591911316,
+0.5262563228607178,
+0.7974873781204224,
+0.7737436890602112,
+0.42625629901885986,
+0.40251263976097107,
+0.6737436652183533,
+0.8974874019622803,
+0.2468911111354828,
+0.548963725566864,
+0.8531088829040527,
+0.7510362863540649,
+0.4489637017250061,
+0.3468911051750183,
+0.6510363221168518,
+0.9531089067459106,
+0.4749999940395355,
+0.4749999940395355,
+0.824999988079071,
+0.824999988079071,
+0.44083499908447266,
+0.44083499908447266,
+0.8591650128364563,
+0.8591650128364563,
+0.40251263976097107,
+0.5262563228607178,
+0.8974874019622803,
+0.7737436890602112,
+0.5262563228607178,
+0.40251263976097107,
+0.7737436890602112,
+0.8974874019622803,
+0.3468911051750183,
+0.548963725566864,
+0.9531089067459106,
+0.7510362863540649,
+0.548963725566864,
+0.3468911051750183,
+0.7510362863540649,
+0.9531089067459106,
+0.574999988079071,
+0.4749999940395355,
+0.925000011920929,
+0.824999988079071,
+0.5408350229263306,
+0.44083499908447266,
+0.9591649770736694,
+0.8591650128364563,
+0.5025126338005066,
+0.5262563228607178,
+0.9974873661994934,
+0.7737436890602112,
+0.6262562870979309,
+0.40251263976097107,
+0.8737437129020691,
+0.8974874019622803,
+0.44689109921455383,
+0.548963725566864,
+1.0531089305877686,
+0.7510362863540649,
+0.6489636898040771,
+0.3468911051750183,
+0.8510363101959229,
+0.9531089067459106,
+0.675000011920929,
+0.4749999940395355,
+1.024999976158142,
+0.824999988079071,
+0.6408349871635437,
+0.44083499908447266,
+1.0591650009155273,
+0.8591650128364563,
+0.6025125980377197,
+0.5262563228607178,
+1.0974873304367065,
+0.7737436890602112,
+0.7262563109397888,
+0.40251263976097107,
+0.9737436771392822,
+0.8974874019622803,
+0.5468910932540894,
+0.548963725566864,
+1.153108835220337,
+0.7510362863540649,
+0.7489637136459351,
+0.3468911051750183,
+0.951036274433136,
+0.9531089067459106,
+0.7749999761581421,
+0.4749999940395355,
+1.125,
+0.824999988079071,
+0.7408350110054016,
+0.44083499908447266,
+1.1591650247573853,
+0.8591650128364563,
+0.7025126218795776,
+0.5262563228607178,
+1.1974873542785645,
+0.7737436890602112,
+0.8262563347816467,
+0.40251263976097107,
+1.0737437009811401,
+0.8974874019622803,
+0.6468911170959473,
+0.548963725566864,
+1.2531088590621948,
+0.7510362863540649,
+0.8489636778831482,
+0.3468911051750183,
+1.0510362386703491,
+0.9531089067459106,
+-0.125,
+0.574999988079071,
+0.22499999403953552,
+0.925000011920929,
+-0.15916500985622406,
+0.5408350229263306,
+0.2591650187969208,
+0.9591649770736694,
+-0.19748736917972565,
+0.6262562870979309,
+0.29748737812042236,
+0.8737437129020691,
+-0.07374368607997894,
+0.5025126338005066,
+0.17374368011951447,
+0.9974873661994934,
+-0.2531088888645172,
+0.6489636898040771,
+0.35310888290405273,
+0.8510363101959229,
+-0.051036298274993896,
+0.44689109921455383,
+0.15103629231452942,
+1.0531089305877686,
+-0.02500000037252903,
+0.574999988079071,
+0.32499998807907104,
+0.925000011920929,
+-0.05916500836610794,
+0.5408350229263306,
+0.3591650128364563,
+0.9591649770736694,
+-0.09748737514019012,
+0.6262562870979309,
+0.3974873721599579,
+0.8737437129020691,
+0.026256313547492027,
+0.5025126338005066,
+0.2737436890602112,
+0.9974873661994934,
+-0.1531088948249817,
+0.6489636898040771,
+0.45310887694358826,
+0.8510363101959229,
+0.04896370321512222,
+0.44689109921455383,
+0.25103628635406494,
+1.0531089305877686,
+0.07500000298023224,
+0.574999988079071,
+0.42500001192092896,
+0.925000011920929,
+0.04083499312400818,
+0.5408350229263306,
+0.4591650068759918,
+0.9591649770736694,
+0.0025126265827566385,
+0.6262562870979309,
+0.4974873661994934,
+0.8737437129020691,
+0.1262563169002533,
+0.5025126338005066,
+0.3737436830997467,
+0.9974873661994934,
+-0.05310888960957527,
+0.6489636898040771,
+0.5531088709831238,
+0.8510363101959229,
+0.14896370470523834,
+0.44689109921455383,
+0.35103631019592285,
+1.0531089305877686,
+0.17499999701976776,
+0.574999988079071,
+0.5249999761581421,
+0.925000011920929,
+0.1408349871635437,
+0.5408350229263306,
+0.5591650009155273,
+0.9591649770736694,
+0.10251262784004211,
+0.6262562870979309,
+0.5974873900413513,
+0.8737437129020691,
+0.22625631093978882,
+0.5025126338005066,
+0.4737436771392822,
+0.9974873661994934,
+0.04689110815525055,
+0.6489636898040771,
+0.6531088948249817,
+0.8510363101959229,
+0.24896369874477386,
+0.44689109921455383,
+0.4510363042354584,
+1.0531089305877686,
+0.2750000059604645,
+0.574999988079071,
+0.625,
+0.925000011920929,
+0.24083499610424042,
+0.5408350229263306,
+0.6591650247573853,
+0.9591649770736694,
+0.20251262187957764,
+0.6262562870979309,
+0.6974873542785645,
+0.8737437129020691,
+0.32625630497932434,
+0.5025126338005066,
+0.5737437009811401,
+0.9974873661994934,
+0.14689110219478607,
+0.6489636898040771,
+0.7531089186668396,
+0.8510363101959229,
+0.3489637076854706,
+0.44689109921455383,
+0.5510362982749939,
+1.0531089305877686,
+0.375,
+0.574999988079071,
+0.7250000238418579,
+0.925000011920929,
+0.34083500504493713,
+0.5408350229263306,
+0.7591649889945984,
+0.9591649770736694,
+0.30251261591911316,
+0.6262562870979309,
+0.7974873781204224,
+0.8737437129020691,
+0.42625629901885986,
+0.5025126338005066,
+0.6737436652183533,
+0.9974873661994934,
+0.2468911111354828,
+0.6489636898040771,
+0.8531088829040527,
+0.8510363101959229,
+0.4489637017250061,
+0.44689109921455383,
+0.6510363221168518,
+1.0531089305877686,
+0.4749999940395355,
+0.574999988079071,
+0.824999988079071,
+0.925000011920929,
+0.44083499908447266,
+0.5408350229263306,
+0.8591650128364563,
+0.9591649770736694,
+0.40251263976097107,
+0.6262562870979309,
+0.8974874019622803,
+0.8737437129020691,
+0.5262563228607178,
+0.5025126338005066,
+0.7737436890602112,
+0.9974873661994934,
+0.3468911051750183,
+0.6489636898040771,
+0.9531089067459106,
+0.8510363101959229,
+0.548963725566864,
+0.44689109921455383,
+0.7510362863540649,
+1.0531089305877686,
+0.574999988079071,
+0.574999988079071,
+0.925000011920929,
+0.925000011920929,
+0.5408350229263306,
+0.5408350229263306,
+0.9591649770736694,
+0.9591649770736694,
+0.5025126338005066,
+0.6262562870979309,
+0.9974873661994934,
+0.8737437129020691,
+0.6262562870979309,
+0.5025126338005066,
+0.8737437129020691,
+0.9974873661994934,
+0.44689109921455383,
+0.6489636898040771,
+1.0531089305877686,
+0.8510363101959229,
+0.6489636898040771,
+0.44689109921455383,
+0.8510363101959229,
+1.0531089305877686,
+0.675000011920929,
+0.574999988079071,
+1.024999976158142,
+0.925000011920929,
+0.6408349871635437,
+0.5408350229263306,
+1.0591650009155273,
+0.9591649770736694,
+0.6025125980377197,
+0.6262562870979309,
+1.0974873304367065,
+0.8737437129020691,
+0.7262563109397888,
+0.5025126338005066,
+0.9737436771392822,
+0.9974873661994934,
+0.5468910932540894,
+0.6489636898040771,
+1.153108835220337,
+0.8510363101959229,
+0.7489637136459351,
+0.44689109921455383,
+0.951036274433136,
+1.0531089305877686,
+0.7749999761581421,
+0.574999988079071,
+1.125,
+0.925000011920929,
+0.7408350110054016,
+0.5408350229263306,
+1.1591650247573853,
+0.9591649770736694,
+0.7025126218795776,
+0.6262562870979309,
+1.1974873542785645,
+0.8737437129020691,
+0.8262563347816467,
+0.5025126338005066,
+1.0737437009811401,
+0.9974873661994934,
+0.6468911170959473,
+0.6489636898040771,
+1.2531088590621948,
+0.8510363101959229,
+0.8489636778831482,
+0.44689109921455383,
+1.0510362386703491,
+1.0531089305877686,
+-0.125,
+0.675000011920929,
+0.22499999403953552,
+1.024999976158142,
+-0.15916500985622406,
+0.6408349871635437,
+0.2591650187969208,
+1.0591650009155273,
+-0.19748736917972565,
+0.7262563109397888,
+0.29748737812042236,
+0.9737436771392822,
+-0.07374368607997894,
+0.6025125980377197,
+0.17374368011951447,
+1.0974873304367065,
+-0.2531088888645172,
+0.7489637136459351,
+0.35310888290405273,
+0.951036274433136,
+-0.051036298274993896,
+0.5468910932540894,
+0.15103629231452942,
+1.153108835220337,
+-0.02500000037252903,
+0.675000011920929,
+0.32499998807907104,
+1.024999976158142,
+-0.05916500836610794,
+0.6408349871635437,
+0.3591650128364563,
+1.0591650009155273,
+-0.09748737514019012,
+0.7262563109397888,
+0.3974873721599579,
+0.9737436771392822,
+0.026256313547492027,
+0.6025125980377197,
+0.2737436890602112,
+1.0974873304367065,
+-0.1531088948249817,
+0.7489637136459351,
+0.45310887694358826,
+0.951036274433136,
+0.04896370321512222,
+0.5468910932540894,
+0.25103628635406494,
+1.153108835220337,
+0.07500000298023224,
+0.675000011920929,
+0.42500001192092896,
+1.024999976158142,
+0.04083499312400818,
+0.6408349871635437,
+0.4591650068759918,
+1.0591650009155273,
+0.0025126265827566385,
+0.7262563109397888,
+0.4974873661994934,
+0.9737436771392822,
+0.1262563169002533,
+0.6025125980377197,
+0.3737436830997467,
+1.0974873304367065,
+-0.05310888960957527,
+0.7489637136459351,
+0.5531088709831238,
+0.951036274433136,
+0.14896370470523834,
+0.5468910932540894,
+0.35103631019592285,
+1.153108835220337,
+0.17499999701976776,
+0.675000011920929,
+0.5249999761581421,
+1.024999976158142,
+0.1408349871635437,
+0.6408349871635437,
+0.5591650009155273,
+1.0591650009155273,
+0.10251262784004211,
+0.7262563109397888,
+0.5974873900413513,
+0.9737436771392822,
+0.22625631093978882,
+0.6025125980377197,
+0.4737436771392822,
+1.0974873304367065,
+0.04689110815525055,
+0.7489637136459351,
+0.6531088948249817,
+0.951036274433136,
+0.24896369874477386,
+0.5468910932540894,
+0.4510363042354584,
+1.153108835220337,
+0.2750000059604645,
+0.675000011920929,
+0.625,
+1.024999976158142,
+0.24083499610424042,
+0.6408349871635437,
+0.6591650247573853,
+1.0591650009155273,
+0.20251262187957764,
+0.7262563109397888,
+0.6974873542785645,
+0.9737436771392822,
+0.32625630497932434,
+0.6025125980377197,
+0.5737437009811401,
+1.0974873304367065,
+0.14689110219478607,
+0.7489637136459351,
+0.7531089186668396,
+0.951036274433136,
+0.3489637076854706,
+0.5468910932540894,
+0.5510362982749939,
+1.153108835220337,
+0.375,
+0.675000011920929,
+0.7250000238418579,
+1.024999976158142,
+0.34083500504493713,
+0.6408349871635437,
+0.7591649889945984,
+1.0591650009155273,
+0.30251261591911316,
+0.7262563109397888,
+0.7974873781204224,
+0.9737436771392822,
+0.42625629901885986,
+0.6025125980377197,
+0.6737436652183533,
+1.0974873304367065,
+0.2468911111354828,
+0.7489637136459351,
+0.8531088829040527,
+0.951036274433136,
+0.4489637017250061,
+0.5468910932540894,
+0.6510363221168518,
+1.153108835220337,
+0.4749999940395355,
+0.675000011920929,
+0.824999988079071,
+1.024999976158142,
+0.44083499908447266,
+0.6408349871635437,
+0.8591650128364563,
+1.0591650009155273,
+0.40251263976097107,
+0.7262563109397888,
+0.8974874019622803,
+0.9737436771392822,
+0.5262563228607178,
+0.6025125980377197,
+0.7737436890602112,
+1.0974873304367065,
+0.3468911051750183,
+0.7489637136459351,
+0.9531089067459106,
+0.951036274433136,
+0.548963725566864,
+0.5468910932540894,
+0.7510362863540649,
+1.153108835220337,
+0.574999988079071,
+0.675000011920929,
+0.925000011920929,
+1.024999976158142,
+0.5408350229263306,
+0.6408349871635437,
+0.9591649770736694,
+1.0591650009155273,
+0.5025126338005066,
+0.7262563109397888,
+0.9974873661994934,
+0.9737436771392822,
+0.6262562870979309,
+0.6025125980377197,
+0.8737437129020691,
+1.0974873304367065,
+0.44689109921455383,
+0.7489637136459351,
+1.0531089305877686,
+0.951036274433136,
+0.6489636898040771,
+0.5468910932540894,
+0.8510363101959229,
+1.153108835220337,
+0.675000011920929,
+0.675000011920929,
+1.024999976158142,
+1.024999976158142,
+0.6408349871635437,
+0.6408349871635437,
+1.0591650009155273,
+1.0591650009155273,
+0.6025125980377197,
+0.7262563109397888,
+1.0974873304367065,
+0.9737436771392822,
+0.7262563109397888,
+0.6025125980377197,
+0.9737436771392822,
+1.0974873304367065,
+0.5468910932540894,
+0.7489637136459351,
+1.153108835220337,
+0.951036274433136,
+0.7489637136459351,
+0.5468910932540894,
+0.951036274433136,
+1.153108835220337,
+0.7749999761581421,
+0.675000011920929,
+1.125,
+1.024999976158142,
+0.7408350110054016,
+0.6408349871635437,
+1.1591650247573853,
+1.0591650009155273,
+0.7025126218795776,
+0.7262563109397888,
+1.1974873542785645,
+0.9737436771392822,
+0.8262563347816467,
+0.6025125980377197,
+1.0737437009811401,
+1.0974873304367065,
+0.6468911170959473,
+0.7489637136459351,
+1.2531088590621948,
+0.951036274433136,
+0.8489636778831482,
+0.5468910932540894,
+1.0510362386703491,
+1.153108835220337,
+-0.125,
+0.7749999761581421,
+0.22499999403953552,
+1.125,
+-0.15916500985622406,
+0.7408350110054016,
+0.2591650187969208,
+1.1591650247573853,
+-0.19748736917972565,
+0.8262563347816467,
+0.29748737812042236,
+1.0737437009811401,
+-0.07374368607997894,
+0.7025126218795776,
+0.17374368011951447,
+1.1974873542785645,
+-0.2531088888645172,
+0.8489636778831482,
+0.35310888290405273,
+1.0510362386703491,
+-0.051036298274993896,
+0.6468911170959473,
+0.15103629231452942,
+1.2531088590621948,
+-0.02500000037252903,
+0.7749999761581421,
+0.32499998807907104,
+1.125,
+-0.05916500836610794,
+0.7408350110054016,
+0.3591650128364563,
+1.1591650247573853,
+-0.09748737514019012,
+0.8262563347816467,
+0.3974873721599579,
+1.0737437009811401,
+0.026256313547492027,
+0.7025126218795776,
+0.2737436890602112,
+1.1974873542785645,
+-0.1531088948249817,
+0.8489636778831482,
+0.45310887694358826,
+1.0510362386703491,
+0.04896370321512222,
+0.6468911170959473,
+0.25103628635406494,
+1.2531088590621948,
+0.07500000298023224,
+0.7749999761581421,
+0.42500001192092896,
+1.125,
+0.04083499312400818,
+0.7408350110054016,
+0.4591650068759918,
+1.1591650247573853,
+0.0025126265827566385,
+0.8262563347816467,
+0.4974873661994934,
+1.0737437009811401,
+0.1262563169002533,
+0.7025126218795776,
+0.3737436830997467,
+1.1974873542785645,
+-0.05310888960957527,
+0.8489636778831482,
+0.5531088709831238,
+1.0510362386703491,
+0.14896370470523834,
+0.6468911170959473,
+0.35103631019592285,
+1.2531088590621948,
+0.17499999701976776,
+0.7749999761581421,
+0.5249999761581421,
+1.125,
+0.1408349871635437,
+0.7408350110054016,
+0.5591650009155273,
+1.1591650247573853,
+0.10251262784004211,
+0.8262563347816467,
+0.5974873900413513,
+1.0737437009811401,
+0.22625631093978882,
+0.7025126218795776,
+0.4737436771392822,
+1.1974873542785645,
+0.04689110815525055,
+0.8489636778831482,
+0.6531088948249817,
+1.0510362386703491,
+0.24896369874477386,
+0.6468911170959473,
+0.4510363042354584,
+1.2531088590621948,
+0.2750000059604645,
+0.7749999761581421,
+0.625,
+1.125,
+0.24083499610424042,
+0.7408350110054016,
+0.6591650247573853,
+1.1591650247573853,
+0.20251262187957764,
+0.8262563347816467,
+0.6974873542785645,
+1.0737437009811401,
+0.32625630497932434,
+0.7025126218795776,
+0.5737437009811401,
+1.1974873542785645,
+0.14689110219478607,
+0.8489636778831482,
+0.7531089186668396,
+1.0510362386703491,
+0.3489637076854706,
+0.6468911170959473,
+0.5510362982749939,
+1.2531088590621948,
+0.375,
+0.7749999761581421,
+0.7250000238418579,
+1.125,
+0.34083500504493713,
+0.7408350110054016,
+0.7591649889945984,
+1.1591650247573853,
+0.30251261591911316,
+0.8262563347816467,
+0.7974873781204224,
+1.0737437009811401,
+0.42625629901885986,
+0.7025126218795776,
+0.6737436652183533,
+1.1974873542785645,
+0.2468911111354828,
+0.8489636778831482,
+0.8531088829040527,
+1.0510362386703491,
+0.4489637017250061,
+0.6468911170959473,
+0.6510363221168518,
+1.2531088590621948,
+0.4749999940395355,
+0.7749999761581421,
+0.824999988079071,
+1.125,
+0.44083499908447266,
+0.7408350110054016,
+0.8591650128364563,
+1.1591650247573853,
+0.40251263976097107,
+0.8262563347816467,
+0.8974874019622803,
+1.0737437009811401,
+0.5262563228607178,
+0.7025126218795776,
+0.7737436890602112,
+1.1974873542785645,
+0.3468911051750183,
+0.8489636778831482,
+0.9531089067459106,
+1.0510362386703491,
+0.548963725566864,
+0.6468911170959473,
+0.7510362863540649,
+1.2531088590621948,
+0.574999988079071,
+0.7749999761581421,
+0.925000011920929,
+1.125,
+0.5408350229263306,
+0.7408350110054016,
+0.9591649770736694,
+1.1591650247573853,
+0.5025126338005066,
+0.8262563347816467,
+0.9974873661994934,
+1.0737437009811401,
+0.6262562870979309,
+0.7025126218795776,
+0.8737437129020691,
+1.1974873542785645,
+0.44689109921455383,
+0.8489636778831482,
+1.0531089305877686,
+1.0510362386703491,
+0.6489636898040771,
+0.6468911170959473,
+0.8510363101959229,
+1.2531088590621948,
+0.675000011920929,
+0.7749999761581421,
+1.024999976158142,
+1.125,
+0.6408349871635437,
+0.7408350110054016,
+1.0591650009155273,
+1.1591650247573853,
+0.6025125980377197,
+0.8262563347816467,
+1.0974873304367065,
+1.0737437009811401,
+0.7262563109397888,
+0.7025126218795776,
+0.9737436771392822,
+1.1974873542785645,
+0.5468910932540894,
+0.8489636778831482,
+1.153108835220337,
+1.0510362386703491,
+0.7489637136459351,
+0.6468911170959473,
+0.951036274433136,
+1.2531088590621948,
+0.7749999761581421,
+0.7749999761581421,
+1.125,
+1.125,
+0.7408350110054016,
+0.7408350110054016,
+1.1591650247573853,
+1.1591650247573853,
+0.7025126218795776,
+0.8262563347816467,
+1.1974873542785645,
+1.0737437009811401,
+0.8262563347816467,
+0.7025126218795776,
+1.0737437009811401,
+1.1974873542785645,
+0.6468911170959473,
+0.8489636778831482,
+1.2531088590621948,
+1.0510362386703491,
+0.8489636778831482,
+0.6468911170959473,
+1.0510362386703491,
+1.2531088590621948,
+-0.15000000596046448,
+-0.15000000596046448,
+0.3499999940395355,
+0.3499999940395355,
+-0.18504385650157928,
+-0.18504385650157928,
+0.3850438594818115,
+0.3850438594818115,
+-0.2535533905029297,
+-0.07677669823169708,
+0.45355337858200073,
+0.2767767012119293,
+-0.07677669823169708,
+-0.2535533905029297,
+0.2767767012119293,
+0.45355337858200073,
+-0.3330127000808716,
+-0.04433756694197655,
+0.5330126881599426,
+0.24433757364749908,
+-0.04433756694197655,
+-0.3330127000808716,
+0.24433757364749908,
+0.5330126881599426,
+0.05000000074505806,
+-0.15000000596046448,
+0.550000011920929,
+0.3499999940395355,
+0.01495614368468523,
+-0.18504385650157928,
+0.5850438475608826,
+0.3850438594818115,
+-0.05355339124798775,
+-0.07677669823169708,
+0.6535533666610718,
+0.2767767012119293,
+0.12322330474853516,
+-0.2535533905029297,
+0.47677668929100037,
+0.45355337858200073,
+-0.13301269710063934,
+-0.04433756694197655,
+0.7330126762390137,
+0.24433757364749908,
+0.1556624323129654,
+-0.3330127000808716,
+0.4443375766277313,
+0.5330126881599426,
+0.25,
+-0.15000000596046448,
+0.75,
+0.3499999940395355,
+0.2149561494588852,
+-0.18504385650157928,
+0.7850438356399536,
+0.3850438594818115,
+0.1464466154575348,
+-0.07677669823169708,
+0.8535534143447876,
+0.2767767012119293,
+0.3232232928276062,
+-0.2535533905029297,
+0.6767767071723938,
+0.45355337858200073,
+0.0669872984290123,
+-0.04433756694197655,
+0.9330127239227295,
+0.24433757364749908,
+0.35566243529319763,
+-0.3330127000808716,
+0.6443375945091248,
+0.5330126881599426,
+0.44999998807907104,
+-0.15000000596046448,
+0.949999988079071,
+0.3499999940395355,
+0.41495615243911743,
+-0.18504385650157928,
+0.9850438833236694,
+0.3850438594818115,
+0.34644660353660583,
+-0.07677669823169708,
+1.0535533428192139,
+0.2767767012119293,
+0.5232232809066772,
+-0.2535533905029297,
+0.8767766952514648,
+0.45355337858200073,
+0.26698729395866394,
+-0.04433756694197655,
+1.1330126523971558,
+0.24433757364749908,
+0.5556624531745911,
+-0.3330127000808716,
+0.8443375825881958,
+0.5330126881599426,
+0.6499999761581421,
+-0.15000000596046448,
+1.149999976158142,
+0.3499999940395355,
+0.6149561405181885,
+-0.18504385650157928,
+1.1850438117980957,
+0.3850438594818115,
+0.5464466214179993,
+-0.07677669823169708,
+1.2535533905029297,
+0.2767767012119293,
+0.7232233285903931,
+-0.2535533905029297,
+1.0767767429351807,
+0.45355337858200073,
+0.4669873118400574,
+-0.04433756694197655,
+1.3330127000808716,
+0.24433757364749908,
+0.7556624412536621,
+-0.3330127000808716,
+1.044337511062622,
+0.5330126881599426,
+-0.15000000596046448,
+0.05000000074505806,
+0.3499999940395355,
+0.550000011920929,
+-0.18504385650157928,
+0.01495614368468523,
+0.3850438594818115,
+0.5850438475608826,
+-0.2535533905029297,
+0.12322330474853516,
+0.45355337858200073,
+0.47677668929100037,
+-0.07677669823169708,
+-0.05355339124798775,
+0.2767767012119293,
+0.6535533666610718,
+-0.3330127000808716,
+0.1556624323129654,
+0.5330126881599426,
+0.4443375766277313,
+-0.04433756694197655,
+-0.13301269710063934,
+0.24433757364749908,
+0.7330126762390137,
+0.05000000074505806,
+0.05000000074505806,
+0.550000011920929,
+0.550000011920929,
+0.01495614368468523,
+0.01495614368468523,
+0.5850438475608826,
+0.5850438475608826,
+-0.05355339124798775,
+0.12322330474853516,
+0.6535533666610718,
+0.47677668929100037,
+0.12322330474853516,
+-0.05355339124798775,
+0.47677668929100037,
+0.6535533666610718,
+-0.13301269710063934,
+0.1556624323129654,
+0.7330126762390137,
+0.4443375766277313,
+0.1556624323129654,
+-0.13301269710063934,
+0.4443375766277313,
+0.7330126762390137,
+0.25,
+0.05000000074505806,
+0.75,
+0.550000011920929,
+0.2149561494588852,
+0.01495614368468523,
+0.7850438356399536,
+0.5850438475608826,
+0.1464466154575348,
+0.12322330474853516,
+0.8535534143447876,
+0.47677668929100037,
+0.3232232928276062,
+-0.05355339124798775,
+0.6767767071723938,
+0.6535533666610718,
+0.0669872984290123,
+0.1556624323129654,
+0.9330127239227295,
+0.4443375766277313,
+0.35566243529319763,
+-0.13301269710063934,
+0.6443375945091248,
+0.7330126762390137,
+0.44999998807907104,
+0.05000000074505806,
+0.949999988079071,
+0.550000011920929,
+0.41495615243911743,
+0.01495614368468523,
+0.9850438833236694,
+0.5850438475608826,
+0.34644660353660583,
+0.12322330474853516,
+1.0535533428192139,
+0.47677668929100037,
+0.5232232809066772,
+-0.05355339124798775,
+0.8767766952514648,
+0.6535533666610718,
+0.26698729395866394,
+0.1556624323129654,
+1.1330126523971558,
+0.4443375766277313,
+0.5556624531745911,
+-0.13301269710063934,
+0.8443375825881958,
+0.7330126762390137,
+0.6499999761581421,
+0.05000000074505806,
+1.149999976158142,
+0.550000011920929,
+0.6149561405181885,
+0.01495614368468523,
+1.1850438117980957,
+0.5850438475608826,
+0.5464466214179993,
+0.12322330474853516,
+1.2535533905029297,
+0.47677668929100037,
+0.7232233285903931,
+-0.05355339124798775,
+1.0767767429351807,
+0.6535533666610718,
+0.4669873118400574,
+0.1556624323129654,
+1.3330127000808716,
+0.4443375766277313,
+0.7556624412536621,
+-0.13301269710063934,
+1.044337511062622,
+0.7330126762390137,
+-0.15000000596046448,
+0.25,
+0.3499999940395355,
+0.75,
+-0.18504385650157928,
+0.2149561494588852,
+0.3850438594818115,
+0.7850438356399536,
+-0.2535533905029297,
+0.3232232928276062,
+0.45355337858200073,
+0.6767767071723938,
+-0.07677669823169708,
+0.1464466154575348,
+0.2767767012119293,
+0.8535534143447876,
+-0.3330127000808716,
+0.35566243529319763,
+0.5330126881599426,
+0.6443375945091248,
+-0.04433756694197655,
+0.0669872984290123,
+0.24433757364749908,
+0.9330127239227295,
+0.05000000074505806,
+0.25,
+0.550000011920929,
+0.75,
+0.01495614368468523,
+0.2149561494588852,
+0.5850438475608826,
+0.7850438356399536,
+-0.05355339124798775,
+0.3232232928276062,
+0.6535533666610718,
+0.6767767071723938,
+0.12322330474853516,
+0.1464466154575348,
+0.47677668929100037,
+0.8535534143447876,
+-0.13301269710063934,
+0.35566243529319763,
+0.7330126762390137,
+0.6443375945091248,
+0.1556624323129654,
+0.0669872984290123,
+0.4443375766277313,
+0.9330127239227295,
+0.25,
+0.25,
+0.75,
+0.75,
+0.2149561494588852,
+0.2149561494588852,
+0.7850438356399536,
+0.7850438356399536,
+0.1464466154575348,
+0.3232232928276062,
+0.8535534143447876,
+0.6767767071723938,
+0.3232232928276062,
+0.1464466154575348,
+0.6767767071723938,
+0.8535534143447876,
+0.0669872984290123,
+0.35566243529319763,
+0.9330127239227295,
+0.6443375945091248,
+0.35566243529319763,
+0.0669872984290123,
+0.6443375945091248,
+0.9330127239227295,
+0.44999998807907104,
+0.25,
+0.949999988079071,
+0.75,
+0.41495615243911743,
+0.2149561494588852,
+0.9850438833236694,
+0.7850438356399536,
+0.34644660353660583,
+0.3232232928276062,
+1.0535533428192139,
+0.6767767071723938,
+0.5232232809066772,
+0.1464466154575348,
+0.8767766952514648,
+0.8535534143447876,
+0.26698729395866394,
+0.35566243529319763,
+1.1330126523971558,
+0.6443375945091248,
+0.5556624531745911,
+0.0669872984290123,
+0.8443375825881958,
+0.9330127239227295,
+0.6499999761581421,
+0.25,
+1.149999976158142,
+0.75,
+0.6149561405181885,
+0.2149561494588852,
+1.1850438117980957,
+0.7850438356399536,
+0.5464466214179993,
+0.3232232928276062,
+1.2535533905029297,
+0.6767767071723938,
+0.7232233285903931,
+0.1464466154575348,
+1.0767767429351807,
+0.8535534143447876,
+0.4669873118400574,
+0.35566243529319763,
+1.3330127000808716,
+0.6443375945091248,
+0.7556624412536621,
+0.0669872984290123,
+1.044337511062622,
+0.9330127239227295,
+-0.15000000596046448,
+0.44999998807907104,
+0.3499999940395355,
+0.949999988079071,
+-0.18504385650157928,
+0.41495615243911743,
+0.3850438594818115,
+0.9850438833236694,
+-0.2535533905029297,
+0.5232232809066772,
+0.45355337858200073,
+0.8767766952514648,
+-0.07677669823169708,
+0.34644660353660583,
+0.2767767012119293,
+1.0535533428192139,
+-0.3330127000808716,
+0.5556624531745911,
+0.5330126881599426,
+0.8443375825881958,
+-0.04433756694197655,
+0.26698729395866394,
+0.24433757364749908,
+1.1330126523971558,
+0.05000000074505806,
+0.44999998807907104,
+0.550000011920929,
+0.949999988079071,
+0.01495614368468523,
+0.41495615243911743,
+0.5850438475608826,
+0.9850438833236694,
+-0.05355339124798775,
+0.5232232809066772,
+0.6535533666610718,
+0.8767766952514648,
+0.12322330474853516,
+0.34644660353660583,
+0.47677668929100037,
+1.0535533428192139,
+-0.13301269710063934,
+0.5556624531745911,
+0.7330126762390137,
+0.8443375825881958,
+0.1556624323129654,
+0.26698729395866394,
+0.4443375766277313,
+1.1330126523971558,
+0.25,
+0.44999998807907104,
+0.75,
+0.949999988079071,
+0.2149561494588852,
+0.41495615243911743,
+0.7850438356399536,
+0.9850438833236694,
+0.1464466154575348,
+0.5232232809066772,
+0.8535534143447876,
+0.8767766952514648,
+0.3232232928276062,
+0.34644660353660583,
+0.6767767071723938,
+1.0535533428192139,
+0.0669872984290123,
+0.5556624531745911,
+0.9330127239227295,
+0.8443375825881958,
+0.35566243529319763,
+0.26698729395866394,
+0.6443375945091248,
+1.1330126523971558,
+0.44999998807907104,
+0.44999998807907104,
+0.949999988079071,
+0.949999988079071,
+0.41495615243911743,
+0.41495615243911743,
+0.9850438833236694,
+0.9850438833236694,
+0.34644660353660583,
+0.5232232809066772,
+1.0535533428192139,
+0.8767766952514648,
+0.5232232809066772,
+0.34644660353660583,
+0.8767766952514648,
+1.0535533428192139,
+0.26698729395866394,
+0.5556624531745911,
+1.1330126523971558,
+0.8443375825881958,
+0.5556624531745911,
+0.26698729395866394,
+0.8443375825881958,
+1.1330126523971558,
+0.6499999761581421,
+0.44999998807907104,
+1.149999976158142,
+0.949999988079071,
+0.6149561405181885,
+0.41495615243911743,
+1.1850438117980957,
+0.9850438833236694,
+0.5464466214179993,
+0.5232232809066772,
+1.2535533905029297,
+0.8767766952514648,
+0.7232233285903931,
+0.34644660353660583,
+1.0767767429351807,
+1.0535533428192139,
+0.4669873118400574,
+0.5556624531745911,
+1.3330127000808716,
+0.8443375825881958,
+0.7556624412536621,
+0.26698729395866394,
+1.044337511062622,
+1.1330126523971558,
+-0.15000000596046448,
+0.6499999761581421,
+0.3499999940395355,
+1.149999976158142,
+-0.18504385650157928,
+0.6149561405181885,
+0.3850438594818115,
+1.1850438117980957,
+-0.2535533905029297,
+0.7232233285903931,
+0.45355337858200073,
+1.0767767429351807,
+-0.07677669823169708,
+0.5464466214179993,
+0.2767767012119293,
+1.2535533905029297,
+-0.3330127000808716,
+0.7556624412536621,
+0.5330126881599426,
+1.044337511062622,
+-0.04433756694197655,
+0.4669873118400574,
+0.24433757364749908,
+1.3330127000808716,
+0.05000000074505806,
+0.6499999761581421,
+0.550000011920929,
+1.149999976158142,
+0.01495614368468523,
+0.6149561405181885,
+0.5850438475608826,
+1.1850438117980957,
+-0.05355339124798775,
+0.7232233285903931,
+0.6535533666610718,
+1.0767767429351807,
+0.12322330474853516,
+0.5464466214179993,
+0.47677668929100037,
+1.2535533905029297,
+-0.13301269710063934,
+0.7556624412536621,
+0.7330126762390137,
+1.044337511062622,
+0.1556624323129654,
+0.4669873118400574,
+0.4443375766277313,
+1.3330127000808716,
+0.25,
+0.6499999761581421,
+0.75,
+1.149999976158142,
+0.2149561494588852,
+0.6149561405181885,
+0.7850438356399536,
+1.1850438117980957,
+0.1464466154575348,
+0.7232233285903931,
+0.8535534143447876,
+1.0767767429351807,
+0.3232232928276062,
+0.5464466214179993,
+0.6767767071723938,
+1.2535533905029297,
+0.0669872984290123,
+0.7556624412536621,
+0.9330127239227295,
+1.044337511062622,
+0.35566243529319763,
+0.4669873118400574,
+0.6443375945091248,
+1.3330127000808716,
+0.44999998807907104,
+0.6499999761581421,
+0.949999988079071,
+1.149999976158142,
+0.41495615243911743,
+0.6149561405181885,
+0.9850438833236694,
+1.1850438117980957,
+0.34644660353660583,
+0.7232233285903931,
+1.0535533428192139,
+1.0767767429351807,
+0.5232232809066772,
+0.5464466214179993,
+0.8767766952514648,
+1.2535533905029297,
+0.26698729395866394,
+0.7556624412536621,
+1.1330126523971558,
+1.044337511062622,
+0.5556624531745911,
+0.4669873118400574,
+0.8443375825881958,
+1.3330127000808716,
+0.6499999761581421,
+0.6499999761581421,
+1.149999976158142,
+1.149999976158142,
+0.6149561405181885,
+0.6149561405181885,
+1.1850438117980957,
+1.1850438117980957,
+0.5464466214179993,
+0.7232233285903931,
+1.2535533905029297,
+1.0767767429351807,
+0.7232233285903931,
+0.5464466214179993,
+1.0767767429351807,
+1.2535533905029297,
+0.4669873118400574,
+0.7556624412536621,
+1.3330127000808716,
+1.044337511062622,
+0.7556624412536621,
+0.4669873118400574,
+1.044337511062622,
+1.3330127000808716,
+-0.15833333134651184,
+-0.15833333134651184,
+0.49166667461395264,
+0.49166667461395264,
+-0.19388845562934875,
+-0.19388845562934875,
+0.5272217988967896,
+0.5272217988967896,
+-0.2929527461528778,
+-0.0631430372595787,
+0.6262860894203186,
+0.3964763581752777,
+-0.0631430372595787,
+-0.2929527461528778,
+0.3964763581752777,
+0.6262860894203186,
+-0.3962498605251312,
+-0.020972169935703278,
+0.729583203792572,
+0.3543055057525635,
+-0.020972169935703278,
+-0.3962498605251312,
+0.3543055057525635,
+0.729583203792572,
+0.17499999701976776,
+-0.15833333134651184,
+0.824999988079071,
+0.49166667461395264,
+0.13944487273693085,
+-0.19388845562934875,
+0.860555112361908,
+0.5272217988967896,
+0.04038059338927269,
+-0.0631430372595787,
+0.959619402885437,
+0.3964763581752777,
+0.2701902985572815,
+-0.2929527461528778,
+0.7298097014427185,
+0.6262860894203186,
+-0.06291650980710983,
+-0.020972169935703278,
+1.0629165172576904,
+0.3543055057525635,
+0.3123611509799957,
+-0.3962498605251312,
+0.6876388192176819,
+0.729583203792572,
+0.5083333253860474,
+-0.15833333134651184,
+1.1583333015441895,
+0.49166667461395264,
+0.47277820110321045,
+-0.19388845562934875,
+1.1938884258270264,
+0.5272217988967896,
+0.3737139403820038,
+-0.0631430372595787,
+1.2929527759552002,
+0.3964763581752777,
+0.6035236120223999,
+-0.2929527461528778,
+1.063143014907837,
+0.6262860894203186,
+0.27041682600975037,
+-0.020972169935703278,
+1.3962498903274536,
+0.3543055057525635,
+0.6456944942474365,
+-0.3962498605251312,
+1.0209721326828003,
+0.729583203792572,
+-0.15833333134651184,
+0.17499999701976776,
+0.49166667461395264,
+0.824999988079071,
+-0.19388845562934875,
+0.13944487273693085,
+0.5272217988967896,
+0.860555112361908,
+-0.2929527461528778,
+0.2701902985572815,
+0.6262860894203186,
+0.7298097014427185,
+-0.0631430372595787,
+0.04038059338927269,
+0.3964763581752777,
+0.959619402885437,
+-0.3962498605251312,
+0.3123611509799957,
+0.729583203792572,
+0.6876388192176819,
+-0.020972169935703278,
+-0.06291650980710983,
+0.3543055057525635,
+1.0629165172576904,
+0.17499999701976776,
+0.17499999701976776,
+0.824999988079071,
+0.824999988079071,
+0.13944487273693085,
+0.13944487273693085,
+0.860555112361908,
+0.860555112361908,
+0.04038059338927269,
+0.2701902985572815,
+0.959619402885437,
+0.7298097014427185,
+0.2701902985572815,
+0.04038059338927269,
+0.7298097014427185,
+0.959619402885437,
+-0.06291650980710983,
+0.3123611509799957,
+1.0629165172576904,
+0.6876388192176819,
+0.3123611509799957,
+-0.06291650980710983,
+0.6876388192176819,
+1.0629165172576904,
+0.5083333253860474,
+0.17499999701976776,
+1.1583333015441895,
+0.824999988079071,
+0.47277820110321045,
+0.13944487273693085,
+1.1938884258270264,
+0.860555112361908,
+0.3737139403820038,
+0.2701902985572815,
+1.2929527759552002,
+0.7298097014427185,
+0.6035236120223999,
+0.04038059338927269,
+1.063143014907837,
+0.959619402885437,
+0.27041682600975037,
+0.3123611509799957,
+1.3962498903274536,
+0.6876388192176819,
+0.6456944942474365,
+-0.06291650980710983,
+1.0209721326828003,
+1.0629165172576904,
+-0.15833333134651184,
+0.5083333253860474,
+0.49166667461395264,
+1.1583333015441895,
+-0.19388845562934875,
+0.47277820110321045,
+0.5272217988967896,
+1.1938884258270264,
+-0.2929527461528778,
+0.6035236120223999,
+0.6262860894203186,
+1.063143014907837,
+-0.0631430372595787,
+0.3737139403820038,
+0.3964763581752777,
+1.2929527759552002,
+-0.3962498605251312,
+0.6456944942474365,
+0.729583203792572,
+1.0209721326828003,
+-0.020972169935703278,
+0.27041682600975037,
+0.3543055057525635,
+1.3962498903274536,
+0.17499999701976776,
+0.5083333253860474,
+0.824999988079071,
+1.1583333015441895,
+0.13944487273693085,
+0.47277820110321045,
+0.860555112361908,
+1.1938884258270264,
+0.04038059338927269,
+0.6035236120223999,
+0.959619402885437,
+1.063143014907837,
+0.2701902985572815,
+0.3737139403820038,
+0.7298097014427185,
+1.2929527759552002,
+-0.06291650980710983,
+0.6456944942474365,
+1.0629165172576904,
+1.0209721326828003,
+0.3123611509799957,
+0.27041682600975037,
+0.6876388192176819,
+1.3962498903274536,
+0.5083333253860474,
+0.5083333253860474,
+1.1583333015441895,
+1.1583333015441895,
+0.47277820110321045,
+0.47277820110321045,
+1.1938884258270264,
+1.1938884258270264,
+0.3737139403820038,
+0.6035236120223999,
+1.2929527759552002,
+1.063143014907837,
+0.6035236120223999,
+0.3737139403820038,
+1.063143014907837,
+1.2929527759552002,
+0.27041682600975037,
+0.6456944942474365,
+1.3962498903274536,
+1.0209721326828003,
+0.6456944942474365,
+0.27041682600975037,
+1.0209721326828003,
+1.3962498903274536,
+-0.15000000596046448,
+-0.15000000596046448,
+0.6499999761581421,
+0.6499999761581421,
+-0.18588989973068237,
+-0.18588989973068237,
+0.6858898997306824,
+0.6858898997306824,
+-0.3156854212284088,
+-0.03284271061420441,
+0.8156854510307312,
+0.5328426957130432,
+-0.03284271061420441,
+-0.3156854212284088,
+0.5328426957130432,
+0.8156854510307312,
+-0.44282031059265137,
+0.019059892743825912,
+0.9428203105926514,
+0.4809401035308838,
+0.019059892743825912,
+-0.44282031059265137,
+0.4809401035308838,
+0.9428203105926514,
+0.3499999940395355,
+-0.15000000596046448,
+1.149999976158142,
+0.6499999761581421,
+0.3141101002693176,
+-0.18588989973068237,
+1.1858898401260376,
+0.6858898997306824,
+0.1843145787715912,
+-0.03284271061420441,
+1.3156853914260864,
+0.5328426957130432,
+0.4671572744846344,
+-0.3156854212284088,
+1.032842755317688,
+0.8156854510307312,
+0.05717967823147774,
+0.019059892743825912,
+1.4428203105926514,
+0.4809401035308838,
+0.5190598964691162,
+-0.44282031059265137,
+0.9809401035308838,
+0.9428203105926514,
+-0.15000000596046448,
+0.3499999940395355,
+0.6499999761581421,
+1.149999976158142,
+-0.18588989973068237,
+0.3141101002693176,
+0.6858898997306824,
+1.1858898401260376,
+-0.3156854212284088,
+0.4671572744846344,
+0.8156854510307312,
+1.032842755317688,
+-0.03284271061420441,
+0.1843145787715912,
+0.5328426957130432,
+1.3156853914260864,
+-0.44282031059265137,
+0.5190598964691162,
+0.9428203105926514,
+0.9809401035308838,
+0.019059892743825912,
+0.05717967823147774,
+0.4809401035308838,
+1.4428203105926514,
+0.3499999940395355,
+0.3499999940395355,
+1.149999976158142,
+1.149999976158142,
+0.3141101002693176,
+0.3141101002693176,
+1.1858898401260376,
+1.1858898401260376,
+0.1843145787715912,
+0.4671572744846344,
+1.3156853914260864,
+1.032842755317688,
+0.4671572744846344,
+0.1843145787715912,
+1.032842755317688,
+1.3156853914260864,
+0.05717967823147774,
+0.5190598964691162,
+1.4428203105926514,
+0.9809401035308838,
+0.5190598964691162,
+0.05717967823147774,
+0.9809401035308838,
+1.4428203105926514,
+0.02500000037252903,
+0.02500000037252903,
+0.9750000238418579,
+0.9750000238418579,
+0.012660282664000988,
+0.012660282664000988,
+0.9873397350311279,
+0.9873397350311279,
+-0.17175143957138062,
+0.1641242802143097,
+1.1717514991760254,
+0.8358757495880127,
+0.1641242802143097,
+-0.17175143957138062,
+0.8358757495880127,
+1.1717514991760254,
+-0.3227241337299347,
+0.2257586270570755,
+1.3227241039276123,
+0.7742413878440857,
+0.2257586270570755,
+-0.3227241337299347,
+0.7742413878440857,
+1.3227241039276123,
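
The four-value groups up to this point are consistent with normalized corner-form prior boxes (anchors) for an SSD-style detector, and the long run of (0.1, 0.1, 0.2, 0.2) quadruples that follows matches the conventional SSD box-coding "variances", one quadruple per anchor. As a minimal sketch (assuming center-size coding; the names below are illustrative, not part of this SDK's API), such variances are typically applied when decoding a box regression output against its prior:

/*
 * Hedged sketch, not SDK code: decode one SSD-style box regression
 * against a corner-form prior using per-coordinate variances.
 */
#include <math.h>
#include <stdio.h>

typedef struct { float xmin, ymin, xmax, ymax; } box_t;

/* prior: normalized corner-form anchor; loc: raw regression output;
 * var: variances, (0.1, 0.1, 0.2, 0.2) in the data above. */
static box_t decode_box(box_t prior, const float loc[4], const float var[4])
{
    float pw  = prior.xmax - prior.xmin;
    float ph  = prior.ymax - prior.ymin;
    float pcx = prior.xmin + 0.5f * pw;
    float pcy = prior.ymin + 0.5f * ph;

    /* center-size decoding: offsets are scaled back up by the variances */
    float cx = pcx + loc[0] * var[0] * pw;
    float cy = pcy + loc[1] * var[1] * ph;
    float w  = pw * expf(loc[2] * var[2]);
    float h  = ph * expf(loc[3] * var[3]);

    box_t out = { cx - 0.5f * w, cy - 0.5f * h,
                  cx + 0.5f * w, cy + 0.5f * h };
    return out;
}

int main(void)
{
    box_t prior  = { 0.10f, 0.10f, 0.20f, 0.20f }; /* sample anchor */
    float loc[4] = { 0.5f, -0.5f, 0.1f, 0.1f };    /* dummy regression */
    float var[4] = { 0.1f, 0.1f, 0.2f, 0.2f };     /* variances from this file */
    box_t b = decode_box(prior, loc, var);
    printf("decoded: %.4f %.4f %.4f %.4f\n", b.xmin, b.ymin, b.xmax, b.ymax);
    return 0;
}

The variances exist so that regression targets are divided by them during training, keeping the offsets at a roughly unit scale; decoding multiplies them back, as above.
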
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+… (elided: the four-value cycle above repeats verbatim for roughly 2,900 further added lines in this span. Only two distinct values occur — 0.10000000149011612 and 0.20000000298023224, the nearest 32-bit float approximations of 0.1 and 0.2 printed at double precision — in the fixed order 0.1, 0.1, 0.2, 0.2 per group; they appear to be per-anchor constants for the face-detect model.)
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+0.10000000149011612,
+0.10000000149011612,
+0.20000000298023224,
+0.20000000298023224,
+};

+ 432 - 0
test/face_detect/process.c

@@ -0,0 +1,432 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto-generated by HHB_VERSION "2.0.21" */
+
+#include "process.h"
+
+#include "io.h"
+
+#define LINEAR_INTERPOLATION(l_value, r_value, coff) \
+  ({ (1 - (coff)) * (l_value) + (coff) * (r_value); })
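
A quick sanity check of this macro (plain arithmetic, not from the SDK sources): LINEAR_INTERPOLATION(10.0, 20.0, 0.25) evaluates to (1 - 0.25) * 10.0 + 0.25 * 20.0 = 12.5, i.e. a quarter of the way from the left value to the right.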
+
+/******************************************************************************
+ *                                                                            *
+ *                      Static Functions                                      *
+ *                                                                            *
+ * ***************************************************************************/
+
+/*!
+ * \brief Clip data to range: [v_min, v_max]
+ *
+ * \param data The value to be clipped
+ * \param v_min The left boundary
+ * \param v_max The right boundary
+ * \return The clipped value
+ *
+ */
+static float _clip(float data, float v_min, float v_max) {
+  data = data >= v_min ? data : v_min;
+  data = data <= v_max ? data : v_max;
+  return data;
+}
+
+/*!
+ * \brief Get data from a tensor file or text file.
+ * Note: the file must contain exactly one value per line.
+ *
+ * \param filename The file path, the suffix is .tensor or .txt
+ * \param size The number of data items
+ *
+ */
+static float* _get_data_from_file(const char* filename, uint32_t size) {
+  uint32_t j;
+  float fval = 0.0;
+  float* buffer = NULL;
+  FILE* fp = fopen(filename, "rb");
+  if (fp == NULL) {
+    printf("Invalid input file: %s\n", filename);
+    return NULL;
+  }
+
+  buffer = malloc(size * sizeof(float));
+  if (buffer == NULL) {
+    printf("Malloc fail\n");
+    fclose(fp);
+    return NULL;
+  }
+  for (j = 0; j < size; j++) {
+    if (fscanf(fp, "%f ", &fval) != 1) {
+      printf("Invalid input file\n");
+      free(buffer);
+      fclose(fp);
+      return NULL;
+    }
+    buffer[j] = fval;
+  }
+
+  fclose(fp);
+  return buffer;
+}
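
For reference, a minimal sketch of feeding this reader from inside the same file (the file name and its contents are illustrative assumptions, not SDK fixtures); the expected layout is one float per line:

/* mean.txt (hypothetical):
 *   104.0
 *   117.0
 *   123.0
 */
float* vals = _get_data_from_file("mean.txt", 3);
if (vals != NULL) {
  /* vals[0] == 104.0f, vals[1] == 117.0f, vals[2] == 123.0f */
  free(vals);
}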
+
+/*!
+ * \brief Obtain the number of pixels in the given image.
+ *
+ * \param img The object of struct image_data
+ * \return The number of pixels
+ */
+uint32_t get_size(struct image_data img) {
+  uint32_t i;
+  uint32_t sz = 1;
+  for (i = 0; i < img.dim; i++) {
+    sz *= img.shape[i];
+  }
+  return sz;
+}
+
+/*!
+ * \brief Get Value of image at (h_idx, w_idx, c_idx)
+ *
+ * \param img The pointer of struct image_data
+ * \param h_idx The index value of the point's height
+ * \param w_idx The index value of the point's width
+ * \param c_idx The index value of the point's channel
+ * \return The pixel value of image at (h_idx, w_idx, c_idx)
+ *
+ */
+float get_value(struct image_data img, uint32_t h_idx, uint32_t w_idx, uint32_t c_idx) {
+  uint32_t height = img.shape[0];
+  uint32_t width = img.shape[1];
+  uint32_t channel = img.shape[2];
+  if (h_idx >= height || w_idx >= width || c_idx >= channel) {
+    printf("Invalid shape index! (%u, %u, %u)\n", h_idx, w_idx, c_idx);
+    exit(1);
+  }
+  uint32_t idx = h_idx * (width * channel) + w_idx * channel + c_idx;
+  return img.data[idx];
+}
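
To make the flat-index math above concrete (plain arithmetic, assuming a hypothetical 4x3 image with 3 channels): the point (h_idx, w_idx, c_idx) = (2, 1, 0) maps to idx = 2 * (3 * 3) + 1 * 3 + 0 = 21 in the row-major HWC buffer.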
+
+/*!
+ * \brief Get the data from the specified file.
+ * Generally, data obtained from a tensor file can be used directly for model
+ * inference, while data obtained from an image file needs further preprocessing.
+ *
+ * \param filename The path of the data file
+ * \param size The expected number of values. Ignored if the file is an image
+ * \return A pointer to a struct image_data containing the loaded data, or NULL on failure
+ */
+struct image_data* get_input_data(const char* filename, uint32_t size) {
+  enum file_type type;
+  struct image_data* img = calloc(1, sizeof(struct image_data));
+  type = get_file_type(filename);
+  if (type == FILE_TENSOR) {
+    // read data from tensor or txt file.
+    img->data = _get_data_from_file(filename, size);
+  } else if (type == FILE_BIN) {
+    img->data = (float*)get_binary_from_file(filename, NULL);
+  } else {
+    free(img);
+    return NULL;
+  }
+  if (img->data == NULL) {
+    free(img);
+    return NULL;
+  }
+  return img;
+}
+
+void free_image_data(struct image_data* img) {
+  if (img == NULL) {
+    return;
+  }
+  free(img->data);
+  free(img->shape);
+  free(img);
+}
+
+/*!
+ * \brief Subtract mean values (RGB). If the channel of the data is 1, then
+ * only r_mean is used.
+ *
+ * \param img The pointer of struct image_data
+ * \param r_mean The mean value of r-channel in img->data
+ * \param g_mean The mean value of g-channel in img->data that will be ignored
+ *               if the dim of original image's channel is 1
+ * \param b_mean The mean value of b-channel in img->data that will be ignored
+ *               if the dim of original image's channel is 1
+ */
+void sub_mean(struct image_data* img, float r_mean, float g_mean, float b_mean) {
+  uint32_t sz, channel;
+  uint32_t idx;
+
+  channel = img->shape[2];
+  if (channel != 1 && channel != 3) {
+    printf("Don't know how to sub mean with channel=%d\n", channel);
+    exit(1);
+  }
+  sz = get_size(*img);
+  for (idx = 0; idx < sz; idx += channel) {
+    if (channel == 1) {
+      img->data[idx] -= r_mean;
+    } else {
+      img->data[idx + 0] -= r_mean;
+      img->data[idx + 1] -= g_mean;
+      img->data[idx + 2] -= b_mean;
+    }
+  }
+}
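
A typical call sequence, sketched with illustrative normalization constants (these per-channel means and the scale are assumptions for the example, not values shipped with the SDK):

/* img holds HWC float data in RGB order */
sub_mean(img, 123.68f, 116.78f, 103.94f);  /* illustrative per-channel means */
data_scale(img, 1.0f / 58.82f);            /* illustrative scale factor */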
+
+/*!
+ * \brief Scale the image data with specified value.
+ *
+ * \param img The pointer of struct image_data
+ * \param scale All the data in image will be multiplied by this value
+ */
+void data_scale(struct image_data* img, float scale) {
+  uint32_t idx;
+  for (idx = 0; idx < get_size(*img); idx++) {
+    img->data[idx] *= scale;
+  }
+}
+
+/*!
+ * \brief Crop the image data to the specified shape, using a central crop.
+ *
+ * \param img The pointer of struct image_data
+ * \param height The target height after cropping
+ * \param width The target width after cropping
+ *
+ */
+void data_crop(struct image_data* img, uint32_t height, uint32_t width) {
+  uint32_t ori_width, ori_height, ori_channel;
+  uint32_t row, col, c;
+  uint32_t start_row, start_col;
+
+  if (img->shape[0] == height && img->shape[1] == width) {
+    return;
+  }
+
+  ori_height = img->shape[0];
+  ori_width = img->shape[1];
+  ori_channel = img->shape[2];
+
+  if (width > ori_width || height > ori_height) {
+    printf("Can not crop data by (%d, %d)\n", height, width);
+    exit(1);
+  }
+  float* new_data = (float*)malloc(sizeof(float) * (height * width * ori_channel));  // NOLINT
+
+  start_row = ori_height / 2 - height / 2;
+  start_col = ori_width / 2 - width / 2;
+  for (row = 0; row < height; row++) {
+    for (col = 0; col < width; col++) {
+      for (c = 0; c < ori_channel; c++) {
+        new_data[row * (width * ori_channel) + col * ori_channel + c] =
+            get_value(*img, start_row + row, start_col + col, c);
+      }
+    }
+  }
+  free(img->data);
+  img->data = new_data;
+  img->shape[0] = height;
+  img->shape[1] = width;
+}
+
+/*!
+ * \brief Resize the image into target image size with bilinear interpolation method.
+ *
+ *            |                   |                  |
+ *            |                   |                  |
+ *  ---p00(srcY_i, srcX_i)--------f1------p01(srcY_i, srcX_i+1)-----
+ *            |                   |                  |
+ *            |   p(srcY_i+h_coff, srcX_i+w_coff)    |
+ *            |                   |                  |
+ *  ---p10(srcY_i+1, srcX_i)------f2-------p11(srcY_i+1, srcX_i+1)-----
+ *            |                   |                  |
+ *            |                   |                  |
+ *
+ * srcX(or srcY) can be got by:
+ *      src = (dst + 0.5) * scale - 0.5
+ * and
+ *      coff = src - floor(src), which denotes the weight in a single linear interpolation.
+ * Finally, we can get the value as follows:
+ *      f1 = p00 * (1-coff1) + coff1 * p01
+ *      f2 = p10 * (1-coff1) + coff1 * p11
+ *      p = f1 * (1-coff2) + coff2 * f2
+ *
+ * \param img The pointer of struct image_data, which denotes the image data that
+ *              will be resized.
+ * \param dst_height The height of the image after resizing.
+ * \param dst_width The width of the image after resizing.
+ *
+ */
+void imresize(struct image_data* img, uint32_t dst_height, uint32_t dst_width) {
+  uint32_t dstX, dstY;
+  float srcX_f, srcY_f;  // float index
+  int srcX_i, srcY_i;    // integer index
+  float w_coff, h_coff;
+  float scaleX = (float)img->shape[1] / (float)dst_width;   // NOLINT
+  float scaleY = (float)img->shape[0] / (float)dst_height;  // NOLINT
+
+  float up_left, bottom_left, up_right, bottom_right;
+  uint32_t c;  // index of channel
+  float f1, f2;
+  float* resized_data;
+
+  if (img->shape[0] == dst_height && img->shape[1] == dst_width) {
+    return;
+  }
+  resized_data =
+      (float*)malloc(sizeof(float) * (dst_height * dst_width * img->shape[2]));  // NOLINT
+  for (dstY = 0; dstY < dst_height; dstY++) {
+    for (dstX = 0; dstX < dst_width; dstX++) {
+      // Get the mapping position of the current point in the original image
+      srcX_f = ((float)dstX + 0.5) * scaleX - 0.5;  // NOLINT
+      srcY_f = ((float)dstY + 0.5) * scaleY - 0.5;  // NOLINT
+      // Get weight in interpolation
+      w_coff = srcX_f - floor(srcX_f);
+      h_coff = srcY_f - floor(srcY_f);
+
+      srcX_i = floor(srcX_f);
+      srcY_i = floor(srcY_f);
+
+      for (c = 0; c < img->shape[2]; c++) {
+        // Get the pixel values of four points around
+        up_left = get_value(*img, _clip(srcY_i, 0, img->shape[0] - 1),
+                            _clip(srcX_i, 0, img->shape[1] - 1), c);
+        up_right = get_value(*img, _clip(srcY_i, 0, img->shape[0] - 1),
+                             _clip(srcX_i + 1, 0, img->shape[1] - 1), c);
+        bottom_left = get_value(*img, _clip(srcY_i + 1, 0, img->shape[0] - 1),
+                                _clip(srcX_i, 0, img->shape[1] - 1), c);
+        bottom_right = get_value(*img, _clip(srcY_i + 1, 0, img->shape[0] - 1),
+                                 _clip(srcX_i + 1, 0, img->shape[1] - 1), c);
+
+        // Horizontal linear interpolation
+        f1 = LINEAR_INTERPOLATION(up_left, up_right, w_coff);
+        f2 = LINEAR_INTERPOLATION(bottom_left, bottom_right, w_coff);
+        // Vertical linear interpolation
+        resized_data[dstY * (dst_width * img->shape[2]) + dstX * img->shape[2] + c] =
+            LINEAR_INTERPOLATION(f1, f2, h_coff);
+      }
+    }
+  }
+  // Update data in place
+  free(img->data);
+  img->data = resized_data;
+  img->shape[0] = dst_height;
+  img->shape[1] = dst_width;
+}
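
To make the source-coordinate mapping concrete (pure arithmetic on the formula above): shrinking a 304-pixel-wide image to 152 pixels gives scaleX = 304 / 152 = 2.0, so destination column dstX = 10 maps to srcX_f = (10 + 0.5) * 2.0 - 0.5 = 20.5; then srcX_i = 20 and w_coff = 0.5, meaning the output pixel blends source columns 20 and 21 equally.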
+
+/*!
+ * \brief Convert image from RGB to BGR.
+ *
+ * \param img The pointer of struct image_data
+ */
+void imrgb2bgr(struct image_data* img) {
+  uint32_t idx;
+  float tmp;
+  if (img->dim != 3) {
+    printf("Invalid dim: %d\n", img->dim);
+    return;
+  }
+  if (img->shape[2] == 1) {
+    return;
+  } else if (img->shape[2] != 3) {
+    printf("Invalid channel: %d\n", img->shape[2]);
+    return;
+  } else {
+    for (idx = 0; idx < get_size(*img); idx += 3) {
+      tmp = img->data[idx];
+      img->data[idx] = img->data[idx + 2];
+      img->data[idx + 2] = tmp;
+    }
+  }
+}
+
+/*!
+ * \brief Convert image data from HWC to CHW.
+ *
+ * \param img The pointer of struct image_data
+ *
+ */
+void imhwc2chw(struct image_data* img) {
+  uint32_t row, col, channel;
+  float* transposed_data = NULL;
+  uint32_t H, W, C;
+  if (img->dim != 3) {
+    printf("Invalid dim: %d\n", img->dim);
+    return;
+  }
+  H = img->shape[0];
+  W = img->shape[1];
+  C = img->shape[2];
+  transposed_data = (float*)malloc(sizeof(float) * get_size(*img));  // NOLINT
+  for (channel = 0; channel < C; channel++) {
+    for (row = 0; row < H; row++) {
+      for (col = 0; col < W; col++) {
+        transposed_data[channel * (H * W) + row * W + col] = get_value(*img, row, col, channel);
+      }
+    }
+  }
+  // Update image data
+  free(img->data);
+  img->data = transposed_data;
+  img->shape[0] = C;
+  img->shape[1] = H;
+  img->shape[2] = W;
+}
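
As a worked example of the transpose above (plain arithmetic for a hypothetical 2x2 RGB image, H = W = 2, C = 3): the element at (row, col, channel) = (1, 0, 2) moves from source index 1 * (2 * 3) + 0 * 3 + 2 = 8 to destination index 2 * (2 * 2) + 1 * 2 + 0 = 10, so all values of one channel end up contiguous.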
+
+/*!
+ * \brief Convert non-RGB data to RGB data.
+ * For example, the shape of gray image data is (h, w, 1) and the shape of
+ * RGBA image data is (h, w, 4); all such image data should be converted
+ * to (h, w, 3) if necessary.
+ *
+ * \param img The pointer of struct image_data
+ *
+ */
+void im2rgb(struct image_data* img) {
+  uint32_t idx, cnt = 0;
+  float* new_data = NULL;
+  uint32_t new_size, ori_size;
+  uint32_t ori_channel;
+
+  ori_channel = img->shape[2];
+  if (ori_channel == 3) {
+    return;
+  }
+  if (ori_channel == 2 || ori_channel > 4) {
+    printf("Invalid dim: %d\n", ori_channel);
+    exit(1);
+  }
+  ori_size = get_size(*img);
+  new_size = img->shape[0] * img->shape[1] * 3;
+  new_data = (float*)malloc(sizeof(float) * new_size);  // NOLINT
+
+  for (idx = 0; idx < ori_size; idx++) {
+    if (ori_channel == 1) {
+      new_data[idx * 3 + 0] = img->data[idx];
+      new_data[idx * 3 + 1] = img->data[idx];
+      new_data[idx * 3 + 2] = img->data[idx];
+    } else if (ori_channel == 4) {
+      if ((idx + 1) % 4 == 0) continue;
+      new_data[cnt] = img->data[idx];
+      cnt++;
+    }
+  }
+
+  free(img->data);
+  img->data = new_data;
+  img->shape[2] = 3;
+}
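
Taken together, the helpers above form the usual preprocessing chain for the face-detect samples. A minimal sketch of how a caller might wire them up (the file name, target size, and normalization constants are assumptions for illustration, not values mandated by this SDK):

#include "process.h"

int preprocess_example(void) {
  /* Load raw float data; shape/dim must be filled in by the caller
   * when reading from a bare .tensor/.bin file. */
  struct image_data* img = get_input_data("input.tensor", 304 * 304 * 3);
  if (img == NULL) return -1;
  img->dim = 3;
  img->shape = malloc(3 * sizeof(uint32_t));
  img->shape[0] = 304; img->shape[1] = 304; img->shape[2] = 3;

  im2rgb(img);                            /* gray/RGBA -> RGB if needed */
  imresize(img, 304, 304);                /* bilinear resize (no-op here) */
  imrgb2bgr(img);                         /* match a BGR-trained model */
  sub_mean(img, 127.5f, 127.5f, 127.5f);  /* illustrative means */
  data_scale(img, 1.0f / 127.5f);         /* illustrative scale */
  imhwc2chw(img);                         /* CHW layout for inference */

  free_image_data(img);
  return 0;
}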

+ 55 - 0
test/face_detect/process.h

@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* auto-generated by HHB_VERSION "2.0.21" */
+
+#ifndef PROCESS_H_
+#define PROCESS_H_
+
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "io.h"
+
+struct image_data {
+  float* data;      // the image data
+  uint32_t* shape;  // the shape of the image, HWC by default
+  uint32_t dim;     // the number of dimensions in shape
+};
+
+/* Utilities to process image data */
+uint32_t get_size(struct image_data img);
+float get_value(struct image_data img, uint32_t h_idx, uint32_t w_idx, uint32_t c_idx);
+struct image_data* get_input_data(const char* filename, uint32_t size);
+void free_image_data(struct image_data* img);
+
+void sub_mean(struct image_data* img, float r_mean, float g_mean, float b_mean);
+void data_scale(struct image_data* img, float scale);
+void data_crop(struct image_data* img, uint32_t height, uint32_t width);
+
+/* Main image processing operators */
+void imresize(struct image_data* img, uint32_t dst_height, uint32_t dst_width);
+void imrgb2bgr(struct image_data* img);
+void imhwc2chw(struct image_data* img);
+void im2rgb(struct image_data* img);
+
+#endif  // PROCESS_H_