
Linux_SDK_V0.9.5

commit 67762bd871 by thead_admin, 1 year ago
14 changed files with 2579 additions and 0 deletions
  1. .gitignore (+28, -0)
  2. Makefile (+116, -0)
  3. README.md (+3, -0)
  4. driver/Makefile (+47, -0)
  5. driver/rsvmem_pool.c (+135, -0)
  6. driver/rsvmem_pool.h (+35, -0)
  7. driver/video_memory.c (+1539, -0)
  8. driver/video_memory.h (+62, -0)
  9. lib/Makefile (+16, -0)
  10. lib/video_mem.c (+289, -0)
  11. lib/video_mem.h (+64, -0)
  12. test/Makefile (+70, -0)
  13. test/vidalloc_test (BIN)
  14. test/video_memory_test.c (+175, -0)

+ 28 - 0
.gitignore

@@ -0,0 +1,28 @@
+/.auto.deps
+/.config.cmd
+/.config.old
+/..config.tmp
+/.config
+/.vscode
+*.tmp
+*.depend
+*.o
+*.a
+*.o.d
+*.o.cmd
+*.a.cmd
+*.ko.cmd
+*.ko
+*.mod
+*.mod.c
+*.mod.cmd
+*.symvers.cmd
+*.order.cmd
+*.order
+*.orig
+Module.symvers
+out/
+output/
+obj/
+dependencies/
+test/vidmem_test

+ 116 - 0
Makefile

@@ -0,0 +1,116 @@
+##
+ # Copyright (C) 2020 Alibaba Group Holding Limited
+##
+ifneq ($(wildcard ../.param),)
+  include ../.param
+endif
+
+#CONFIG_DEBUG_MODE=1
+CONFIG_OUT_ENV=hwlinux
+
+CONFIG_BUILD_DRV_EXTRA_PARAM:=""
+CONFIG_BUILD_LIB_EXTRA_PARAM:=""
+CONFIG_BUILD_TST_EXTRA_PARAM:=""
+
+DIR_TARGET_BASE=bsp/vidmem
+DIR_TARGET_KO  =bsp/vidmem/ko
+DIR_TARGET_TEST=bsp/vidmem/test
+
+MODULE_NAME=vidmem
+BUILD_LOG_START="\033[47;30m>>> $(MODULE_NAME) $@ begin\033[0m"
+BUILD_LOG_END  ="\033[47;30m<<< $(MODULE_NAME) $@ end\033[0m"
+
+#
+# Do a parallel build with multiple jobs, based on the number of CPUs online
+# in this system: 'make -j8' on an 8-CPU system, etc.
+#
+# (To override it, run 'make JOBS=1' and similar.)
+#
+
+ifeq ($(JOBS),)
+  JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null)
+  ifeq ($(JOBS),)
+    JOBS := 1
+  endif
+endif
+
+all:    info driver lib test install_local_output install_rootfs
+.PHONY: info driver lib test install_local_output install_rootfs \
+        install_prepare install_addons clean_driver clean_test clean_output clean
+
+info:
+	@echo $(BUILD_LOG_START)
+	@echo "  ====== Build Info from repo project ======"
+	@echo "    BUILDROOT_DIR="$(BUILDROOT_DIR)
+	@echo "    CROSS_COMPILE="$(CROSS_COMPILE)
+	@echo "    LINUX_DIR="$(LINUX_DIR)
+	@echo "    ARCH="$(ARCH)
+	@echo "    BOARD_NAME="$(BOARD_NAME)
+	@echo "    KERNEL_ID="$(KERNELVERSION)
+	@echo "    KERNEL_DIR="$(LINUX_DIR)
+	@echo "    INSTALL_DIR_ROOTFS="$(INSTALL_DIR_ROOTFS)
+	@echo "    INSTALL_DIR_SDK="$(INSTALL_DIR_SDK)
+	@echo "  ====== Build configuration by settings ======"
+#	@echo "    CONFIG_DEBUG_MODE="$(CONFIG_DEBUG_MODE)
+	@echo "    CONFIG_OUT_ENV="$(CONFIG_OUT_ENV)
+	@echo "    JOBS="$(JOBS)
+	@echo $(BUILD_LOG_END)
+
+driver:
+	@echo $(BUILD_LOG_START)
+	make -C $(LINUX_DIR) M=$(PWD)/driver ARCH=$(ARCH) modules
+	@echo $(BUILD_LOG_END)
+
+clean_driver:
+	@echo $(BUILD_LOG_START)
+	make -C driver KDIR=$(LINUX_DIR) clean
+	@echo $(BUILD_LOG_END)
+
+lib:
+	@echo $(BUILD_LOG_START)
+	make -w -C lib
+	@echo $(BUILD_LOG_END)
+
+clean_lib:
+	@echo $(BUILD_LOG_START)
+	make -C lib KDIR=$(LINUX_DIR) clean
+	@echo $(BUILD_LOG_END)
+
+test: driver
+	@echo $(BUILD_LOG_START)
+	make -w -C test hwlinux
+	@echo $(BUILD_LOG_END)
+
+clean_test:
+	@echo $(BUILD_LOG_START)
+	make clean -C test
+	@echo $(BUILD_LOG_END)
+
+install_prepare:
+	mkdir -p ./output/rootfs/$(DIR_TARGET_KO)
+	mkdir -p ./output/rootfs/$(DIR_TARGET_TEST)
+
+install_addons: install_prepare
+	@echo $(BUILD_LOG_START)
+	@echo $(BUILD_LOG_END)
+
+install_local_output: driver lib test install_addons
+	@echo $(BUILD_LOG_START)
+	find ./driver -name "*.ko" | xargs -i cp -f {} ./output/rootfs/$(DIR_TARGET_KO)
+	cp -f ./test/vidmem_test ./output/rootfs/$(DIR_TARGET_TEST)
+	@if [ "`command -v tree`" != "" ]; then \
+	    tree ./output/rootfs;             \
+	fi
+	@echo $(BUILD_LOG_END)
+
+install_rootfs: install_local_output
+	@echo $(BUILD_LOG_START)
+	@echo $(BUILD_LOG_END)
+
+clean_output:
+	@echo $(BUILD_LOG_START)
+	rm -rf ./output
+	@echo $(BUILD_LOG_END)
+
+clean: clean_output clean_driver clean_lib clean_test
+

+ 3 - 0
README.md

@@ -0,0 +1,3 @@
+# How to build
+
+# Description of each directory

+ 47 - 0
driver/Makefile

@@ -0,0 +1,47 @@
+# Copyright 2018 VeriSilicon. All Rights Reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+# USA.
+
+DEBUG ?= n
+ifeq ($(DEBUG),y)
+  DEBFLAGS = -O -g -DVIDMEM_DEBUG
+else
+  DEBFLAGS = -O2
+endif
+
+EXTRA_CFLAGS += $(DEBFLAGS)
+
+ifneq ($(KERNELRELEASE),)
+# recursive call from kernel build system
+
+vidmem-objs := video_memory.o rsvmem_pool.o
+obj-m         += vidmem.o
+
+else
+
+#KDIR := /export/Testing/Board_Version_Control/SW_Common/SOCLE_MDK-3D/openlinux/2.6.29/v0_5/android_linux-2.6.29
+KVER := $(shell uname -r)
+KDIR := /lib/modules/$(KVER)/build
+
+PWD  := $(shell pwd)
+
+all:
+	$(MAKE) -C $(KDIR) M=$(PWD) modules
+
+endif
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions *.mod
+	rm -rf modules.order Module.symvers

+ 135 - 0
driver/rsvmem_pool.c

@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2022 Alibaba Group. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/mman.h>
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+#include "rsvmem_pool.h"
+
+/* Minimum allocation order: 2^12 = 4096 bytes (one page) */
+#define GEN_POOL_ALLOC_ORDER 12
+
+static rsvmem_pool_info_t rsvmem_pool_regions[MAX_RSVMEM_REGION_COUNT];
+
+int rsvmem_pool_create(struct device *dev)
+{
+	struct device_node *np;
+	struct device_node *rsvmem_node;
+	struct resource res;
+	int pool_id = 0;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return -EINVAL;
+
+	np = dev->of_node;
+	rsvmem_node = of_parse_phandle(np, "memory-region", 0);
+	if (!rsvmem_node) {
+		dev_notice(dev, "No memory region node\n");
+		return -ENODEV;
+	}
+
+	while (pool_id < MAX_RSVMEM_REGION_COUNT &&
+	       of_address_to_resource(rsvmem_node, pool_id, &res) == 0) {
+		struct gen_pool *pool = gen_pool_create(GEN_POOL_ALLOC_ORDER, -1);
+		if (pool == NULL) {
+			dev_err(dev, "Failed to create reserved memory pool region[%d]\n", pool_id);
+			return -ENOMEM;
+		}
+
+		ret = gen_pool_add(pool, res.start, resource_size(&res), -1);
+		if (ret) {
+			dev_err(dev, "%s: gen_pool_add failed\n", __func__);
+			gen_pool_destroy(pool);
+			return ret;
+		}
+
+		rsvmem_pool_regions[pool_id].pool = pool;
+		rsvmem_pool_regions[pool_id].base = res.start;
+		rsvmem_pool_regions[pool_id].size = resource_size(&res);
+
+		dev_info(dev, "%s: rsvmem_pool_region[%d] = {pool=%px, base=0x%llx, size=0x%llx}\n",
+		        __func__, pool_id, rsvmem_pool_regions[pool_id].pool,
+		        rsvmem_pool_regions[pool_id].base, rsvmem_pool_regions[pool_id].size);
+
+		pool_id++;
+	}
+
+	return 0;
+}
+
+void rsvmem_pool_destroy(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_RSVMEM_REGION_COUNT; i++) {
+		if (rsvmem_pool_regions[i].pool != NULL) {
+			gen_pool_destroy(rsvmem_pool_regions[i].pool);
+			memset(&rsvmem_pool_regions[i], 0, sizeof(rsvmem_pool_info_t));
+		}
+	}
+}
+
+unsigned long rsvmem_pool_alloc(int region_id, size_t size)
+{
+	struct gen_pool *pool;
+	unsigned long addr;
+
+	if (region_id < 0 || region_id >= MAX_RSVMEM_REGION_COUNT) {
+		pr_err("%s: region_id(%d) is invalid\n", __func__, region_id);
+		return 0;
+	}
+
+	pool = rsvmem_pool_regions[region_id].pool;
+	if (pool == NULL) {
+		pr_err("%s: pool region[%d] is invalid\n", __func__, region_id);
+		return 0;
+	}
+
+	addr = gen_pool_alloc(pool, size);
+	pr_debug("%s: Allocated %zu bytes from pool region[%d]: 0x%08lx\n", __func__, size, region_id, addr);
+
+	return addr;
+}
+
+void rsvmem_pool_free(int region_id, size_t size, unsigned long addr)
+{
+	struct gen_pool *pool;
+
+	if (region_id < 0 || region_id >= MAX_RSVMEM_REGION_COUNT) {
+		pr_err("%s: region_id(%d) is invalid\n", __func__, region_id);
+		return;
+	}
+
+	pool = rsvmem_pool_regions[region_id].pool;
+	if (pool == NULL) {
+		pr_err("%s: rsvmem pool region[%d] is invalid\n", __func__, region_id);
+		return;
+	}
+
+	gen_pool_free(pool, addr, size);
+}
+

+ 35 - 0
driver/rsvmem_pool.h

@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 Alibaba Group. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __RSVMEM_POOL_H_
+#define __RSVMEM_POOL_H_
+
+#define MAX_RSVMEM_REGION_COUNT 16
+typedef struct rsvmem_pool_info
+{
+    struct gen_pool *pool;   // NULL means unavailable
+    resource_size_t base;
+    resource_size_t size;
+} rsvmem_pool_info_t;
+
+int rsvmem_pool_create(struct device *dev);
+void rsvmem_pool_destroy(void);
+unsigned long rsvmem_pool_alloc(int region_id, size_t size);
+void rsvmem_pool_free(int region_id, size_t size, unsigned long addr);
+
+#endif /* __RSVMEM_POOL_H_ */

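The header above is the entire surface of the reserved-memory pool helper; its consumer is the vidmem driver in the next file. A minimal kernel-side sketch of the intended call sequence, under the assumption of a platform device whose device-tree node carries a "memory-region" phandle (the names example_probe/example_remove are hypothetical, error handling is trimmed):

/* Sketch only: expected lifecycle of the rsvmem pool API from a driver. */
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "rsvmem_pool.h"

static int example_probe(struct platform_device *pdev)
{
    unsigned long addr;
    int ret;

    /* Builds one gen_pool per range referenced by the "memory-region" node. */
    ret = rsvmem_pool_create(&pdev->dev);
    if (ret && ret != -ENODEV)
        return ret;

    /* Carve 1 MiB out of region 0; a return of 0 means the allocation failed. */
    addr = rsvmem_pool_alloc(0, SZ_1M);
    if (!addr)
        return -ENOMEM;

    /* ... hand addr to the hardware ... */

    rsvmem_pool_free(0, SZ_1M, addr);
    return 0;
}

static int example_remove(struct platform_device *pdev)
{
    rsvmem_pool_destroy();
    return 0;
}
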
+ 1539 - 0
driver/video_memory.c

@@ -0,0 +1,1539 @@
+/*
+ * Copyright (C) 2021 - 2022  Alibaba Group. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+/****************************************************************************
+*
+*    The MIT License (MIT)
+*
+*    Copyright (c) 2014 - 2021 Vivante Corporation
+*
+*    Permission is hereby granted, free of charge, to any person obtaining a
+*    copy of this software and associated documentation files (the "Software"),
+*    to deal in the Software without restriction, including without limitation
+*    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+*    and/or sell copies of the Software, and to permit persons to whom the
+*    Software is furnished to do so, subject to the following conditions:
+*
+*    The above copyright notice and this permission notice shall be included in
+*    all copies or substantial portions of the Software.
+*
+*    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+*    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+*    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+*    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+*    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+*    DEALINGS IN THE SOFTWARE.
+*
+*****************************************************************************
+*
+*    The GPL License (GPL)
+*
+*    Copyright (C) 2014 - 2021 Vivante Corporation
+*
+*    This program is free software; you can redistribute it and/or
+*    modify it under the terms of the GNU General Public License
+*    as published by the Free Software Foundation; either version 2
+*    of the License, or (at your option) any later version.
+*
+*    This program is distributed in the hope that it will be useful,
+*    but WITHOUT ANY WARRANTY; without even the implied warranty of
+*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+*    GNU General Public License for more details.
+*
+*    You should have received a copy of the GNU General Public License
+*    along with this program; if not, write to the Free Software Foundation,
+*    Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+*
+*****************************************************************************
+*
+*    Note: This software is released under dual MIT and GPL licenses. A
+*    recipient may use this file under the terms of either the MIT license or
+*    GPL License. If you wish to use only one license not the other, you can
+*    indicate your decision by deleting one of the above license notices in your
+*    version of this file.
+*
+*****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/mman.h>
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+#include "video_memory.h"
+#include "rsvmem_pool.h"
+
+//#define VIDMEM_DMA_MAP
+#define DISCRETE_PAGES 0
+//#define VIDMEM_DEBUG
+
+
+#define IS_ERROR(status)         (status > 0)
+
+/*******************************************************************************
+**
+**  ONERROR
+**
+**      Jump to the error handler in case there is an error.
+**
+**  ASSUMPTIONS:
+**
+**      'status' variable of int type must be defined.
+**
+**  ARGUMENTS:
+**
+**      func    Function to evaluate.
+*/
+#define _ONERROR(prefix, func) \
+    do \
+    { \
+        status = func; \
+        if (IS_ERROR(status)) \
+        { \
+            goto OnError; \
+        } \
+    } \
+    while (false)
+
+#define ONERROR(func)           _ONERROR(, func)
+
+/*******************************************************************************
+**
+**  ERR_BREAK
+**
+**      Executes a break statement on error.
+**
+**  ASSUMPTIONS:
+**
+**      'status' variable of int type must be defined.
+**
+**  ARGUMENTS:
+**
+**      func    Function to evaluate.
+*/
+#define _ERR_BREAK(prefix, func){ \
+    status = func; \
+    if (IS_ERROR(status)) \
+    { \
+        break; \
+    } \
+    }
+
+#define ERR_BREAK(func)         _ERR_BREAK(, func)
+
+/*******************************************************************************
+**
+**  VERIFY_ARGUMENT
+**
+**      Assert if an argument does not apply to the specified expression.  If
+**      the argument evaluates to false, EINVAL will be
+**      returned from the current function.  In retail mode this macro does
+**      nothing.
+**
+**  ARGUMENTS:
+**
+**      arg     Argument to evaluate.
+*/
+#define _VERIFY_ARGUMENT(prefix, arg) \
+    do \
+    { \
+        if (!(arg)) \
+        { \
+            return EINVAL; \
+        } \
+    } \
+    while (false)
+#define VERIFY_ARGUMENT(arg)    _VERIFY_ARGUMENT(, arg)
+
+
+#define VM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
+#define current_mm_mmap_sem current->mm->mmap_lock
+#else
+#define current_mm_mmap_sem current->mm->mmap_sem
+#endif
+
+#define GetPageCount(size, offset)     ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+
+#ifndef VIDMEM_DEBUG
+#define DEBUG_PRINT(...) \
+  do {                     \
+  } while (0)
+#else
+#undef DEBUG_PRINT
+#define DEBUG_PRINT(...) pr_info(__VA_ARGS__)
+#endif
+
+
+struct mem_block
+{
+    int contiguous;
+    size_t size;
+    size_t numPages;
+    struct dma_buf *dmabuf;
+    struct dma_buf_attachment * attachment;
+    struct sg_table           * sgt;
+    unsigned long             * pagearray;
+    struct vm_area_struct     * vma;
+    bool is_cma;
+    bool is_vi_mem;
+    void *va;
+
+    union
+    {
+        /* Pointer to a array of pages. */
+        struct
+        {
+            struct page *contiguousPages;
+            dma_addr_t dma_addr;
+            int rsvmem_pool_region_id;
+            int exact;
+        };
+
+        struct
+        {
+            /* Pointer to a array of pointers to page. */
+            struct page **nonContiguousPages;
+
+            struct page **Pages1M;
+            int numPages1M;
+            int *isExact;
+        };
+    };
+};
+
+struct mem_node
+{
+    struct mem_block memBlk;
+    unsigned long busAddr;
+    int isImported;
+    struct list_head link;
+};
+
+struct file_node
+{
+    struct list_head memList;
+    struct file *filp;
+    struct list_head link;
+};
+
+static struct list_head fileList;
+
+static int vidalloc_major = 0;
+static int vidalloc_minor = 0;
+static struct device *gdev = NULL;
+static struct cdev vidalloc_cdev;
+static dev_t vidalloc_devt;
+static struct class *vidalloc_class;
+
+static DEFINE_SPINLOCK(mem_lock);
+#if 1
+static int
+getPhysical(
+    IN struct mem_block *MemBlk,
+    IN unsigned int Offset,
+    OUT unsigned long * Physical
+    );
+
+static struct file_node * find_and_delete_file_node(struct file *filp)
+{
+    struct file_node *node;
+    struct file_node *temp;
+
+    spin_lock(&mem_lock);
+    list_for_each_entry_safe(node, temp, &fileList, link)
+    {
+        if (node->filp == filp)
+        {
+            list_del(&node->link);
+            spin_unlock(&mem_lock);
+            return node;
+        }
+    }
+    spin_unlock(&mem_lock);
+
+    return NULL;
+}
+
+static struct file_node * get_file_node(struct file *filp)
+{
+    struct file_node *node;
+
+    spin_lock(&mem_lock);
+    list_for_each_entry(node, &fileList, link)
+    {
+        if (node->filp == filp)
+        {
+            spin_unlock(&mem_lock);
+            return node;
+        }
+    }
+    spin_unlock(&mem_lock);
+
+    return NULL;
+}
+
+static struct mem_node * get_mem_node(struct file *filp, unsigned long bus_address, int imported)
+{
+    struct file_node *fnode;
+    struct mem_node *node;
+
+    fnode = get_file_node(filp);
+    if (NULL == fnode)
+    {
+        return NULL;
+    }
+
+    spin_lock(&mem_lock);
+    list_for_each_entry(node, &fnode->memList, link)
+    {
+        if (node->busAddr == bus_address && node->isImported == imported)
+        {
+            spin_unlock(&mem_lock);
+            return node;
+        }
+    }
+    spin_unlock(&mem_lock);
+
+    return NULL;
+}
+
+static int
+AllocateMemory(
+    IN size_t Bytes,
+    OUT void * * Memory
+    )
+{
+    void * memory = NULL;
+    int status = 0;
+
+    /* Verify the arguments. */
+    VERIFY_ARGUMENT(Bytes > 0);
+    VERIFY_ARGUMENT(Memory != NULL);
+
+    if (Bytes > PAGE_SIZE)
+    {
+        memory = (void *) vmalloc(Bytes);
+    }
+    else
+    {
+        memory = (void *) kmalloc(Bytes, GFP_KERNEL | __GFP_NOWARN);
+    }
+
+    if (memory == NULL)
+    {
+        /* Out of memory. */
+        ONERROR(ENOMEM);
+    }
+
+    /* Return pointer to the memory allocation. */
+    *Memory = memory;
+
+OnError:
+    /* Return the status. */
+    return status;
+}
+
+static int
+FreeMemory(
+    IN void * Memory
+    )
+{
+    /* Verify the arguments. */
+    VERIFY_ARGUMENT(Memory != NULL);
+
+    /* Free the memory from the OS pool. */
+    if (is_vmalloc_addr(Memory))
+    {
+        vfree(Memory);
+    }
+    else
+    {
+        kfree(Memory);
+    }
+
+    /* Success. */
+    return 0;
+}
+
+
+static int
+GetSGT(
+    IN struct mem_block *MemBlk,
+    IN size_t Offset,
+    IN size_t Bytes,
+    OUT void * *SGT
+    )
+{
+    struct page ** pages = NULL;
+    struct page ** tmpPages = NULL;
+    struct sg_table *sgt = NULL;
+    struct mem_block *memBlk = MemBlk;
+
+    int status = 0;
+    size_t offset = Offset & ~PAGE_MASK; /* Offset to the first page */
+    size_t skipPages = Offset >> PAGE_SHIFT;     /* skipped pages */
+    size_t numPages = (PAGE_ALIGN(Offset + Bytes) >> PAGE_SHIFT) - skipPages;
+    size_t i;
+
+    if (memBlk->contiguous)
+    {
+        DEBUG_PRINT("[vidmem] Contiguous memory, %d pages\n", numPages);
+        ONERROR(AllocateMemory(sizeof(struct page*) * numPages, (void * *)&tmpPages));
+        pages = tmpPages;
+
+        for (i = 0; i < numPages; ++i)
+        {
+            pages[i] = nth_page(memBlk->contiguousPages, i + skipPages);
+        }
+    }
+    else
+    {
+        DEBUG_PRINT("[vidmem] Non-contiguous memory, %d pages\n", numPages);
+        pages = &memBlk->nonContiguousPages[skipPages];
+    }
+
+    ONERROR(AllocateMemory(sizeof(struct sg_table), (void * *)&sgt));
+
+    if (sg_alloc_table_from_pages(sgt, pages, numPages, offset, Bytes, GFP_KERNEL) < 0)
+    {
+        ONERROR(EPERM);
+    }
+
+    *SGT = (void *)sgt;
+
+OnError:
+    if (tmpPages)
+    {
+        FreeMemory(tmpPages);
+    }
+
+    if (IS_ERROR(status) && sgt)
+    {
+        FreeMemory(sgt);
+    }
+
+    return status;
+}
+
+static int
+Mmap(
+    IN struct mem_block *MemBlk,
+    IN size_t skipPages,
+    IN size_t numPages,
+    IN struct vm_area_struct *vma
+    )
+{
+    struct mem_block *memBlk = MemBlk;
+    int status = 0;
+
+    vma->vm_flags |= VM_FLAGS;
+
+    /* Make this mapping write combined. */
+    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+    /* Now map all the vmalloc pages to this user address. */
+    if (memBlk->contiguous)
+    {
+        /* map kernel memory to user space.. */
+        if (memBlk->is_cma == true) {
+            return dma_mmap_coherent(gdev, vma, memBlk->va,
+					   memBlk->dma_addr, vma->vm_end - vma->vm_start);
+        } else {
+            if (remap_pfn_range(vma,
+                                vma->vm_start,
+                                page_to_pfn(memBlk->contiguousPages) + skipPages,
+                                numPages << PAGE_SHIFT,
+                                vma->vm_page_prot) < 0)
+            {
+                ONERROR(ENOMEM);
+            }
+        }
+    }
+    else
+    {
+        size_t i;
+        unsigned long start = vma->vm_start;
+
+        for (i = 0; i < numPages; ++i)
+        {
+            unsigned long pfn = page_to_pfn(memBlk->nonContiguousPages[i + skipPages]);
+
+            if (remap_pfn_range(vma,
+                                start,
+                                pfn,
+                                PAGE_SIZE,
+                                vma->vm_page_prot) < 0)
+            {
+                ONERROR(ENOMEM);
+            }
+
+            start += PAGE_SIZE;
+        }
+    }
+
+OnError:
+    return status;
+}
+
+static int
+Attach(
+    INOUT struct mem_block *MemBlk
+    )
+{
+    int status;
+    struct mem_block *memBlk = MemBlk;
+
+    struct dma_buf *dmabuf = memBlk->dmabuf;
+    struct sg_table *sgt = NULL;
+    struct dma_buf_attachment *attachment = NULL;
+    int npages = 0;
+    unsigned long *pagearray = NULL;
+    int i, j, k = 0;
+    struct scatterlist *s;
+    unsigned int size = 0;
+
+    if (!dmabuf)
+    {
+        ONERROR(EFAULT);
+    }
+
+    attachment = dma_buf_attach(dmabuf, gdev);
+
+    if (!attachment)
+    {
+        ONERROR(EFAULT);
+    }
+
+    sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+
+    if (!sgt)
+    {
+        ONERROR(EFAULT);
+    }
+
+    /* Prepare page array. */
+    /* Get number of pages. */
+    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+    {
+        npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE;
+    }
+
+    /* Allocate page array. */
+    ONERROR(AllocateMemory(npages * sizeof(*pagearray), (void * *)&pagearray));
+
+    /* Fill page array. */
+    for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+    {
+        for (j = 0; j < (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE; j++)
+        {
+#ifdef VIDMEM_DMA_MAP
+            pagearray[k++] = sg_dma_address(s) + j * PAGE_SIZE;
+#else
+            pagearray[k++] = page_to_phys(nth_page(sg_page(s), j));
+#endif
+        }
+        size += sg_dma_len(s);
+    }
+
+    memBlk->pagearray = pagearray;
+    memBlk->attachment = attachment;
+    memBlk->sgt = sgt;
+    memBlk->numPages = npages;
+    memBlk->size = size;
+    memBlk->contiguous = (sgt->nents == 1) ? true : false;
+
+    return 0;
+
+OnError:
+    if (pagearray)
+    {
+        FreeMemory(pagearray);
+        pagearray = NULL;
+    }
+
+    if (sgt)
+    {
+        dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+    }
+
+    return status;
+}
+
+static struct sg_table *_dmabuf_map(struct dma_buf_attachment *attachment,
+                                    enum dma_data_direction direction)
+{
+    struct sg_table *sgt = NULL;
+    struct dma_buf *dmabuf = attachment->dmabuf;
+    struct mem_block *memBlk = dmabuf->priv;
+    int status = 0;
+
+    DEBUG_PRINT("[vidmem] %s\n", __func__);
+
+    do
+    {
+        ERR_BREAK(GetSGT(memBlk, 0, memBlk->size, (void **)&sgt));
+
+        if (dma_map_sg(attachment->dev, sgt->sgl, sgt->nents, direction) == 0)
+        {
+            sg_free_table(sgt);
+            kfree(sgt);
+            sgt = NULL;
+            ERR_BREAK(EPERM);
+        }
+    }
+    while (false);
+
+#ifdef VIDMEM_DEBUG
+    if (sgt)
+    {
+        int i = 0;
+        struct scatterlist *s;
+
+        DEBUG_PRINT("[vidmem] sgt: nents = %u, sgl: page_link = %#lx, offset = %#x, length = %#x, dma_address = %llx\n",
+            sgt->nents, sgt->sgl->page_link, sgt->sgl->offset, sgt->sgl->length, sgt->sgl->dma_address);
+        for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+        {
+            unsigned long phys = page_to_phys(nth_page(sg_page(s), 0));
+            DEBUG_PRINT("[vidmem] %d: 0x%lx, %d pages\n",
+                i, phys, (int)((sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE));
+        }
+    }
+#endif
+
+    return sgt;
+}
+
+static void _dmabuf_unmap(struct dma_buf_attachment *attachment,
+                          struct sg_table *sgt,
+                          enum dma_data_direction direction)
+{
+    DEBUG_PRINT("[vidmem] %s\n", __func__);
+
+    dma_unmap_sg(attachment->dev, sgt->sgl, sgt->nents, direction);
+
+    sg_free_table(sgt);
+    FreeMemory(sgt);
+}
+
+static int _dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+    struct mem_block *memBlk = dmabuf->priv;
+    size_t skipPages = vma->vm_pgoff;
+    size_t numPages = PAGE_ALIGN(vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+    unsigned long physical = 0;
+    int status = 0;
+
+    getPhysical(memBlk, 0, &physical);
+    DEBUG_PRINT("[vidmem] %s, %d: Mmap 0x%lx with %ld pages\n", __func__, __LINE__, physical, numPages);
+
+    ONERROR(Mmap(memBlk, skipPages, numPages, vma));
+
+OnError:
+    return IS_ERROR(status) ? -EINVAL : 0;
+}
+
+static void _dmabuf_release(struct dma_buf *dmabuf)
+{
+}
+
+static void *_dmabuf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+    char *kvaddr = NULL;
+
+    return (void *)kvaddr;
+}
+
+static void _dmabuf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr)
+{
+}
+
+static struct dma_buf_ops _dmabuf_ops =
+{
+    .map_dma_buf = _dmabuf_map,
+    .unmap_dma_buf = _dmabuf_unmap,
+    .mmap = _dmabuf_mmap,
+    .release = _dmabuf_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+    .map = _dmabuf_kmap,
+    .unmap = _dmabuf_kunmap,
+#endif
+};
+
+
+static int
+DMABUF_Export(
+    IN struct file *filp,
+    IN unsigned long bus_address,
+    IN signed int Flags,
+    OUT signed int *FD
+    )
+{
+    int status = 0;
+    struct dma_buf *dmabuf = NULL;
+    struct mem_block *memBlk = NULL;
+    struct mem_node *mnode = NULL;
+
+    DEBUG_PRINT("[vidmem] Export buffer 0x%lx\n", bus_address);
+    mnode = get_mem_node(filp, bus_address, 0);
+    if (NULL == mnode)
+    {
+        pr_err("[vidmem] Cannot find mem_node with bus address 0x%lx\n", bus_address);
+        ONERROR(EINVAL);
+    }
+
+    memBlk = &mnode->memBlk;
+
+    dmabuf = memBlk->dmabuf;
+    if (dmabuf == NULL)
+    {
+        size_t bytes = memBlk->size;
+
+        {
+            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+            exp_info.ops = &_dmabuf_ops;
+            exp_info.size = bytes;
+            exp_info.flags = Flags;
+            exp_info.priv = memBlk;
+            dmabuf = dma_buf_export(&exp_info);
+        }
+
+        if (dmabuf == NULL)
+        {
+            ONERROR(EFAULT);
+        }
+
+        memBlk->dmabuf = dmabuf;
+    }
+
+    if (FD)
+    {
+        int fd = dma_buf_fd(dmabuf, Flags);
+
+        if (fd < 0)
+        {
+            ONERROR(EIO);
+        }
+
+        *FD = fd;
+    }
+
+OnError:
+    return status;
+}
+
+static int
+DMABUF_Import(
+    IN struct file *filp,
+    IN signed int FD,
+    OUT unsigned long *bus_address,
+    OUT unsigned int *size
+    )
+{
+    int status;
+
+    struct mem_block *memBlk = NULL;
+    struct file_node *fnode = NULL;
+    struct mem_node *mnode = NULL;
+
+    DEBUG_PRINT("[vidmem] enter %s: fd = 0x%x\n",__func__, FD);
+    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
+
+    if (!mnode)
+    {
+        ONERROR(ENOMEM);
+    }
+
+    fnode = get_file_node(filp);
+
+    if (NULL == fnode)
+    {
+        ONERROR(EINVAL);
+    }
+
+    memBlk = &mnode->memBlk;
+
+    /* Import dma buf handle. */
+    memBlk->dmabuf = dma_buf_get(FD);
+    if (!memBlk->dmabuf)
+    {
+        ONERROR(EFAULT);
+    }
+
+    ONERROR(Attach(memBlk));
+
+    *bus_address = memBlk->pagearray[0];
+    *size = memBlk->size;
+    DEBUG_PRINT("[vidmem] Imported FD %d at 0x%lx in size of %ld\n", FD, memBlk->pagearray[0], memBlk->size);
+
+    mnode->busAddr = memBlk->pagearray[0];
+    mnode->isImported = 1;
+    spin_lock(&mem_lock);
+    list_add_tail(&mnode->link, &fnode->memList);
+    spin_unlock(&mem_lock);
+
+    return 0;
+
+OnError:
+    if (mnode)
+    {
+        kfree(mnode);
+    }
+
+
+    return status;
+}
+
+void
+DMABUF_Release(
+    IN struct file *filp,
+    IN unsigned long bus_address
+    )
+{
+    struct mem_block *memBlk = NULL;
+    struct mem_node *mnode = NULL;
+
+    mnode = get_mem_node(filp, bus_address, 1);
+    if (NULL == mnode)
+    {
+        return;
+    }
+
+    memBlk = &mnode->memBlk;
+
+    dma_buf_unmap_attachment(memBlk->attachment, memBlk->sgt, DMA_BIDIRECTIONAL);
+
+    dma_buf_detach(memBlk->dmabuf, memBlk->attachment);
+
+    dma_buf_put(memBlk->dmabuf);
+
+    FreeMemory(memBlk->pagearray);
+
+    spin_lock(&mem_lock);
+    list_del(&mnode->link);
+    spin_unlock(&mem_lock);
+    FreeMemory(mnode);
+}
+
+/***************************************************************************\
+************************ GFP Allocator **********************************
+\***************************************************************************/
+static void
+NonContiguousFree(
+    IN struct page ** Pages,
+    IN size_t NumPages
+    )
+{
+    size_t i;
+
+    for (i = 0; i < NumPages; i++)
+    {
+        __free_page(Pages[i]);
+    }
+
+    FreeMemory(Pages);
+}
+
+static int
+NonContiguousAlloc(
+    IN struct mem_block *MemBlk,
+    IN size_t NumPages,
+    IN unsigned int Gfp
+    )
+{
+    struct page ** pages;
+    struct page *p;
+    size_t i, size;
+
+    if (NumPages > totalram_pages())
+    {
+        return ENOMEM;
+    }
+
+    size = NumPages * sizeof(struct page *);
+    if (AllocateMemory(size, (void * *)&pages))
+        return ENOMEM;
+
+    for (i = 0; i < NumPages; i++)
+    {
+        p = alloc_page(Gfp);
+
+        if (!p)
+        {
+            pr_err("Failed to allocate non-contiguous memory\n");
+            NonContiguousFree(pages, i);
+            return ENOMEM;
+        }
+
+#if DISCRETE_PAGES
+        if (i != 0)
+        {
+            if (page_to_pfn(pages[i-1]) == page_to_pfn(p)-1)
+            {
+                /* Replaced page. */
+                struct page *l = p;
+
+                /* Allocate a page which is not contiguous to previous one. */
+                p = alloc_page(Gfp);
+
+                /* Give replaced page back. */
+                __free_page(l);
+
+                if (!p)
+                {
+                    NonContiguousFree(pages, i);
+                    return ENOMEM;
+                }
+            }
+        }
+#endif
+
+        pages[i] = p;
+    }
+
+    MemBlk->nonContiguousPages = pages;
+
+    return 0;
+}
+
+static int
+getPhysical(
+    IN struct mem_block *MemBlk,
+    IN unsigned int Offset,
+    OUT unsigned long * Physical
+    )
+{
+    struct mem_block *memBlk = MemBlk;
+    unsigned int offsetInPage = Offset & ~PAGE_MASK;
+    unsigned int index = Offset / PAGE_SIZE;
+
+    if (memBlk->contiguous)
+    {
+        *Physical = page_to_phys(nth_page(memBlk->contiguousPages, index));
+    }
+    else
+    {
+        *Physical = page_to_phys(memBlk->nonContiguousPages[index]);
+    }
+
+    *Physical += offsetInPage;
+
+    return 0;
+}
+
+int
+GFP_Alloc(
+    IN struct file *filp,
+    IN unsigned int size,
+    IN unsigned int Flags,
+    OUT unsigned long *bus_address
+    )
+{
+    int status;
+    size_t i = 0;
+    unsigned int gfp = GFP_KERNEL | GFP_DMA | __GFP_NOWARN;
+    int contiguous = Flags & (ALLOC_FLAG_CONTIGUOUS | ALLOC_FLAG_CMA | ALLOC_FLAG_VI);
+    size_t numPages = GetPageCount(size, 0);
+    unsigned long physical = 0;
+
+    struct mem_block *memBlk = NULL;
+    struct file_node *fnode = NULL;
+    struct mem_node *mnode = NULL;
+
+    if ((Flags & ALLOC_FLAG_CMA) && (Flags & ALLOC_FLAG_VI))
+    {
+        ONERROR(EINVAL);
+    }
+
+    mnode = kzalloc(sizeof(struct mem_node), GFP_KERNEL | __GFP_NORETRY);
+
+    if (!mnode)
+    {
+        ONERROR(ENOMEM);
+    }
+
+    fnode = get_file_node(filp);
+
+    if (NULL == fnode)
+    {
+        ONERROR(EINVAL);
+    }
+
+    memBlk = &mnode->memBlk;
+
+    if (Flags & ALLOC_FLAG_4GB_ADDR)
+    {
+        /* remove __GFP_HIGHMEM bit, add __GFP_DMA32 bit */
+        gfp &= ~__GFP_HIGHMEM;
+        gfp |= __GFP_DMA32;
+    }
+
+    memBlk->contiguous = contiguous;
+    memBlk->numPages = numPages;
+    memBlk->size = size;
+    memBlk->is_cma = false;
+    memBlk->is_vi_mem = false;
+
+    if (contiguous)
+    {
+        size_t bytes = numPages << PAGE_SHIFT;
+
+        void *addr = NULL;
+
+        if (Flags & ALLOC_FLAG_VI) {
+            int region_id = GET_ALLOC_FLAG_REGION(Flags);
+            memBlk->dma_addr = rsvmem_pool_alloc(region_id, bytes);
+            if (!memBlk->dma_addr)
+            {
+                ONERROR(ENOMEM);
+            }
+            memBlk->rsvmem_pool_region_id = region_id;
+            memBlk->contiguousPages = (phys_addr_t)memBlk->dma_addr ? phys_to_page((phys_addr_t)memBlk->dma_addr) : NULL;
+            memBlk->is_vi_mem = true;
+            physical = memBlk->dma_addr;
+            goto OnDone;
+        }
+        else if (Flags & ALLOC_FLAG_CMA) {
+            memBlk->va = dma_alloc_coherent(gdev,
+                            bytes, &memBlk->dma_addr,
+                            GFP_KERNEL | __GFP_NOWARN);
+            memBlk->contiguousPages = (phys_addr_t)memBlk->dma_addr ? phys_to_page((phys_addr_t)memBlk->dma_addr) : NULL;
+            if (!memBlk->va) {
+                ONERROR(ENOMEM);
+            }
+            memBlk->is_cma = true;
+            //pr_debug("got cma vir %p phy 0x%x contiguousPages %p\n", memBlk->va, memBlk->dma_addr, memBlk->contiguousPages);
+            physical = memBlk->dma_addr;
+            goto OnDone;
+        }
+
+        addr = alloc_pages_exact(bytes, (gfp & ~__GFP_HIGHMEM) | __GFP_NORETRY);
+
+        memBlk->contiguousPages = addr ? virt_to_page(addr) : NULL;
+
+        if (memBlk->contiguousPages)
+        {
+            memBlk->exact = true;
+        }
+
+        if (memBlk->contiguousPages == NULL)
+        {
+            int order = get_order(bytes);
+
+            if (order >= MAX_ORDER)
+            {
+                pr_err("Too big buffer size requested. (order %d >= max %d)\n",
+                    order, MAX_ORDER);
+                status = ENOMEM;
+                goto OnError;
+            }
+
+            memBlk->contiguousPages = alloc_pages(gfp, order);
+        }
+
+        if (memBlk->contiguousPages == NULL)
+        {
+            pr_debug("Failed to allocate contiguous memory\n");
+            ONERROR(ENOMEM);
+        }
+
+#ifdef VIDMEM_DMA_MAP
+        memBlk->dma_addr = dma_map_page(gdev,
+                memBlk->contiguousPages, 0, numPages * PAGE_SIZE,
+                DMA_BIDIRECTIONAL);
+
+        if (dma_mapping_error(gdev, memBlk->dma_addr))
+        {
+            if (memBlk->exact)
+            {
+                free_pages_exact(page_address(memBlk->contiguousPages), bytes);
+            }
+            else
+            {
+                __free_pages(memBlk->contiguousPages, get_order(bytes));
+            }
+
+            ONERROR(ENOMEM);
+        }
+#endif
+    }
+    else // non-contiguous pages
+    {
+        ONERROR(NonContiguousAlloc(memBlk, numPages, gfp));
+
+        memBlk->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL | __GFP_NORETRY);
+        if (!memBlk->sgt)
+        {
+            ONERROR(ENOMEM);
+        }
+
+        status = sg_alloc_table_from_pages(memBlk->sgt,
+                    memBlk->nonContiguousPages, numPages, 0,
+                    numPages << PAGE_SHIFT, GFP_KERNEL);
+        memBlk->sgt->orig_nents = memBlk->sgt->nents;
+        if (status < 0)
+        {
+            NonContiguousFree(memBlk->nonContiguousPages, numPages);
+            ONERROR(ENOMEM);
+        }
+
+#ifdef VIDMEM_DMA_MAP
+        status = dma_map_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents, DMA_BIDIRECTIONAL);
+        if (status != memBlk->sgt->nents)
+        {
+            NonContiguousFree(memBlk->nonContiguousPages, numPages);
+            sg_free_table(memBlk->sgt);
+            ONERROR(ENOMEM);
+        }
+#endif
+    }
+
+    for (i = 0; i < numPages; i++)
+    {
+        struct page *page;
+
+        if (contiguous)
+        {
+            page = nth_page(memBlk->contiguousPages, i);
+        }
+        else
+        {
+            page = memBlk->nonContiguousPages[i];
+        }
+
+        SetPageReserved(page);
+    }
+
+    getPhysical(memBlk, 0, &physical);
+
+OnDone:
+    *bus_address = physical;
+    mnode->busAddr = physical;
+    mnode->isImported = 0;
+    list_add_tail(&mnode->link, &fnode->memList);
+
+    DEBUG_PRINT("[vidmem] Allocated %d bytes (%ld pages) at physical address 0x%lx with %d sg table entries\n",
+        size, numPages, physical, contiguous ? 1 : memBlk->sgt->nents);
+
+    return 0;
+
+OnError:
+    if (memBlk->sgt)
+    {
+        kfree(memBlk->sgt);
+    }
+
+    if (mnode)
+    {
+        kfree(mnode);
+    }
+
+    return status;
+}
+
+
+void
+GFP_Free(
+    IN struct file *filp,
+    IN unsigned long bus_address
+    )
+{
+    size_t i;
+    struct page * page;
+    struct mem_block *memBlk = NULL;
+    struct mem_node *mnode = NULL;
+
+    mnode = get_mem_node(filp, bus_address, 0);
+    if (NULL == mnode)
+    {
+        return;
+    }
+
+    memBlk = &mnode->memBlk;
+
+    DEBUG_PRINT("[vidmem] Free %ld pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);
+
+    if (memBlk->contiguous)
+    {
+#ifdef VIDMEM_DMA_MAP
+        dma_unmap_page(gdev, memBlk->dma_addr,
+                memBlk->numPages << PAGE_SHIFT, DMA_BIDIRECTIONAL);
+#endif
+    }
+    else
+    {
+#ifdef VIDMEM_DMA_MAP
+        dma_unmap_sg(gdev, memBlk->sgt->sgl, memBlk->sgt->nents,
+                DMA_BIDIRECTIONAL);
+#endif
+
+        if (memBlk->sgt)
+        {
+            sg_free_table(memBlk->sgt);
+            kfree(memBlk->sgt);
+        }
+    }
+
+    if (memBlk->is_cma == false && memBlk->is_vi_mem == false) {
+        for (i = 0; i < memBlk->numPages; i++)
+        {
+            if (memBlk->contiguous)
+            {
+                page = nth_page(memBlk->contiguousPages, i);
+
+                ClearPageReserved(page);
+            }
+        }
+    }
+
+    if (memBlk->contiguous)
+    {
+        size_t bytes = memBlk->numPages << PAGE_SHIFT;
+        if (memBlk->is_vi_mem == true)
+        {
+            rsvmem_pool_free(memBlk->rsvmem_pool_region_id, bytes, memBlk->dma_addr);
+        }
+        else if (memBlk->is_cma == true)
+        {
+            dma_free_coherent(gdev, bytes,
+                    memBlk->va, memBlk->dma_addr);
+        }
+        else if (memBlk->exact == true)
+        {
+            free_pages_exact(page_address(memBlk->contiguousPages), memBlk->numPages * PAGE_SIZE);
+        }
+        else
+        {
+            __free_pages(memBlk->contiguousPages, get_order(memBlk->numPages * PAGE_SIZE));
+        }
+    }
+    else
+    {
+        NonContiguousFree(memBlk->nonContiguousPages, memBlk->numPages);
+    }
+
+    spin_lock(&mem_lock);
+    list_del(&mnode->link);
+    spin_unlock(&mem_lock);
+    FreeMemory(mnode);
+}
+
+static int
+GFP_MapUser(
+    IN struct file *filp,
+    IN struct vm_area_struct *vma
+    )
+{
+    struct mem_block *memBlk = NULL;
+    struct mem_node *mnode = NULL;
+    unsigned long bus_address = vma->vm_pgoff * PAGE_SIZE;
+    int status = 0;
+
+    mnode = get_mem_node(filp, bus_address, 0);
+    if (NULL == mnode)
+    {
+        return EINVAL;
+    }
+
+    memBlk = &mnode->memBlk;
+
+    if (Mmap(memBlk, 0, memBlk->numPages, vma))
+    {
+        return ENOMEM;
+    }
+
+    memBlk->vma = vma;
+
+    DEBUG_PRINT("[vidmem] Map %ld pages from physical address 0x%lx\n", memBlk->numPages, mnode->busAddr);
+
+    return status;
+}
+
+int vidalloc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+    return GFP_MapUser(filp, vma);
+}
+
+static long vidalloc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+    VidmemParams params;
+    int ret = 0;
+
+    if (_IOC_TYPE(cmd) != MEMORY_IOC_MAGIC) return EINVAL;
+    if (_IOC_NR(cmd) > MEMORY_IOC_MAXNR) return EINVAL;
+
+    if (_IOC_DIR(cmd) & _IOC_READ)
+        ret = !access_ok(arg, _IOC_SIZE(cmd));
+    else if (_IOC_DIR(cmd) & _IOC_WRITE)
+        ret = !access_ok(arg, _IOC_SIZE(cmd));
+    if (ret) return EINVAL;
+
+    switch (cmd)
+    {
+    case MEMORY_IOC_ALLOCATE:
+    {
+        ret = copy_from_user(&params, (void*)arg, sizeof(VidmemParams));
+        if (!ret)
+        {
+            ret = GFP_Alloc(filp, params.size, params.flags, &params.bus_address);
+            params.translation_offset = 0;
+            if (!ret)
+                ret = copy_to_user((VidmemParams *)arg, &params, sizeof(VidmemParams));
+        }
+        break;
+    }
+    case MEMORY_IOC_FREE:
+    {
+        ret = copy_from_user(&params, (void*)arg, sizeof(VidmemParams));
+        if (!ret)
+        {
+            GFP_Free(filp, params.bus_address);
+            ret = copy_to_user((VidmemParams *)arg, &params, sizeof(VidmemParams));
+        }
+        break;
+    }
+    case MEMORY_IOC_DMABUF_EXPORT:
+    {
+        ret = copy_from_user(&params, (void*)arg, sizeof(VidmemParams));
+        if (!ret)
+        {
+            ret = DMABUF_Export(filp, params.bus_address, params.flags, &params.fd);
+            if (!ret)
+                ret = copy_to_user((VidmemParams *)arg, &params, sizeof(VidmemParams));
+        }
+        break;
+    }
+    case MEMORY_IOC_DMABUF_IMPORT:
+    {
+        ret = copy_from_user(&params, (void*)arg, sizeof(VidmemParams));
+        if (!ret)
+        {
+            ret = DMABUF_Import(filp, params.fd, &params.bus_address, &params.size);
+            params.translation_offset = 0;
+            if (!ret)
+                ret = copy_to_user((VidmemParams *)arg, &params, sizeof(VidmemParams));
+        }
+        break;
+    }
+    case MEMORY_IOC_DMABUF_RELEASE:
+    {
+        ret = copy_from_user(&params, (void*)arg, sizeof(VidmemParams));
+        if (!ret)
+        {
+            DMABUF_Release(filp, params.bus_address);
+            ret = copy_to_user((VidmemParams *)arg, &params, sizeof(VidmemParams));
+        }
+        break;
+    }
+    default:
+        ret = EINVAL;
+    }
+
+    return ret;
+}
+
+static int vidalloc_open(struct inode *inode, struct file *filp)
+{
+    int ret = 0;
+    struct file_node *fnode = NULL;
+
+    if (AllocateMemory(sizeof(struct file_node), (void * *)&fnode))
+        return ENOMEM;
+
+    fnode->filp = filp;
+    INIT_LIST_HEAD(&fnode->memList);
+    spin_lock(&mem_lock);
+    list_add_tail(&fnode->link, &fileList);
+    spin_unlock(&mem_lock);
+
+    return ret;
+}
+
+static int vidalloc_release(struct inode *inode, struct file *filp)
+{
+    struct file_node *fnode = get_file_node(filp);
+    struct mem_node *node;
+    struct mem_node *temp;
+
+    if (NULL == fnode)
+        return EINVAL;
+
+    list_for_each_entry_safe(node, temp, &fnode->memList, link)
+    {
+        // this is not expected, memory leak detected!
+        pr_debug("vidmem: Found unfreed memory at 0x%lx, isImported = %d\n", node->busAddr, node->isImported);
+        if (node->isImported)
+            DMABUF_Release(filp, node->busAddr);
+        else
+            GFP_Free(filp, node->busAddr);
+    }
+
+
+    spin_lock(&mem_lock);
+    list_del(&fnode->link);
+    spin_unlock(&mem_lock);
+    FreeMemory(fnode);
+
+    return 0;
+}
+
+
+static struct file_operations vidalloc_fops = {
+    .owner= THIS_MODULE,
+    .open = vidalloc_open,
+    .release = vidalloc_release,
+    .unlocked_ioctl = vidalloc_ioctl,
+    .mmap = vidalloc_mmap,
+    .fasync = NULL,
+};
+#endif
+int vidalloc_probe(struct platform_device *pdev)
+{
+    int result = 0;
+
+    DEBUG_PRINT("enter %s\n",__func__);
+#if 1
+    gdev = &pdev->dev;
+    INIT_LIST_HEAD(&fileList);
+
+    result = rsvmem_pool_create(&pdev->dev);
+    if (result && result != -ENODEV)
+    {
+        pr_err("%s: Failed to create reserved memory pool\n", __func__);
+        goto err1;
+    }
+
+    if (vidalloc_major == 0)
+    {
+        result = alloc_chrdev_region(&vidalloc_devt, 0, 1, "vidmem");
+        if (result != 0)
+        {
+            pr_err("%s: alloc_chrdev_region error\n", __func__);
+            goto err1;
+        }
+        vidalloc_major = MAJOR(vidalloc_devt);
+        vidalloc_minor = MINOR(vidalloc_devt);
+    }
+    else
+    {
+        vidalloc_devt = MKDEV(vidalloc_major, vidalloc_minor);
+        result = register_chrdev_region(vidalloc_devt, 1, "vidmem");
+        if (result)
+        {
+            pr_err("%s: register_chrdev_region error\n", __func__);
+            goto err1;
+        }
+    }
+
+    vidalloc_class = class_create(THIS_MODULE, "vidmem");
+    if (IS_ERR(vidalloc_class))
+    {
+        result = PTR_ERR(vidalloc_class);
+        pr_err("%s, %d: class_create error!\n", __func__, __LINE__);
+        goto err;
+    }
+    vidalloc_devt = MKDEV(vidalloc_major, vidalloc_minor);
+
+    cdev_init(&vidalloc_cdev, &vidalloc_fops);
+    result = cdev_add(&vidalloc_cdev, vidalloc_devt, 1);
+    if (result)
+    {
+        pr_err("%s, %d: cdev_add error!\n", __func__, __LINE__);
+        class_destroy(vidalloc_class);
+        goto err;
+    }
+
+    device_create(vidalloc_class, NULL, vidalloc_devt, NULL, "vidmem");
+
+    return 0;
+err:
+    unregister_chrdev_region(vidalloc_devt, 1);
+err1:
+    pr_err("vidmem: module not inserted\n");
+#endif
+    return result;
+}
+
+static int vidalloc_remove(struct platform_device *pdev)
+{
+    DEBUG_PRINT("enter %s\n",__func__);
+    rsvmem_pool_destroy();
+    cdev_del(&vidalloc_cdev);
+    device_destroy(vidalloc_class, vidalloc_devt);
+    unregister_chrdev_region(vidalloc_devt, 1);
+    class_destroy(vidalloc_class);
+
+    return 0;
+}
+
+static const struct of_device_id thead_of_match[] = {
+        { .compatible = "thead,light-vidmem",  },
+        { /* sentinel */  },
+};
+
+static struct platform_driver vidalloc_driver = {
+    .probe = vidalloc_probe,
+    .remove = vidalloc_remove,
+    .driver = {
+        .name = "vidmem",
+        .owner = THIS_MODULE,
+        .of_match_table = of_match_ptr(thead_of_match),
+    }
+};
+
+
+int __init vidalloc_init(void)
+{
+    int ret = 0;
+    DEBUG_PRINT("enter %s\n",__func__);
+
+    ret = platform_driver_register(&vidalloc_driver);
+    if (ret)
+    {
+        pr_err("register platform driver failed!\n");
+    }
+    else
+    {
+        pr_info("vidmem: module inserted. Major <%d>\n", vidalloc_major);
+    }
+
+    return ret;
+}
+
+void __exit vidalloc_cleanup(void)
+{
+    DEBUG_PRINT("enter %s\n",__func__);
+    platform_driver_unregister(&vidalloc_driver);
+    pr_info("vidmem: module removed.\n");
+}
+
+
+module_init(vidalloc_init);
+module_exit(vidalloc_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("T-HEAD");
+MODULE_DESCRIPTION("Video Memory Allocator");

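The probe path above creates /dev/vidmem, and vidalloc_ioctl()/vidalloc_mmap() are what user space talks to. A hedged sketch of the allocate/map/free round trip as a stand-alone program (hypothetical example, error handling abbreviated; it mirrors what lib/video_mem.c does and relies on the VidmemParams and MEMORY_IOC_* definitions from driver/video_memory.h, shown next):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "video_memory.h"   /* VidmemParams, MEMORY_IOC_*, ALLOC_FLAG_* */

int main(void)
{
    VidmemParams p = { .size = 4096, .flags = ALLOC_FLAG_CONTIGUOUS };
    int fd = open("/dev/vidmem", O_RDWR);

    if (fd < 0)
        return 1;

    /* GFP_Alloc() fills p.bus_address with the buffer's physical address. */
    if (ioctl(fd, MEMORY_IOC_ALLOCATE, &p) != 0 || p.bus_address == 0)
        return 1;

    /* vidalloc_mmap() locates the buffer via vm_pgoff, so the mmap offset
     * must be the bus address returned above. */
    void *va = mmap(NULL, p.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, p.bus_address);
    if (va != MAP_FAILED)
        munmap(va, p.size);

    ioctl(fd, MEMORY_IOC_FREE, &p);
    close(fd);
    return 0;
}

If a client exits without freeing, vidalloc_release() walks the per-file list and reclaims the leftovers, but freeing explicitly as above is the intended path.
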
+ 62 - 0
driver/video_memory.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2021 - 2022  Alibaba Group. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __VIDEO_MEMORY_H_
+#define __VIDEO_MEMORY_H_
+
+
+#define IN
+#define OUT
+#define INOUT
+#define OPTIONAL
+
+/* No special needs. */
+#define ALLOC_FLAG_NONE                  0x00000000
+/* Physical contiguous. */
+#define ALLOC_FLAG_CONTIGUOUS            0x00000001
+/* Physical non contiguous. */
+#define ALLOC_FLAG_NON_CONTIGUOUS        0x00000002
+/* Need 32bit address. */
+#define ALLOC_FLAG_4GB_ADDR              0x00000004
+/* CMA priority */
+#define ALLOC_FLAG_CMA                   0x00000008
+/* Use VI reserved memory */
+#define ALLOC_FLAG_VI                    0x00000010
+
+/* Alloc rsvmem pool region id should be 0~15 */
+#define SET_ALLOC_FLAG_REGION(flag, region_id) (((flag) & 0x00ffffff) | ((region_id) << 24))
+#define GET_ALLOC_FLAG_REGION(flag)            ((flag) >> 24)
+
+#define MEMORY_IOC_MAGIC  'a'
+
+#define MEMORY_IOC_ALLOCATE         _IOWR(MEMORY_IOC_MAGIC, 1, VidmemParams *)
+#define MEMORY_IOC_FREE             _IOWR(MEMORY_IOC_MAGIC, 2, VidmemParams *)
+#define MEMORY_IOC_DMABUF_EXPORT    _IOWR(MEMORY_IOC_MAGIC, 3, VidmemParams *)
+#define MEMORY_IOC_DMABUF_IMPORT    _IOWR(MEMORY_IOC_MAGIC, 4, VidmemParams *)
+#define MEMORY_IOC_DMABUF_RELEASE   _IOWR(MEMORY_IOC_MAGIC, 5, VidmemParams *)
+#define MEMORY_IOC_MAXNR 5
+
+typedef struct {
+  unsigned long bus_address;
+  unsigned int size;
+  unsigned long translation_offset;
+  int fd;
+  int flags;
+} VidmemParams;
+
+#endif /* __VIDEO_MEMORY_H_ */

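As a worked illustration of the flag helpers and dma-buf ioctls declared above: the sketch below allocates from VI reserved-memory region 1 and exports the buffer as a dma-buf fd, the way lib/video_mem.c's VMEM_export() does. It is an assumption-laden example, not SDK code: alloc_and_export, the region id 1, and the caller-supplied vidmem_fd (an open /dev/vidmem descriptor) are made up for illustration, and error cleanup of the allocation is omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include "video_memory.h"

/* Allocate `size` bytes from VI reserved-memory region 1 and export the
 * buffer as a dma-buf. Returns the dma-buf fd, or -1 on failure. */
int alloc_and_export(int vidmem_fd, unsigned int size)
{
    VidmemParams p = { 0 };

    p.size  = size;
    /* Region id lives in bits 31..24 of the flags word. */
    p.flags = SET_ALLOC_FLAG_REGION(ALLOC_FLAG_VI, 1);
    if (ioctl(vidmem_fd, MEMORY_IOC_ALLOCATE, &p) != 0 || p.bus_address == 0)
        return -1;

    /* For the export ioctl, flags are passed straight to dma_buf_fd(). */
    p.flags = O_RDWR;
    if (ioctl(vidmem_fd, MEMORY_IOC_DMABUF_EXPORT, &p) != 0 || p.fd < 0)
        return -1;

    return p.fd;
}

The returned fd can be handed to another process and mapped back in with MEMORY_IOC_DMABUF_IMPORT, which is the path VMEM_import() in the library takes.
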
+ 16 - 0
lib/Makefile

@@ -0,0 +1,16 @@
+CFLAGS = -Wall -D_GNU_SOURCE -D_REENTRANT -D_THREAD_SAFE -O2 -Werror -Wno-unused -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-strict-overflow -Wno-array-bounds -Wno-shift-negative-value -Wempty-body -Wtype-limits -Wno-unused-result -fPIC -Wmissing-field-initializers -std=gnu99
+
+INCLUDE += -I../driver
+
+SRCS = video_mem.c
+
+OUT_PATH = ../output/rootfs/bsp/vidmem/lib
+TARGET = $(OUT_PATH)/libvmem.so
+
+$(TARGET):
+	@mkdir -p $(OUT_PATH)
+	$(CC) $(SRCS) $(CFLAGS) $(INCLUDE) -shared -o $(TARGET)
+
+clean:
+	$(RM) -r $(OUT_PATH)
+

+ 289 - 0
lib/video_mem.c

@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2021-2022 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include "video_memory.h"
+#include "video_mem.h"
+
+
+#define VMEM_PRINT(level, ...) \
+    do \
+    { \
+        if (log_level >= VMEM_LOG_##level) \
+        { \
+            printf("VMEM[%d] %s: ", pid, #level); \
+            printf(__VA_ARGS__); \
+        } \
+    } while (0)
+
+#define VMEM_LOGE(...) VMEM_PRINT(ERROR, __VA_ARGS__)
+#define VMEM_LOGW(...) VMEM_PRINT(WARNING, __VA_ARGS__)
+#define VMEM_LOGI(...) VMEM_PRINT(INFO, __VA_ARGS__)
+#define VMEM_LOGD(...) VMEM_PRINT(DEBUG, __VA_ARGS__)
+#define VMEM_LOGT(...) VMEM_PRINT(TRACE, __VA_ARGS__)
+
+typedef enum _VmemLogLevel
+{
+    VMEM_LOG_QUIET = 0,
+    VMEM_LOG_ERROR,
+    VMEM_LOG_WARNING,
+    VMEM_LOG_INFO,
+    VMEM_LOG_DEBUG,
+    VMEM_LOG_TRACE,
+    VMEM_LOG_MAX
+} VmemLogLevel;
+
+typedef struct _VmemContext
+{
+    int fd_alloc;
+
+} VmemContext;
+
+int log_level = VMEM_LOG_ERROR;
+int pid = 0;
+
+static int getLogLevel();
+
+VmemStatus
+VMEM_create(void **vmem)
+{
+    VmemContext *ctx = NULL;
+
+    log_level = getLogLevel();
+    pid = getpid();
+
+    if (vmem == NULL)
+        return VMEM_STATUS_ERROR;
+
+    ctx = (VmemContext *)malloc(sizeof(*ctx));
+    if (ctx == NULL)
+        return VMEM_STATUS_NO_MEMORY;
+    *vmem = (void *)ctx;
+
+    ctx->fd_alloc = open("/dev/vidmem", O_RDWR);
+    if (ctx->fd_alloc == -1)
+    {
+        VMEM_LOGE("Failed to open /dev/vidmem\n");
+        free(ctx);
+        *vmem = NULL;
+        return VMEM_STATUS_ERROR;
+    }
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_allocate(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+    VidmemParams p;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+
+    ctx = (VmemContext *)vmem;
+    params->phy_address = 0;
+    params->vir_address = NULL;
+    params->fd = -1;
+
+    memset(&p, 0, sizeof(p));
+    p.size = params->size;
+    p.flags = params->flags;
+    ioctl(ctx->fd_alloc, MEMORY_IOC_ALLOCATE, &p);
+    if (p.bus_address == 0)
+    {
+        VMEM_LOGE("Failed to allocate memory\n");
+        return VMEM_STATUS_NO_MEMORY;
+    }
+
+    params->phy_address = p.bus_address;
+    VMEM_LOGI("Allocated %d bytes, phy addr 0x%08x\n", 
+            params->size, params->phy_address);
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_mmap(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+    void *vir_addr;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+    
+    if (params->vir_address != NULL)
+        return VMEM_STATUS_OK;
+
+    ctx = (VmemContext *)vmem;
+    int fd = params->fd > 0 ? params->fd : ctx->fd_alloc;
+    unsigned int offset = params->fd > 0 ? 0 : params->phy_address;
+    vir_addr = mmap(0, params->size, PROT_READ | PROT_WRITE,
+                    MAP_SHARED, fd, offset);
+    if (vir_addr == MAP_FAILED)
+    {
+        VMEM_LOGE("Failed to mmap physical address: 0x%08x, using fd %d\n",
+                params->phy_address, fd);
+        return VMEM_STATUS_ERROR;
+    }
+
+    params->vir_address = vir_addr;
+    VMEM_LOGI("Mapped phy addr 0x%08x to vir addr %p, size %d\n", 
+            params->phy_address, params->vir_address, params->size);
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_free(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+    VidmemParams p;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+
+    ctx = (VmemContext *)vmem;
+
+    VMEM_LOGI("Free virt addr %p, phy addr 0x%08x, size %d\n", 
+            params->vir_address, params->phy_address, params->size);
+    if (params->vir_address != MAP_FAILED && params->vir_address != NULL)
+        munmap(params->vir_address, params->size);
+    params->vir_address = NULL;
+
+    if (params->phy_address != 0)
+    {
+        memset(&p, 0, sizeof(p));
+        p.bus_address = params->phy_address;
+        ioctl(ctx->fd_alloc, MEMORY_IOC_FREE, &p);
+        params->phy_address = 0;
+    }
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_destroy(void *vmem)
+{
+    if (vmem != NULL)
+    {
+        VmemContext *ctx = (VmemContext *)vmem;
+        if (ctx->fd_alloc != -1)
+            close(ctx->fd_alloc);
+        
+        free(vmem);
+    }
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_export(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+    VidmemParams p;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+    ctx = (VmemContext *)vmem;
+
+    memset(&p, 0, sizeof(p));
+    p.fd = -1;                  /* so a failed ioctl is not mistaken for fd 0 */
+    p.bus_address = params->phy_address;
+    p.flags = O_RDWR;
+    if (ioctl(ctx->fd_alloc, MEMORY_IOC_DMABUF_EXPORT, &p) < 0 || p.fd < 0) {
+        VMEM_LOGE("Failed to export memory\n");
+        return VMEM_STATUS_ERROR;
+    }
+
+    params->fd = p.fd;
+    VMEM_LOGI("Exported phy addr 0x%08x to fd %d, size %d\n", 
+            params->phy_address, params->fd, params->size);
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_import(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+    VidmemParams p;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+    ctx = (VmemContext *)vmem;
+
+    memset(&p, 0, sizeof(p));
+    p.fd = params->fd;
+    ioctl(ctx->fd_alloc, MEMORY_IOC_DMABUF_IMPORT, &p);
+    if (p.bus_address == 0 || p.size == 0) {
+        VMEM_LOGE("Failed to import memory\n");
+        return VMEM_STATUS_ERROR;
+    }
+
+    params->phy_address = p.bus_address;
+    params->size = p.size;
+    VMEM_LOGI("Imported fd %d to phy addr 0x%08x, size %d\n", 
+            params->fd, params->phy_address, params->size);
+
+    return VMEM_STATUS_OK;
+}
+
+VmemStatus
+VMEM_release(void *vmem, VmemParams *params)
+{
+    VmemContext *ctx = NULL;
+
+    if (vmem == NULL || params == NULL)
+        return VMEM_STATUS_ERROR;
+    ctx = (VmemContext *)vmem;
+
+    VMEM_LOGI("Released imported phy addr 0x%08x, fd %d, size %d\n", 
+            params->phy_address, params->fd, params->size);
+    if (params->vir_address != MAP_FAILED && params->vir_address != NULL)
+        munmap(params->vir_address, params->size);
+    params->vir_address = NULL;
+
+    if (params->phy_address != 0) {
+        VidmemParams p;
+        memset(&p, 0, sizeof(p));
+        p.bus_address = params->phy_address;
+        ioctl(ctx->fd_alloc, MEMORY_IOC_DMABUF_RELEASE, &p);
+        params->phy_address = 0;
+    }
+
+    return VMEM_STATUS_OK;
+}
+
+static int getLogLevel()
+{
+    char *env = getenv("VMEM_LOG_LEVEL");
+    if (env == NULL)
+        return VMEM_LOG_ERROR;
+    else
+    {
+        int level = atoi(env);
+        if (level >= VMEM_LOG_MAX || level < VMEM_LOG_QUIET)
+            return VMEM_LOG_ERROR;
+        else
+            return level;
+    }
+}
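
For orientation, the sketch below shows one way an application could drive the API implemented above: create a context, allocate and map a buffer, then release everything. It is an illustrative example only, not part of the commit; it assumes lib/ is on the include path, that the program is linked against the vidmem user library (-lvmem, as the test Makefile does), and that /dev/vidmem exists on the target.

```c
/* Minimal usage sketch of the VMEM_* API (illustrative, not part of the SDK). */
#include <stdio.h>
#include <string.h>
#include "video_mem.h"

int main(void)
{
    void *vmem = NULL;
    VmemParams params;

    if (VMEM_create(&vmem) != VMEM_STATUS_OK)   /* opens /dev/vidmem */
        return 1;

    memset(&params, 0, sizeof(params));
    params.size  = 4096;                                    /* one page */
    params.flags = VMEM_FLAG_CONTIGUOUS | VMEM_FLAG_4GB_ADDR;

    if (VMEM_allocate(vmem, &params) == VMEM_STATUS_OK &&   /* get a bus address */
        VMEM_mmap(vmem, &params)     == VMEM_STATUS_OK)     /* map it into user space */
    {
        memset(params.vir_address, 0, params.size);
        printf("buffer: phy 0x%08x, virt %p\n", params.phy_address, params.vir_address);
    }

    VMEM_free(vmem, &params);   /* unmaps (if mapped) and frees the allocation */
    VMEM_destroy(vmem);         /* closes /dev/vidmem and releases the context */
    return 0;
}
```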

+ 64 - 0
lib/video_mem.h

@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021-2022 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#ifndef _VIDEO_MEM_H_
+#define _VIDEO_MEM_H_
+
+/* No special requirements. */
+#define VMEM_FLAG_NONE                  0x00000000
+/* Physically contiguous memory. */
+#define VMEM_FLAG_CONTIGUOUS            0x00000001
+/* Physically non-contiguous memory. */
+#define VMEM_FLAG_NON_CONTIGUOUS        0x00000002
+/* Require a 32-bit (below 4GB) bus address. */
+#define VMEM_FLAG_4GB_ADDR              0x00000004
+/* Prefer allocation from CMA. */
+#define VMEM_FLAG_CMA                   0x00000008
+/* Allocate from the VI reserved memory pool. */
+#define VMEM_FLAG_VI                    0x00000010
+
+/* The rsvmem pool region id (valid range 0~15) is packed into the upper bits of the alloc flags. */
+#define SET_ALLOC_FLAG_REGION(flag, region_id) (((flag) & 0x00ffffff) | ((region_id) << 24))
+#define GET_ALLOC_FLAG_REGION(flag)            ((flag) >> 24)
+
+typedef enum _VmemStatus
+{
+    VMEM_STATUS_OK = 0,
+    VMEM_STATUS_ERROR = -1,     /* general error */
+    VMEM_STATUS_NO_MEMORY = -2, /* not enough memory to allocate buffer */
+} VmemStatus;
+
+typedef struct _VmemParams
+{
+    int size;
+    int flags;
+    unsigned int phy_address;
+    void *vir_address;
+    int fd;
+} VmemParams;
+
+VmemStatus VMEM_create(void **vmem);
+VmemStatus VMEM_allocate(void *vmem, VmemParams *params);
+VmemStatus VMEM_mmap(void *vmem, VmemParams *params);
+VmemStatus VMEM_free(void *vmem, VmemParams *params);
+VmemStatus VMEM_destroy(void *vmem);
+
+VmemStatus VMEM_export(void *vmem, VmemParams *params);
+VmemStatus VMEM_import(void *vmem, VmemParams *params);
+VmemStatus VMEM_release(void *vmem, VmemParams *params);
+
+#endif /* !_VIDEO_MEM_H_ */
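
To make the region-id packing above concrete, here is a tiny illustrative snippet (mine, not part of the header): it tags a VI reserved-memory allocation request with rsvmem pool region 3 and reads the id back. It assumes only what the macros themselves encode, i.e. that the upper 8 bits of the flags word carry the region id.

```c
#include <assert.h>
#include "video_mem.h"

int main(void)
{
    int flags = VMEM_FLAG_VI;                  /* allocate from the VI reserved memory pool */
    flags = SET_ALLOC_FLAG_REGION(flags, 3);   /* select rsvmem pool region 3 (valid ids 0~15) */

    assert(GET_ALLOC_FLAG_REGION(flags) == 3); /* the driver recovers the id from the upper bits */
    return 0;
}
```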

+ 70 - 0
test/Makefile

@@ -0,0 +1,70 @@
+CFLAGS = -Wall -D_GNU_SOURCE -D_REENTRANT -D_THREAD_SAFE -O2 -Werror -Wno-unused -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-strict-overflow -Wno-array-bounds -Wno-shift-negative-value -Wempty-body -Wtype-limits -Wno-unused-result -fPIC -Wmissing-field-initializers -std=gnu99
+
+INCLUDE += -I../lib
+
+SRCS = video_memory_test.c
+OBJS = $(SRCS:.c=.o)
+
+#source search path
+vpath %.c
+
+# name of the output file (test executable)
+TARGET = vidmem_test
+
+# Rules for building the sources and generating the test executable.
+all:    tags
+	@echo ---------------------------------------
+	@echo "Usage: make [ hwlinux | system | testdata | versatile | integrator | android ]"
+	@echo "hwlinux    - HW Linux target (the one used by the top-level Makefile)"
+	@echo "system     - PC system model (== pclinux)"
+	@echo "testdata   - PC system model for test data creation"
+	@echo "eval       - PC system model for evaluation with frame limit"
+	@echo "versatile  - ARM versatile with FPGA HW"
+	@echo "integrator - ARM integrator with FPGA HW"
+	@echo "android    - ARM android"
+	@echo "NOTE! Make sure to do 'make clean'"
+	@echo "between compiling to different targets!"
+	@echo ---------------------------------------
+
+.PHONY: system_lib system testdata clean depend
+
+evaluation: eval
+eval: system
+
+system_static: testdata
+system: testdata
+
+# for libva
+system_lib: system
+
+testdata: .depend
+
+
+.PHONY: hwlinux
+hwlinux: .depend $(TARGET)
+
+$(TARGET): $(OBJS)
+	$(CC) $(CFLAGS) $(OBJS) -L../output/rootfs/bsp/vidmem/lib -lvmem -lm -lpthread -o $(TARGET)
+
+
+
+%.o: %.c
+	$(CC) -c $(CFLAGS) $(INCLUDE) $< -o $@
+
+clean:
+	$(RM) $(TARGET)
+	$(RM) .depend
+	$(RM) *.o *.gcno *.gcda
+
+tags:
+	ctags ../driver/*h ../lib/*h ./*[ch]
+
+depend .depend: $(SRCS)
+	@echo "[CC] $@"
+	@$(CC) -M $(DEBFLAGS) $(INCLUDE) $^ > .depend
+
+ifneq (clean, $(findstring clean, $(MAKECMDGOALS)))
+ifeq (.depend, $(wildcard .depend))
+include .depend
+endif
+endif

BIN
test/vidalloc_test


+ 175 - 0
test/video_memory_test.c

@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2021-2022 Alibaba Group. All rights reserved.
+ * License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>  /* memset, strcmp */
+#include "video_mem.h"
+
+typedef enum MemType
+{
+    MEM_TYPE_CONTIGUOUS,
+    MEM_TYPE_NONCONTIGUOUS,
+    MEM_TYPE_CMA,
+    MEM_TYPE_VI,
+    MEM_TYPE_MAX
+} MemType;
+
+int alloc_flags[MEM_TYPE_MAX] =
+{
+    VMEM_FLAG_CONTIGUOUS | VMEM_FLAG_4GB_ADDR,
+    VMEM_FLAG_NON_CONTIGUOUS,
+    VMEM_FLAG_CMA,
+    VMEM_FLAG_VI
+};
+
+int alloc_num[MEM_TYPE_MAX] = {3, 3, 3, 3};
+
+void printUsage(char *name)
+{
+    printf(" \
+        Usage: %s [buf_size|-h]\n\
+            buf_size:   buffer size to be allocated, in unit of 4K pages\n\
+            -h:         print this message\n",
+            name);
+}
+
+int main(int argc, char **argv)
+{
+    int size = 1920*1080*3/2;
+    int pgsize = getpagesize();
+    void *vmem = NULL;
+    VmemParams *vmparams[MEM_TYPE_MAX] = {0};
+    int err = 0;
+
+    if (argc > 1)
+    {
+        if (strcmp(argv[1], "-h") == 0)
+        {
+            printUsage(argv[0]);
+            return 0;
+        }
+        else
+            size = atoi(argv[1]) * pgsize;
+    }
+
+    if (size <= 0)
+    {
+        printf("ERROR: invalid size: %d\n", size);
+        printUsage(argv[0]);
+        return -1;
+    }
+
+    do
+    {
+        if (VMEM_create(&vmem) != VMEM_STATUS_OK)
+            break;
+
+        VmemParams imp_params;
+        for (int type = 0; type < MEM_TYPE_MAX; type++)
+        {
+            vmparams[type] = malloc(sizeof(VmemParams)*alloc_num[type]);
+            if (vmparams[type] == NULL)
+            {
+                printf("ERROR: Failed to allocate VmemParams\n");
+                err = 1;
+                break;
+            }
+
+            for (int i = 0; i < alloc_num[type]; i++)
+            {
+                VmemParams *params = &vmparams[type][i];
+                memset(params, 0, sizeof(*params));
+                params->size = size;
+                params->flags = alloc_flags[type];
+
+                int vi_rsvmem_pool_region_id = i;   // only for type == MEM_TYPE_VI
+                if (type == MEM_TYPE_VI)
+                {
+                    params->flags = SET_ALLOC_FLAG_REGION(params->flags, vi_rsvmem_pool_region_id);
+                }
+
+                if (VMEM_allocate(vmem, params) != VMEM_STATUS_OK)
+                {
+                    if (type == MEM_TYPE_VI)
+                        printf("ERROR: Failed to allocate memory type %d, region_id=%d\n",
+                               type, vi_rsvmem_pool_region_id);
+                    else
+                        printf("ERROR: Failed to allocate memory type %d\n", type);
+                    break;
+                }
+                if (VMEM_mmap(vmem, params) != VMEM_STATUS_OK)
+                {
+                    printf("ERROR: Failed to mmap busAddress: 0x%08x\n",
+                            params->phy_address);
+                    err = 1;
+                    break;
+                }
+                if (VMEM_export(vmem, params) != VMEM_STATUS_OK)
+                {
+                    printf("ERROR: Failed to export buffer: 0x%08x\n",
+                            params->phy_address);
+                    err = 1;
+                    break;
+                }
+
+                printf("Allocated buffer %d of type %d at paddr 0x%08x vaddr %p size %d fd %d\n",
+                    i, type, params->phy_address, params->vir_address, size, params->fd);
+
+                memset(&imp_params, 0, sizeof(imp_params));
+                imp_params.fd = params->fd;
+                if (VMEM_import(vmem, &imp_params) != VMEM_STATUS_OK)
+                {
+                    printf("ERROR: Failed to import fd %d\n", params->fd);
+                    err = 1;
+                    break;
+                }
+
+                printf("Imported fd %d: paddr 0x%08x size %d\n",
+                    imp_params.fd, imp_params.phy_address, imp_params.size);
+                VMEM_release(vmem, &imp_params);
+            }
+
+            if (err)
+                break;
+        }
+    } while (0);
+
+    for (int type = 0; type < MEM_TYPE_MAX; type++)
+    {
+        if (vmparams[type] == NULL)     /* skip types whose params array was never allocated */
+            continue;
+
+        for (int i = 0; i < alloc_num[type]; i++)
+        {
+            VmemParams *params = &vmparams[type][i];
+            VMEM_free(vmem, params);
+            memset(params, 0, sizeof(*params));
+        }
+
+        free(vmparams[type]);
+    }
+
+    VMEM_destroy(vmem);
+
+    return 0;
+}
+