author		Yuxi Sun <yuxi.sun@amlogic.com>	2017-02-14 07:39:02 (GMT)
committer	Yuxi Sun <yuxi.sun@amlogic.com>	2017-02-17 07:49:42 (GMT)
commit		33cc5317127793575c7abc1af99692674f28a7e9 (patch)
tree		162c0b4d7c8ac56182e4cbdb3175e177d46997d4
parent		38f19c23726512a85c1ecefcbb3d7b59844ea4e2 (diff)
download	media-33cc5317127793575c7abc1af99692674f28a7e9.zip
		media-33cc5317127793575c7abc1af99692674f28a7e9.tar.gz
		media-33cc5317127793575c7abc1af99692674f28a7e9.tar.bz2
Add h265 encoder module
Change-Id: I7586dbbb279b90211c2657eaa75cd6e6c6db0daf
Diffstat
-rw-r--r--	Media.mk					3
-rw-r--r--	drivers/frame_sink/encoder/h265/Makefile	1
-rw-r--r--	drivers/frame_sink/encoder/h265/vmm.h		661
-rw-r--r--	drivers/frame_sink/encoder/h265/vpu.c		1997
-rw-r--r--	drivers/frame_sink/encoder/h265/vpu.h		288
5 files changed, 2949 insertions, 1 deletions
diff --git a/Media.mk b/Media.mk
index fab1c95..42580d8 100644
--- a/Media.mk
+++ b/Media.mk
@@ -3,7 +3,8 @@ ARCH ?= arm64
TOOLS := aarch64-linux-gnu-
CONFIGS := CONFIG_AMLOGIC_MEDIA_VDEC_H264=m \
CONFIG_AMLOGIC_MEDIA_VDEC_H265=m \
- CONFIG_AMLOGIC_MEDIA_ENCODER_H264=m
+ CONFIG_AMLOGIC_MEDIA_ENCODER_H264=m \
+ CONFIG_AMLOGIC_MEDIA_ENCODER_H265=m
define copy-media-modules
$(foreach m, $(shell find $(strip $(1)) -name "*.ko"),\
diff --git a/drivers/frame_sink/encoder/h265/Makefile b/drivers/frame_sink/encoder/h265/Makefile
new file mode 100644
index 0000000..e7414bf
--- /dev/null
+++ b/drivers/frame_sink/encoder/h265/Makefile
@@ -0,0 +1 @@
+obj-m += vpu.o
diff --git a/drivers/frame_sink/encoder/h265/vmm.h b/drivers/frame_sink/encoder/h265/vmm.h
new file mode 100644
index 0000000..cb0112e
--- /dev/null
+++ b/drivers/frame_sink/encoder/h265/vmm.h
@@ -0,0 +1,661 @@
+/*
+ * vmm.h
+ *
+ * memory allocator for VPU
+ *
+ * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+*/
+
+#ifndef __CNM_VIDEO_MEMORY_MANAGEMENT_H__
+#define __CNM_VIDEO_MEMORY_MANAGEMENT_H__
+
+#define VMEM_PAGE_SIZE (16 * 1024)
+#define MAKE_KEY(_a, _b) ((((vmem_key_t)(_a)) << 32) | (_b))
+#define KEY_TO_VALUE(_key) ((_key) >> 32)
+
+#define VMEM_P_ALLOC(_x) vmalloc(_x)
+#define VMEM_P_FREE(_x) vfree(_x)
+
+#define VMEM_ASSERT \
+ pr_info("VMEM_ASSERT at %s:%d\n", __FILE__, __LINE__)
+
+
+#define VMEM_HEIGHT(_tree) (_tree == NULL ? -1 : _tree->height)
+
+#define MAX(_a, _b) (_a >= _b ? _a : _b)
+
+struct avl_node_t;
+#define vmem_key_t unsigned long long
+
+struct vmem_info_t {
+ ulong total_pages;
+ ulong alloc_pages;
+ ulong free_pages;
+ ulong page_size;
+};
+
+struct page_t {
+ s32 pageno;
+ ulong addr;
+ s32 used;
+ s32 alloc_pages;
+ s32 first_pageno;
+};
+
+struct avl_node_t {
+ vmem_key_t key;
+ s32 height;
+ struct page_t *page;
+ struct avl_node_t *left;
+ struct avl_node_t *right;
+};
+
+struct video_mm_t {
+ struct avl_node_t *free_tree;
+ struct avl_node_t *alloc_tree;
+ struct page_t *page_list;
+ s32 num_pages;
+ ulong base_addr;
+ ulong mem_size;
+ s32 free_page_count;
+ s32 alloc_page_count;
+};
+
+enum rotation_dir_t {
+ LEFT,
+ RIGHT
+};
+
+struct avl_node_data_t {
+ s32 key;
+ struct page_t *page;
+};
+
+static struct avl_node_t *make_avl_node(
+ vmem_key_t key,
+ struct page_t *page)
+{
+	struct avl_node_t *node =
+		(struct avl_node_t *)VMEM_P_ALLOC(sizeof(struct avl_node_t));
+	if (node == NULL)
+		return NULL;
+	node->key = key;
+ node->page = page;
+ node->height = 0;
+ node->left = NULL;
+ node->right = NULL;
+ return node;
+}
+
+static s32 get_balance_factor(struct avl_node_t *tree)
+{
+ s32 factor = 0;
+ if (tree)
+ factor = VMEM_HEIGHT(tree->right) - VMEM_HEIGHT(tree->left);
+ return factor;
+}
+
+/*
+ * Left Rotation
+ *
+ * A B
+ * \ / \
+ * B => A C
+ * / \ \
+ * D C D
+ *
+ */
+static struct avl_node_t *rotation_left(struct avl_node_t *tree)
+{
+ struct avl_node_t *rchild;
+ struct avl_node_t *lchild;
+
+ if (tree == NULL)
+ return NULL;
+
+ rchild = tree->right;
+ if (rchild == NULL)
+ return tree;
+
+ lchild = rchild->left;
+ rchild->left = tree;
+ tree->right = lchild;
+
+ tree->height =
+ MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+ rchild->height =
+ MAX(VMEM_HEIGHT(rchild->left), VMEM_HEIGHT(rchild->right)) + 1;
+ return rchild;
+}
+
+
+/*
+ * Right Rotation
+ *
+ * A B
+ * \ / \
+ * B => D A
+ * / \ /
+ * D C C
+ *
+ */
+static struct avl_node_t *rotation_right(struct avl_node_t *tree)
+{
+ struct avl_node_t *rchild;
+ struct avl_node_t *lchild;
+
+ if (tree == NULL)
+ return NULL;
+
+ lchild = tree->left;
+ if (lchild == NULL)
+		return tree;
+
+ rchild = lchild->right;
+ lchild->right = tree;
+ tree->left = rchild;
+
+ tree->height =
+ MAX(VMEM_HEIGHT(tree->left),
+ VMEM_HEIGHT(tree->right)) + 1;
+ lchild->height =
+ MAX(VMEM_HEIGHT(lchild->left),
+ VMEM_HEIGHT(lchild->right)) + 1;
+ return lchild;
+}
+
+static struct avl_node_t *do_balance(struct avl_node_t *tree)
+{
+ s32 bfactor = 0, child_bfactor;
+ bfactor = get_balance_factor(tree);
+ if (bfactor >= 2) {
+ child_bfactor = get_balance_factor(tree->right);
+ if (child_bfactor == 1 || child_bfactor == 0) {
+ tree = rotation_left(tree);
+ } else if (child_bfactor == -1) {
+ tree->right = rotation_right(tree->right);
+ tree = rotation_left(tree);
+ } else {
+ pr_info(
+ "invalid balancing factor: %d\n",
+ child_bfactor);
+ VMEM_ASSERT;
+ return NULL;
+ }
+ } else if (bfactor <= -2) {
+ child_bfactor = get_balance_factor(tree->left);
+ if (child_bfactor == -1 || child_bfactor == 0) {
+ tree = rotation_right(tree);
+ } else if (child_bfactor == 1) {
+ tree->left = rotation_left(tree->left);
+ tree = rotation_right(tree);
+ } else {
+ pr_info(
+ "invalid balancing factor: %d\n",
+ child_bfactor);
+ VMEM_ASSERT;
+ return NULL;
+ }
+ }
+ return tree;
+}
+
+static struct avl_node_t *unlink_end_node(
+ struct avl_node_t *tree,
+ s32 dir,
+ struct avl_node_t **found_node)
+{
+ struct avl_node_t *node;
+ *found_node = NULL;
+
+ if (tree == NULL)
+ return NULL;
+
+ if (dir == LEFT) {
+ if (tree->left == NULL) {
+ *found_node = tree;
+ return NULL;
+ }
+ } else {
+ if (tree->right == NULL) {
+ *found_node = tree;
+ return NULL;
+ }
+ }
+
+ if (dir == LEFT) {
+ node = tree->left;
+ tree->left = unlink_end_node(tree->left, LEFT, found_node);
+ if (tree->left == NULL) {
+ tree->left = (*found_node)->right;
+ (*found_node)->left = NULL;
+ (*found_node)->right = NULL;
+ }
+ } else {
+ node = tree->right;
+ tree->right = unlink_end_node(tree->right, RIGHT, found_node);
+ if (tree->right == NULL) {
+ tree->right = (*found_node)->left;
+ (*found_node)->left = NULL;
+ (*found_node)->right = NULL;
+ }
+ }
+ tree->height =
+ MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+ return do_balance(tree);
+}
+
+
+static struct avl_node_t *avltree_insert(
+ struct avl_node_t *tree,
+ vmem_key_t key,
+ struct page_t *page)
+{
+ if (tree == NULL) {
+ tree = make_avl_node(key, page);
+ } else {
+ if (key >= tree->key)
+ tree->right =
+ avltree_insert(tree->right, key, page);
+ else
+ tree->left =
+ avltree_insert(tree->left, key, page);
+ }
+	tree = do_balance(tree);
+	if (tree)
+		tree->height =
+			MAX(VMEM_HEIGHT(tree->left),
+				VMEM_HEIGHT(tree->right)) + 1;
+ return tree;
+}
+
+static struct avl_node_t *do_unlink(struct avl_node_t *tree)
+{
+ struct avl_node_t *node;
+ struct avl_node_t *end_node;
+ node = unlink_end_node(tree->right, LEFT, &end_node);
+ if (node) {
+ tree->right = node;
+ } else {
+ node =
+ unlink_end_node(tree->left, RIGHT, &end_node);
+ if (node)
+ tree->left = node;
+ }
+
+ if (node == NULL) {
+ node = tree->right ? tree->right : tree->left;
+ end_node = node;
+ }
+
+ if (end_node) {
+ end_node->left =
+ (tree->left != end_node) ?
+ tree->left : end_node->left;
+ end_node->right =
+ (tree->right != end_node) ?
+ tree->right : end_node->right;
+ end_node->height =
+ MAX(VMEM_HEIGHT(end_node->left),
+ VMEM_HEIGHT(end_node->right)) + 1;
+ }
+ tree = end_node;
+ return tree;
+}
+
+static struct avl_node_t *avltree_remove(
+ struct avl_node_t *tree,
+ struct avl_node_t **found_node,
+ vmem_key_t key)
+{
+ *found_node = NULL;
+ if (tree == NULL) {
+ pr_info("failed to find key %d\n", (s32)key);
+ return NULL;
+ }
+
+ if (key == tree->key) {
+ *found_node = tree;
+ tree = do_unlink(tree);
+ } else if (key > tree->key) {
+ tree->right =
+ avltree_remove(tree->right, found_node, key);
+ } else {
+ tree->left =
+ avltree_remove(tree->left, found_node, key);
+ }
+
+ if (tree)
+ tree->height =
+ MAX(VMEM_HEIGHT(tree->left),
+ VMEM_HEIGHT(tree->right)) + 1;
+
+ tree = do_balance(tree);
+ return tree;
+}
+
+static void avltree_free(struct avl_node_t *tree)
+{
+ if (tree == NULL)
+ return;
+ if (tree->left == NULL && tree->right == NULL) {
+ VMEM_P_FREE(tree);
+ return;
+ }
+
+ avltree_free(tree->left);
+ tree->left = NULL;
+ avltree_free(tree->right);
+ tree->right = NULL;
+ VMEM_P_FREE(tree);
+}
+
+static struct avl_node_t *remove_approx_value(
+ struct avl_node_t *tree,
+ struct avl_node_t **found,
+ vmem_key_t key)
+{
+ *found = NULL;
+ if (tree == NULL)
+ return NULL;
+
+ if (key == tree->key) {
+ *found = tree;
+ tree = do_unlink(tree);
+ } else if (key > tree->key) {
+ tree->right = remove_approx_value(tree->right, found, key);
+ } else {
+ tree->left = remove_approx_value(tree->left, found, key);
+ if (*found == NULL) {
+ *found = tree;
+ tree = do_unlink(tree);
+ }
+ }
+ if (tree)
+ tree->height =
+ MAX(VMEM_HEIGHT(tree->left),
+ VMEM_HEIGHT(tree->right)) + 1;
+ tree = do_balance(tree);
+ return tree;
+}
+
+static void set_blocks_free(
+ struct video_mm_t *mm,
+ s32 pageno,
+ s32 npages)
+{
+ s32 last_pageno = pageno + npages - 1;
+ s32 i;
+ struct page_t *page;
+ struct page_t *last_page;
+
+ if (npages == 0)
+ VMEM_ASSERT;
+
+ if (last_pageno >= mm->num_pages) {
+ pr_info(
+ "set_blocks_free: invalid last page number: %d\n",
+ last_pageno);
+ VMEM_ASSERT;
+ return;
+ }
+
+ for (i = pageno; i <= last_pageno; i++) {
+ mm->page_list[i].used = 0;
+ mm->page_list[i].alloc_pages = 0;
+ mm->page_list[i].first_pageno = -1;
+ }
+
+ page = &mm->page_list[pageno];
+ page->alloc_pages = npages;
+ last_page = &mm->page_list[last_pageno];
+ last_page->first_pageno = pageno;
+ mm->free_tree =
+ avltree_insert(mm->free_tree, MAKE_KEY(npages, pageno), page);
+}
+
+static void set_blocks_alloc(
+ struct video_mm_t *mm,
+ s32 pageno,
+ s32 npages)
+{
+ s32 last_pageno = pageno + npages - 1;
+ s32 i;
+ struct page_t *page;
+ struct page_t *last_page;
+
+ if (last_pageno >= mm->num_pages) {
+ pr_info(
+			"set_blocks_alloc: invalid last page number: %d\n",
+ last_pageno);
+ VMEM_ASSERT;
+ return;
+ }
+
+ for (i = pageno; i <= last_pageno; i++) {
+ mm->page_list[i].used = 1;
+ mm->page_list[i].alloc_pages = 0;
+ mm->page_list[i].first_pageno = -1;
+ }
+
+ page = &mm->page_list[pageno];
+ page->alloc_pages = npages;
+ last_page = &mm->page_list[last_pageno];
+ last_page->first_pageno = pageno;
+ mm->alloc_tree =
+ avltree_insert(mm->alloc_tree, MAKE_KEY(page->addr, 0), page);
+}
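
For reference, a quick sketch of the key packing used by the two trees above (values illustrative, not from the patch): the free tree is keyed by MAKE_KEY(npages, pageno), so blocks are ordered by size first and remove_approx_value() performs a best-fit search; the alloc tree is keyed by MAKE_KEY(page->addr, 0), so vmem_free() can look a block up by its physical address alone.

	/* free tree: key = ((u64)npages << 32) | first_pageno */
	vmem_key_t k = MAKE_KEY(3, 5);	/* 0x0000000300000005 */
	s32 npages = KEY_TO_VALUE(k);	/* 3: block size recovered from key */

	/* alloc tree: key = ((u64)phys_addr << 32), low half unused */
	vmem_key_t a = MAKE_KEY(page->addr, 0);
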
+
+
+s32 vmem_init(struct video_mm_t *mm, ulong addr, ulong size)
+{
+ s32 i;
+
+	if (mm == NULL)
+ return -1;
+
+ mm->base_addr = (addr + (VMEM_PAGE_SIZE - 1))
+ & ~(VMEM_PAGE_SIZE - 1);
+	mm->mem_size = size & ~(VMEM_PAGE_SIZE - 1);
+ mm->num_pages = mm->mem_size / VMEM_PAGE_SIZE;
+ mm->free_tree = NULL;
+ mm->alloc_tree = NULL;
+ mm->free_page_count = mm->num_pages;
+ mm->alloc_page_count = 0;
+ mm->page_list =
+ (struct page_t *)VMEM_P_ALLOC(
+ mm->num_pages * sizeof(struct page_t));
+ if (mm->page_list == NULL) {
+		pr_err("%s:%d failed to vmalloc(%ld)\n",
+ __func__, __LINE__,
+ mm->num_pages * sizeof(struct page_t));
+ return -1;
+ }
+
+ for (i = 0; i < mm->num_pages; i++) {
+ mm->page_list[i].pageno = i;
+ mm->page_list[i].addr =
+ mm->base_addr + i * VMEM_PAGE_SIZE;
+ mm->page_list[i].alloc_pages = 0;
+ mm->page_list[i].used = 0;
+ mm->page_list[i].first_pageno = -1;
+ }
+ set_blocks_free(mm, 0, mm->num_pages);
+ return 0;
+}
+
+s32 vmem_exit(struct video_mm_t *mm)
+{
+ if (mm == NULL) {
+ pr_info("vmem_exit: invalid handle\n");
+ return -1;
+ }
+
+ if (mm->free_tree)
+ avltree_free(mm->free_tree);
+ if (mm->alloc_tree)
+ avltree_free(mm->alloc_tree);
+
+ if (mm->page_list) {
+ VMEM_P_FREE(mm->page_list);
+ mm->page_list = NULL;
+ }
+
+ mm->base_addr = 0;
+ mm->mem_size = 0;
+ mm->num_pages = 0;
+ mm->page_list = NULL;
+ mm->free_tree = NULL;
+ mm->alloc_tree = NULL;
+ mm->free_page_count = 0;
+ mm->alloc_page_count = 0;
+ return 0;
+}
+
+ulong vmem_alloc(struct video_mm_t *mm, s32 size, ulong pid)
+{
+ struct avl_node_t *node;
+ struct page_t *free_page;
+ s32 npages, free_size;
+ s32 alloc_pageno;
+ ulong ptr;
+
+ if (mm == NULL) {
+ pr_info("vmem_alloc: invalid handle\n");
+ return -1;
+ }
+
+ if (size <= 0)
+ return -1;
+
+ npages = (size + VMEM_PAGE_SIZE - 1) / VMEM_PAGE_SIZE;
+ mm->free_tree = remove_approx_value(mm->free_tree,
+ &node, MAKE_KEY(npages, 0));
+
+ if (node == NULL)
+ return -1;
+
+ free_page = node->page;
+ free_size = KEY_TO_VALUE(node->key);
+ alloc_pageno = free_page->pageno;
+ set_blocks_alloc(mm, alloc_pageno, npages);
+ if (npages != free_size) {
+ s32 free_pageno = alloc_pageno + npages;
+ set_blocks_free(mm, free_pageno, (free_size-npages));
+ }
+ VMEM_P_FREE(node);
+
+ ptr = mm->page_list[alloc_pageno].addr;
+ mm->alloc_page_count += npages;
+ mm->free_page_count -= npages;
+ return ptr;
+}
+
+s32 vmem_free(struct video_mm_t *mm, ulong ptr, ulong pid)
+{
+ ulong addr;
+ struct avl_node_t *found;
+ struct page_t *page;
+ s32 pageno, prev_free_pageno, next_free_pageno;
+ s32 prev_size, next_size;
+ s32 merge_page_no, merge_page_size, free_page_size;
+
+ if (mm == NULL) {
+ pr_info("vmem_free: invalid handle\n");
+ return -1;
+ }
+
+ addr = ptr;
+ mm->alloc_tree = avltree_remove(mm->alloc_tree, &found,
+ MAKE_KEY(addr, 0));
+
+ if (found == NULL) {
+ pr_info("vmem_free: 0x%08x not found\n", (s32)addr);
+ VMEM_ASSERT;
+ return -1;
+ }
+
+ /* find previous free block */
+ page = found->page;
+ pageno = page->pageno;
+ free_page_size = page->alloc_pages;
+ prev_free_pageno = pageno - 1;
+ prev_size = -1;
+ if (prev_free_pageno >= 0) {
+ if (mm->page_list[prev_free_pageno].used == 0) {
+ prev_free_pageno =
+ mm->page_list[prev_free_pageno].first_pageno;
+ prev_size =
+ mm->page_list[prev_free_pageno].alloc_pages;
+ }
+ }
+
+ /* find next free block */
+ next_free_pageno = pageno + page->alloc_pages;
+ next_free_pageno =
+ (next_free_pageno == mm->num_pages) ? -1 : next_free_pageno;
+ next_size = -1;
+ if (next_free_pageno >= 0) {
+ if (mm->page_list[next_free_pageno].used == 0) {
+ next_size =
+ mm->page_list[next_free_pageno].alloc_pages;
+ }
+ }
+ VMEM_P_FREE(found);
+
+ /* merge */
+ merge_page_no = page->pageno;
+ merge_page_size = page->alloc_pages;
+ if (prev_size >= 0) {
+ mm->free_tree = avltree_remove(mm->free_tree, &found,
+ MAKE_KEY(prev_size, prev_free_pageno));
+ if (found == NULL) {
+ VMEM_ASSERT;
+ return -1;
+ }
+ merge_page_no = found->page->pageno;
+ merge_page_size += found->page->alloc_pages;
+ VMEM_P_FREE(found);
+ }
+ if (next_size >= 0) {
+ mm->free_tree = avltree_remove(mm->free_tree, &found,
+ MAKE_KEY(next_size, next_free_pageno));
+ if (found == NULL) {
+ VMEM_ASSERT;
+ return -1;
+ }
+ merge_page_size += found->page->alloc_pages;
+ VMEM_P_FREE(found);
+ }
+ page->alloc_pages = 0;
+ page->first_pageno = -1;
+ set_blocks_free(mm, merge_page_no, merge_page_size);
+ mm->alloc_page_count -= free_page_size;
+ mm->free_page_count += free_page_size;
+ return 0;
+}
+
+s32 vmem_get_info(struct video_mm_t *mm, struct vmem_info_t *info)
+{
+ if (mm == NULL) {
+ pr_info("vmem_get_info: invalid handle\n");
+ return -1;
+ }
+
+ if (info == NULL)
+ return -1;
+
+ info->total_pages = mm->num_pages;
+ info->alloc_pages = mm->alloc_page_count;
+ info->free_pages = mm->free_page_count;
+ info->page_size = VMEM_PAGE_SIZE;
+ return 0;
+}
+#endif /* __CNM_VIDEO_MEMORY_MANAGEMENT_H__ */
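
Before moving on to the driver itself, a minimal usage sketch of the allocator API above may help. The function names are from this header; the base address and sizes are illustrative, and error handling is abbreviated:

	static struct video_mm_t demo_mm;

	static void vmem_demo(void)
	{
		ulong buf;

		/* carve a 64 MiB physical range into 16 KiB pages */
		if (vmem_init(&demo_mm, 0x10000000UL, 64UL * 1024 * 1024) < 0)
			return;

		/* requests round up to whole pages: 100 KiB -> 7 pages */
		buf = vmem_alloc(&demo_mm, 100 * 1024, 0);
		if (buf != (ulong)-1)
			vmem_free(&demo_mm, buf, 0); /* re-merges neighbors */

		vmem_exit(&demo_mm);
	}
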
diff --git a/drivers/frame_sink/encoder/h265/vpu.c b/drivers/frame_sink/encoder/h265/vpu.c
new file mode 100644
index 0000000..26c40fd
--- /dev/null
+++ b/drivers/frame_sink/encoder/h265/vpu.c
@@ -0,0 +1,1997 @@
+/*
+ * vpu.c
+ *
+ * linux device driver for VPU.
+ *
+ * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+*/
+
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/reset.h>
+#include <linux/compat.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
+#include <linux/amlogic/media/codec_mm/codec_mm.h>
+
+#include <linux/amlogic/media/utils/vdec_reg.h>
+#include "../../../common/media_clock/switch/amports_gate.h"
+
+#include "vpu.h"
+#include "vmm.h"
+
+/* definitions to be changed as customer configuration */
+/* if you want to have clock gating scheme frame by frame */
+/* #define VPU_SUPPORT_CLOCK_CONTROL */
+
+#define VPU_PLATFORM_DEVICE_NAME "HevcEnc"
+#define VPU_DEV_NAME "HevcEnc"
+#define VPU_CLASS_NAME "HevcEnc"
+
+#ifndef VM_RESERVED /* removed since kernel 3.7.0 */
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+#define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (64 * SZ_1M)
+
+#define LOG_ALL 0
+#define LOG_INFO 1
+#define LOG_DEBUG 2
+#define LOG_ERROR 3
+
+#define enc_pr(level, x...) \
+ do { \
+ if (level >= print_level) \
+ printk(x); \
+ } while (0)
+
+static s32 print_level = LOG_DEBUG;
+static s32 clock_level = 4;
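
As a quick illustration of the filter above (not from the patch): a message prints only when its level is at or above print_level, so with the default of LOG_DEBUG the verbose levels are suppressed:

	enc_pr(LOG_ALL, "trace\n");	/* 0 >= 2 false: suppressed */
	enc_pr(LOG_INFO, "note\n");	/* 1 >= 2 false: suppressed */
	enc_pr(LOG_ERROR, "oops\n");	/* 3 >= 2 true: printed */
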
+
+static struct video_mm_t s_vmem;
+static struct vpudrv_buffer_t s_video_memory = {0};
+static bool use_reserve;
+static ulong cma_pool_size;
+
+/* end customer definition */
+static struct vpudrv_buffer_t s_instance_pool = {0};
+static struct vpudrv_buffer_t s_common_memory = {0};
+static struct vpu_drv_context_t s_vpu_drv_context;
+static s32 s_vpu_major;
+static struct device *hevcenc_dev;
+
+static s32 s_vpu_open_ref_count;
+static s32 s_vpu_irq;
+static bool s_vpu_irq_requested;
+
+static struct vpudrv_buffer_t s_vpu_register = {0};
+
+static s32 s_interrupt_flag;
+static wait_queue_head_t s_interrupt_wait_q;
+
+static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
+static DEFINE_SEMAPHORE(s_vpu_sem);
+static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
+static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
+static struct tasklet_struct hevc_tasklet;
+static struct platform_device *hevc_pdev;
+
+static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];
+
+static void dma_flush(u32 buf_start, u32 buf_size)
+{
+ if (hevc_pdev)
+ dma_sync_single_for_device(
+ &hevc_pdev->dev, buf_start,
+ buf_size, DMA_TO_DEVICE);
+}
+
+static void cache_flush(u32 buf_start, u32 buf_size)
+{
+ if (hevc_pdev)
+ dma_sync_single_for_cpu(
+ &hevc_pdev->dev, buf_start,
+ buf_size, DMA_FROM_DEVICE);
+}
+
+s32 vpu_hw_reset(void)
+{
+ enc_pr(LOG_DEBUG, "request vpu reset from application.\n");
+ return 0;
+}
+
+s32 vpu_clk_config(u32 enable)
+{
+ if (enable)
+ HevcEnc_clock_enable(clock_level);
+ else
+ HevcEnc_clock_disable();
+ return 0;
+}
+
+static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb)
+{
+ if (!vb)
+ return -1;
+
+ vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0);
+ if ((ulong)vb->phys_addr == (ulong)-1) {
+ enc_pr(LOG_ERROR,
+ "Physical memory allocation error size=%d\n", vb->size);
+ return -1;
+ }
+
+ vb->base = (ulong)(s_video_memory.base +
+ (vb->phys_addr - s_video_memory.phys_addr));
+ return 0;
+}
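
A worked example of the translation above, with illustrative addresses only: the allocator hands back a physical address inside s_video_memory, and the kernel-virtual base is recovered by offsetting into the pool's own mapping:

	/* assume the pool is phys 0x10000000 mapped at kernel base KBASE;
	 * vmem_alloc() returns vb->phys_addr = 0x10004000, so
	 * vb->base = KBASE + (0x10004000 - 0x10000000) = KBASE + 0x4000
	 */
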
+
+static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb)
+{
+ if (!vb)
+ return;
+
+ if (vb->base)
+ vmem_free(&s_vmem, vb->phys_addr, 0);
+}
+
+static s32 vpu_free_instances(struct file *filp)
+{
+ struct vpudrv_instanace_list_t *vil, *n;
+ struct vpudrv_instance_pool_t *vip;
+ void *vip_base;
+
+ enc_pr(LOG_DEBUG, "vpu_free_instances\n");
+
+ list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
+ {
+ if (vil->filp == filp) {
+ vip_base = (void *)s_instance_pool.base;
+ enc_pr(LOG_INFO,
+ "free_instances instIdx=%d, coreIdx=%d, vip_base=%p\n",
+ (s32)vil->inst_idx,
+ (s32)vil->core_idx,
+ vip_base);
+ vip = (struct vpudrv_instance_pool_t *)vip_base;
+ if (vip) {
+				/* only the first 4 bytes (the inUse field
+				 * of CodecInst in vpuapi) need to be
+				 * cleared to free the instance.
+				 */
+ memset(&vip->codecInstPool[vil->inst_idx],
+ 0x00, 4);
+ }
+ s_vpu_open_ref_count--;
+ list_del(&vil->list);
+ kfree(vil);
+ }
+ }
+ return 1;
+}
+
+static s32 vpu_free_buffers(struct file *filp)
+{
+ struct vpudrv_buffer_pool_t *pool, *n;
+ struct vpudrv_buffer_t vb;
+
+ enc_pr(LOG_DEBUG, "vpu_free_buffers\n");
+
+ list_for_each_entry_safe(pool, n, &s_vbp_head, list)
+ {
+ if (pool->filp == filp) {
+ vb = pool->vb;
+ if (vb.base) {
+ vpu_free_dma_buffer(&vb);
+ list_del(&pool->list);
+ kfree(pool);
+ }
+ }
+ }
+ return 0;
+}
+
+static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff)
+{
+ struct vpudrv_buffer_pool_t *pool, *n;
+ struct vpudrv_buffer_t vb;
+ bool find = false;
+ u32 cached = 0;
+
+ enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n");
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(pool, n, &s_vbp_head, list)
+ {
+ if (pool->filp == filp) {
+ vb = pool->vb;
+ if (((vb.phys_addr >> PAGE_SHIFT) == vm_pgoff)
+ && find == false){
+ cached = vb.cached;
+ find = true;
+ }
+ }
+ }
+ spin_unlock(&s_vpu_lock);
+ enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached);
+ return cached;
+}
+
+static void hevcenc_isr_tasklet(ulong data)
+{
+ struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data;
+	enc_pr(LOG_INFO, "hevcenc_isr_tasklet interrupt:0x%08lx\n",
+ dev->interrupt_reason);
+ if (dev->interrupt_reason) {
+ /* notify the interrupt to user space */
+ if (dev->async_queue) {
+ enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__);
+ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
+ }
+ s_interrupt_flag = 1;
+ wake_up_interruptible(&s_interrupt_wait_q);
+ }
+ enc_pr(LOG_ALL, "[-]%s\n", __func__);
+}
+
+static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id)
+{
+ struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id;
+	/* this can be removed; the same handling also happens in
+	 * VPU_WaitInterrupt of the API layer.
+	 */
+ u32 core;
+ ulong interrupt_reason = 0;
+ enc_pr(LOG_ALL, "[+]%s\n", __func__);
+
+ for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
+ if (s_bit_firmware_info[core].size == 0) {
+			/* the API layer has not provided firmware
+			 * information for this core; no core is
+			 * activated.
+			 */
+ enc_pr(LOG_ERROR,
+ "s_bit_firmware_info[core].size is zero\n");
+ continue;
+ }
+ if (ReadVpuRegister(W4_VPU_VPU_INT_STS)) {
+ interrupt_reason = ReadVpuRegister(W4_VPU_INT_REASON);
+ WriteVpuRegister(W4_VPU_INT_REASON_CLEAR,
+ interrupt_reason);
+ WriteVpuRegister(W4_VPU_VINT_CLEAR, 0x1);
+ dev->interrupt_reason |= interrupt_reason;
+ }
+ enc_pr(LOG_INFO,
+ "intr_reason: 0x%08lx\n", dev->interrupt_reason);
+ }
+ if (dev->interrupt_reason)
+ tasklet_schedule(&hevc_tasklet);
+ enc_pr(LOG_ALL, "[-]%s\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static s32 vpu_open(struct inode *inode, struct file *filp)
+{
+ bool alloc_buffer = false;
+ s32 r = 0;
+ enc_pr(LOG_DEBUG, "[+] %s\n", __func__);
+ spin_lock(&s_vpu_lock);
+ s_vpu_drv_context.open_count++;
+ if (s_vpu_drv_context.open_count == 1) {
+ alloc_buffer = true;
+ } else {
+ r = -EBUSY;
+ s_vpu_drv_context.open_count--;
+ spin_unlock(&s_vpu_lock);
+ goto Err;
+ }
+ filp->private_data = (void *)(&s_vpu_drv_context);
+ spin_unlock(&s_vpu_lock);
+ if (alloc_buffer && !use_reserve) {
+#ifdef CONFIG_CMA
+ s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE;
+ s_video_memory.phys_addr =
+ (ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME,
+ VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE >> PAGE_SHIFT, 0,
+ CODEC_MM_FLAGS_CPU);
+ if (s_video_memory.phys_addr)
+ s_video_memory.base =
+ (ulong)phys_to_virt(s_video_memory.phys_addr);
+ else
+ s_video_memory.base = 0;
+ if (s_video_memory.base) {
+ enc_pr(LOG_DEBUG,
+ "allocating phys 0x%lx, virt addr 0x%lx, size %dk\n",
+ s_video_memory.phys_addr,
+ s_video_memory.base,
+ s_video_memory.size >> 10);
+ if (vmem_init(&s_vmem,
+ s_video_memory.phys_addr,
+ s_video_memory.size) < 0) {
+ enc_pr(LOG_ERROR, "fail to init vmem system\n");
+ r = -ENOMEM;
+ codec_mm_free_for_dma(
+ VPU_DEV_NAME,
+ (u32)s_video_memory.phys_addr);
+ vmem_exit(&s_vmem);
+ memset(&s_video_memory, 0,
+ sizeof(struct vpudrv_buffer_t));
+ memset(&s_vmem, 0,
+ sizeof(struct video_mm_t));
+ }
+ } else {
+ enc_pr(LOG_ERROR,
+ "CMA failed to allocate dma buffer for %s, phys: 0x%lx\n",
+ VPU_DEV_NAME, s_video_memory.phys_addr);
+ if (s_video_memory.phys_addr)
+ codec_mm_free_for_dma(
+ VPU_DEV_NAME,
+ (u32)s_video_memory.phys_addr);
+ s_video_memory.phys_addr = 0;
+ r = -ENOMEM;
+ }
+#else
+ enc_pr(LOG_ERROR,
+ "No CMA and reserved memory for HevcEnc!!!\n");
+ r = -ENOMEM;
+#endif
+ } else if (!s_video_memory.base) {
+ enc_pr(LOG_ERROR,
+ "HevcEnc memory is not malloced!!!\n");
+ r = -ENOMEM;
+ }
+ if (alloc_buffer) {
+ ulong flags;
+ u32 data32;
+ if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) {
+ s32 err;
+ err = request_irq(s_vpu_irq, vpu_irq_handler, 0,
+ "HevcEnc-irq", (void *)(&s_vpu_drv_context));
+ if (err) {
+ enc_pr(LOG_ERROR,
+ "fail to register interrupt handler\n");
+ return -EFAULT;
+ }
+ s_vpu_irq_requested = true;
+ }
+ amports_switch_gate("vdec", 1);
+ spin_lock_irqsave(&s_vpu_lock, flags);
+ WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
+ READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~(0x3<<24));
+ udelay(10);
+
+ data32 = 0x700;
+ data32 |= READ_VREG(DOS_SW_RESET4);
+ WRITE_VREG(DOS_SW_RESET4, data32);
+ data32 &= ~0x700;
+ WRITE_VREG(DOS_SW_RESET4, data32);
+
+ WRITE_MPEG_REG(RESET0_REGISTER, data32 & ~(1<<21));
+ WRITE_MPEG_REG(RESET0_REGISTER, data32 | (1<<21));
+ READ_MPEG_REG(RESET0_REGISTER);
+ READ_MPEG_REG(RESET0_REGISTER);
+ READ_MPEG_REG(RESET0_REGISTER);
+ READ_MPEG_REG(RESET0_REGISTER);
+#ifndef VPU_SUPPORT_CLOCK_CONTROL
+ vpu_clk_config(1);
+#endif
+		/* enable wave420l_vpu_idle_rise_irq,
+		 * disable wave420l_vpu_idle_fall_irq
+		 */
+ WRITE_VREG(DOS_WAVE420L_CNTL_STAT, 0x1);
+ WRITE_VREG(DOS_MEM_PD_WAVE420L, 0x0);
+
+ WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
+ READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~(0x3<<12));
+ udelay(10);
+
+ spin_unlock_irqrestore(&s_vpu_lock, flags);
+ }
+Err:
+ enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r);
+ return r;
+}
+
+static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg)
+{
+ s32 ret = 0;
+ struct vpu_drv_context_t *dev =
+ (struct vpu_drv_context_t *)filp->private_data;
+
+ switch (cmd) {
+ case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
+ {
+ struct vpudrv_buffer_pool_t *vbp;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret == 0) {
+ vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
+ if (!vbp) {
+ up(&s_vpu_sem);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(&(vbp->vb),
+ (struct vpudrv_buffer_t *)arg,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret) {
+ kfree(vbp);
+ up(&s_vpu_sem);
+ return -EFAULT;
+ }
+
+ ret = vpu_alloc_dma_buffer(&(vbp->vb));
+ if (ret == -1) {
+ ret = -ENOMEM;
+ kfree(vbp);
+ up(&s_vpu_sem);
+ break;
+ }
+ ret = copy_to_user((void __user *)arg,
+ &(vbp->vb),
+ sizeof(struct vpudrv_buffer_t));
+ if (ret) {
+ kfree(vbp);
+ ret = -EFAULT;
+ up(&s_vpu_sem);
+ break;
+ }
+
+ vbp->filp = filp;
+ spin_lock(&s_vpu_lock);
+ list_add(&vbp->list, &s_vbp_head);
+ spin_unlock(&s_vpu_lock);
+
+ up(&s_vpu_sem);
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
+ }
+ break;
+ case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32:
+ {
+ struct vpudrv_buffer_pool_t *vbp;
+ struct compat_vpudrv_buffer_t buf32;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret == 0) {
+ vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
+ if (!vbp) {
+ up(&s_vpu_sem);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(&buf32,
+ (struct compat_vpudrv_buffer_t *)arg,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret) {
+ kfree(vbp);
+ up(&s_vpu_sem);
+ return -EFAULT;
+ }
+
+ vbp->vb.size = buf32.size;
+ vbp->vb.cached = buf32.cached;
+ vbp->vb.phys_addr =
+ (ulong)buf32.phys_addr;
+ vbp->vb.base =
+ (ulong)buf32.base;
+ vbp->vb.virt_addr =
+ (ulong)buf32.virt_addr;
+ ret = vpu_alloc_dma_buffer(&(vbp->vb));
+ if (ret == -1) {
+ ret = -ENOMEM;
+ kfree(vbp);
+ up(&s_vpu_sem);
+ break;
+ }
+
+ buf32.size = vbp->vb.size;
+ buf32.phys_addr =
+ (compat_ulong_t)vbp->vb.phys_addr;
+ buf32.base =
+ (compat_ulong_t)vbp->vb.base;
+ buf32.virt_addr =
+ (compat_ulong_t)vbp->vb.virt_addr;
+
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret) {
+ kfree(vbp);
+ ret = -EFAULT;
+ up(&s_vpu_sem);
+ break;
+ }
+
+ vbp->filp = filp;
+ spin_lock(&s_vpu_lock);
+ list_add(&vbp->list, &s_vbp_head);
+ spin_unlock(&s_vpu_lock);
+
+ up(&s_vpu_sem);
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
+ }
+ break;
+ case VDI_IOCTL_FREE_PHYSICALMEMORY:
+ {
+ struct vpudrv_buffer_pool_t *vbp, *n;
+ struct vpudrv_buffer_t vb;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret == 0) {
+ ret = copy_from_user(&vb,
+ (struct vpudrv_buffer_t *)arg,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret) {
+ up(&s_vpu_sem);
+ return -EACCES;
+ }
+
+ if (vb.base)
+ vpu_free_dma_buffer(&vb);
+
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(vbp, n,
+ &s_vbp_head, list)
+ {
+ if (vbp->vb.base == vb.base) {
+ list_del(&vbp->list);
+ kfree(vbp);
+ break;
+ }
+ }
+ spin_unlock(&s_vpu_lock);
+ up(&s_vpu_sem);
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
+ }
+ break;
+ case VDI_IOCTL_FREE_PHYSICALMEMORY32:
+ {
+ struct vpudrv_buffer_pool_t *vbp, *n;
+ struct compat_vpudrv_buffer_t buf32;
+ struct vpudrv_buffer_t vb;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret == 0) {
+ ret = copy_from_user(&buf32,
+ (struct compat_vpudrv_buffer_t *)arg,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret) {
+ up(&s_vpu_sem);
+ return -EACCES;
+ }
+
+ vb.size = buf32.size;
+ vb.phys_addr =
+ (ulong)buf32.phys_addr;
+ vb.base =
+ (ulong)buf32.base;
+ vb.virt_addr =
+ (ulong)buf32.virt_addr;
+
+ if (vb.base)
+ vpu_free_dma_buffer(&vb);
+
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(vbp, n,
+ &s_vbp_head, list)
+ {
+ if ((compat_ulong_t)vbp->vb.base
+ == buf32.base) {
+ list_del(&vbp->list);
+ kfree(vbp);
+ break;
+ }
+ }
+ spin_unlock(&s_vpu_lock);
+ up(&s_vpu_sem);
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
+ }
+ break;
+ case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
+ {
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
+ if (s_video_memory.base != 0) {
+ ret = copy_to_user((void __user *)arg,
+ &s_video_memory,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else {
+ ret = -EFAULT;
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
+ }
+ break;
+ case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32:
+ {
+ struct compat_vpudrv_buffer_t buf32;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
+
+ buf32.size = s_video_memory.size;
+ buf32.phys_addr =
+ (compat_ulong_t)s_video_memory.phys_addr;
+ buf32.base =
+ (compat_ulong_t)s_video_memory.base;
+ buf32.virt_addr =
+ (compat_ulong_t)s_video_memory.virt_addr;
+ if (s_video_memory.base != 0) {
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else {
+ ret = -EFAULT;
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
+ }
+ break;
+ case VDI_IOCTL_WAIT_INTERRUPT:
+ {
+ struct vpudrv_intr_info_t info;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_WAIT_INTERRUPT\n");
+ ret = copy_from_user(&info,
+ (struct vpudrv_intr_info_t *)arg,
+ sizeof(struct vpudrv_intr_info_t));
+ if (ret != 0)
+ return -EFAULT;
+
+ ret = wait_event_interruptible_timeout(
+ s_interrupt_wait_q,
+ s_interrupt_flag != 0,
+ msecs_to_jiffies(info.timeout));
+ if (!ret) {
+ ret = -ETIME;
+ break;
+ }
+ if (dev->interrupt_reason & (1 << W4_INT_ENC_PIC)) {
+ u32 start, end, size, core = 0;
+ start = ReadVpuRegister(W4_BS_RD_PTR);
+ end = ReadVpuRegister(W4_BS_WR_PTR);
+ size = ReadVpuRegister(W4_RET_ENC_PIC_BYTE);
+ enc_pr(LOG_INFO, "flush output buffer, ");
+ enc_pr(LOG_INFO,
+ "start:0x%x, end:0x%x, size:0x%x\n",
+ start, end, size);
+ if (end - start > size && end > start)
+ size = end - start;
+ if (size > 0)
+ cache_flush(start, size);
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ enc_pr(LOG_INFO,
+ "s_interrupt_flag(%d), reason(0x%08lx)\n",
+ s_interrupt_flag, dev->interrupt_reason);
+
+ info.intr_reason = dev->interrupt_reason;
+ s_interrupt_flag = 0;
+ dev->interrupt_reason = 0;
+ ret = copy_to_user((void __user *)arg,
+ &info, sizeof(struct vpudrv_intr_info_t));
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_WAIT_INTERRUPT\n");
+ if (ret != 0)
+ return -EFAULT;
+ }
+ break;
+ case VDI_IOCTL_SET_CLOCK_GATE:
+ {
+ u32 clkgate;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_SET_CLOCK_GATE\n");
+ if (get_user(clkgate, (u32 __user *) arg))
+ return -EFAULT;
+#ifdef VPU_SUPPORT_CLOCK_CONTROL
+ vpu_clk_config(clkgate);
+#endif
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_SET_CLOCK_GATE\n");
+ }
+ break;
+ case VDI_IOCTL_GET_INSTANCE_POOL:
+ {
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_INSTANCE_POOL\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret != 0)
+ break;
+
+ if (s_instance_pool.base != 0) {
+ ret = copy_to_user((void __user *)arg,
+ &s_instance_pool,
+ sizeof(struct vpudrv_buffer_t));
+ ret = (ret != 0) ? -EFAULT : 0;
+ } else {
+ ret = copy_from_user(&s_instance_pool,
+ (struct vpudrv_buffer_t *)arg,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret == 0) {
+ s_instance_pool.size =
+ PAGE_ALIGN(
+ s_instance_pool.size);
+ s_instance_pool.base =
+ (ulong)vmalloc(
+ s_instance_pool.size);
+ s_instance_pool.phys_addr =
+ s_instance_pool.base;
+ if (s_instance_pool.base == 0) {
+ ret = -EFAULT;
+ up(&s_vpu_sem);
+ break;
+ }
+ /*clearing memory*/
+ memset((void *)s_instance_pool.base,
+ 0, s_instance_pool.size);
+ ret = copy_to_user((void __user *)arg,
+ &s_instance_pool,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else
+ ret = -EFAULT;
+ }
+ up(&s_vpu_sem);
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_INSTANCE_POOL\n");
+ }
+ break;
+ case VDI_IOCTL_GET_INSTANCE_POOL32:
+ {
+ struct compat_vpudrv_buffer_t buf32;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_INSTANCE_POOL32\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret != 0)
+ break;
+ if (s_instance_pool.base != 0) {
+ buf32.size = s_instance_pool.size;
+ buf32.phys_addr =
+ (compat_ulong_t)
+ s_instance_pool.phys_addr;
+ buf32.base =
+ (compat_ulong_t)
+ s_instance_pool.base;
+ buf32.virt_addr =
+ (compat_ulong_t)
+ s_instance_pool.virt_addr;
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(struct compat_vpudrv_buffer_t));
+ ret = (ret != 0) ? -EFAULT : 0;
+ } else {
+ ret = copy_from_user(&buf32,
+ (struct compat_vpudrv_buffer_t *)arg,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret == 0) {
+ s_instance_pool.size = buf32.size;
+ s_instance_pool.size =
+ PAGE_ALIGN(
+ s_instance_pool.size);
+ s_instance_pool.base =
+ (ulong)vmalloc(
+ s_instance_pool.size);
+ s_instance_pool.phys_addr =
+ s_instance_pool.base;
+ buf32.size =
+ s_instance_pool.size;
+ buf32.phys_addr =
+ (compat_ulong_t)
+ s_instance_pool.phys_addr;
+ buf32.base =
+ (compat_ulong_t)
+ s_instance_pool.base;
+ buf32.virt_addr =
+ (compat_ulong_t)
+ s_instance_pool.virt_addr;
+ if (s_instance_pool.base == 0) {
+ ret = -EFAULT;
+ up(&s_vpu_sem);
+ break;
+ }
+ /*clearing memory*/
+ memset((void *)s_instance_pool.base,
+ 0x0, s_instance_pool.size);
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(
+ struct compat_vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else
+ ret = -EFAULT;
+ }
+ up(&s_vpu_sem);
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_INSTANCE_POOL32\n");
+ }
+ break;
+ case VDI_IOCTL_GET_COMMON_MEMORY:
+ {
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_COMMON_MEMORY\n");
+ if (s_common_memory.base != 0) {
+ ret = copy_to_user((void __user *)arg,
+ &s_common_memory,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else {
+ ret = copy_from_user(&s_common_memory,
+ (struct vpudrv_buffer_t *)arg,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vpu_alloc_dma_buffer(
+ &s_common_memory) != -1) {
+ ret = copy_to_user((void __user *)arg,
+ &s_common_memory,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else
+ ret = -EFAULT;
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_COMMON_MEMORY\n");
+ }
+ break;
+ case VDI_IOCTL_GET_COMMON_MEMORY32:
+ {
+ struct compat_vpudrv_buffer_t buf32;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_COMMON_MEMORY32\n");
+
+ buf32.size = s_common_memory.size;
+ buf32.phys_addr =
+ (compat_ulong_t)
+ s_common_memory.phys_addr;
+ buf32.base =
+ (compat_ulong_t)
+ s_common_memory.base;
+ buf32.virt_addr =
+ (compat_ulong_t)
+ s_common_memory.virt_addr;
+ if (s_common_memory.base != 0) {
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else {
+ ret = copy_from_user(&buf32,
+ (struct compat_vpudrv_buffer_t *)arg,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ s_common_memory.size = buf32.size;
+ if (vpu_alloc_dma_buffer(
+ &s_common_memory) != -1) {
+ buf32.size =
+ s_common_memory.size;
+ buf32.phys_addr =
+ (compat_ulong_t)
+ s_common_memory.phys_addr;
+ buf32.base =
+ (compat_ulong_t)
+ s_common_memory.base;
+ buf32.virt_addr =
+ (compat_ulong_t)
+ s_common_memory.virt_addr;
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(
+ struct compat_vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ } else
+ ret = -EFAULT;
+ }
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_COMMON_MEMORY32\n");
+ }
+ break;
+ case VDI_IOCTL_OPEN_INSTANCE:
+ {
+ struct vpudrv_inst_info_t inst_info;
+ struct vpudrv_instanace_list_t *vil, *n;
+
+ vil = kzalloc(sizeof(*vil), GFP_KERNEL);
+ if (!vil)
+ return -ENOMEM;
+
+		if (copy_from_user(&inst_info,
+			(struct vpudrv_inst_info_t *)arg,
+			sizeof(struct vpudrv_inst_info_t))) {
+			kfree(vil);
+			return -EFAULT;
+		}
+
+ vil->inst_idx = inst_info.inst_idx;
+ vil->core_idx = inst_info.core_idx;
+ vil->filp = filp;
+
+ spin_lock(&s_vpu_lock);
+ list_add(&vil->list, &s_inst_list_head);
+
+ /* counting the current open instance number */
+ inst_info.inst_open_count = 0;
+ list_for_each_entry_safe(vil, n,
+ &s_inst_list_head, list)
+ {
+ if (vil->core_idx == inst_info.core_idx)
+ inst_info.inst_open_count++;
+ }
+
+ /* flag just for that vpu is in opened or closed */
+ s_vpu_open_ref_count++;
+ spin_unlock(&s_vpu_lock);
+
+ if (copy_to_user((void __user *)arg,
+ &inst_info,
+ sizeof(struct vpudrv_inst_info_t))) {
+ kfree(vil);
+ return -EFAULT;
+ }
+
+ enc_pr(LOG_DEBUG,
+ "VDI_IOCTL_OPEN_INSTANCE ");
+ enc_pr(LOG_DEBUG,
+ "core_idx=%d, inst_idx=%d, ",
+ (u32)inst_info.core_idx,
+ (u32)inst_info.inst_idx);
+ enc_pr(LOG_DEBUG,
+ "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
+ s_vpu_open_ref_count,
+ inst_info.inst_open_count);
+ }
+ break;
+ case VDI_IOCTL_CLOSE_INSTANCE:
+ {
+ struct vpudrv_inst_info_t inst_info;
+ struct vpudrv_instanace_list_t *vil, *n;
+
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_CLOSE_INSTANCE\n");
+ if (copy_from_user(&inst_info,
+ (struct vpudrv_inst_info_t *)arg,
+ sizeof(struct vpudrv_inst_info_t)))
+ return -EFAULT;
+
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(vil, n,
+ &s_inst_list_head, list)
+ {
+ if (vil->inst_idx == inst_info.inst_idx &&
+ vil->core_idx == inst_info.core_idx) {
+ list_del(&vil->list);
+ kfree(vil);
+ break;
+ }
+ }
+
+ /* counting the current open instance number */
+ inst_info.inst_open_count = 0;
+ list_for_each_entry_safe(vil, n,
+ &s_inst_list_head, list)
+ {
+ if (vil->core_idx == inst_info.core_idx)
+ inst_info.inst_open_count++;
+ }
+
+ /* flag just for that vpu is in opened or closed */
+ s_vpu_open_ref_count--;
+ spin_unlock(&s_vpu_lock);
+
+ if (copy_to_user((void __user *)arg,
+ &inst_info,
+ sizeof(struct vpudrv_inst_info_t)))
+ return -EFAULT;
+
+ enc_pr(LOG_DEBUG,
+ "VDI_IOCTL_CLOSE_INSTANCE ");
+ enc_pr(LOG_DEBUG,
+ "core_idx=%d, inst_idx=%d, ",
+ (u32)inst_info.core_idx,
+ (u32)inst_info.inst_idx);
+ enc_pr(LOG_DEBUG,
+ "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
+ s_vpu_open_ref_count,
+ inst_info.inst_open_count);
+ }
+ break;
+ case VDI_IOCTL_GET_INSTANCE_NUM:
+ {
+ struct vpudrv_inst_info_t inst_info;
+ struct vpudrv_instanace_list_t *vil, *n;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_INSTANCE_NUM\n");
+
+ ret = copy_from_user(&inst_info,
+ (struct vpudrv_inst_info_t *)arg,
+ sizeof(struct vpudrv_inst_info_t));
+ if (ret != 0)
+ break;
+
+ inst_info.inst_open_count = 0;
+
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(vil, n,
+ &s_inst_list_head, list)
+ {
+ if (vil->core_idx == inst_info.core_idx)
+ inst_info.inst_open_count++;
+ }
+ spin_unlock(&s_vpu_lock);
+
+ ret = copy_to_user((void __user *)arg,
+ &inst_info,
+ sizeof(struct vpudrv_inst_info_t));
+
+ enc_pr(LOG_DEBUG,
+ "VDI_IOCTL_GET_INSTANCE_NUM ");
+ enc_pr(LOG_DEBUG,
+ "core_idx=%d, inst_idx=%d, open_count=%d\n",
+ (u32)inst_info.core_idx,
+ (u32)inst_info.inst_idx,
+ inst_info.inst_open_count);
+ }
+ break;
+ case VDI_IOCTL_RESET:
+ {
+ vpu_hw_reset();
+ }
+ break;
+ case VDI_IOCTL_GET_REGISTER_INFO:
+ {
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_REGISTER_INFO\n");
+ ret = copy_to_user((void __user *)arg,
+ &s_vpu_register,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_REGISTER_INFO ");
+ enc_pr(LOG_ALL,
+ "s_vpu_register.phys_addr=0x%lx, ",
+ s_vpu_register.phys_addr);
+ enc_pr(LOG_ALL,
+ "s_vpu_register.virt_addr=0x%lx, ",
+ s_vpu_register.virt_addr);
+ enc_pr(LOG_ALL,
+ "s_vpu_register.size=0x%x\n",
+ s_vpu_register.size);
+ }
+ break;
+ case VDI_IOCTL_GET_REGISTER_INFO32:
+ {
+ struct compat_vpudrv_buffer_t buf32;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_GET_REGISTER_INFO32\n");
+
+ buf32.size = s_vpu_register.size;
+ buf32.phys_addr =
+ (compat_ulong_t)
+ s_vpu_register.phys_addr;
+ buf32.base =
+ (compat_ulong_t)
+ s_vpu_register.base;
+ buf32.virt_addr =
+ (compat_ulong_t)
+ s_vpu_register.virt_addr;
+ ret = copy_to_user((void __user *)arg,
+ &buf32,
+ sizeof(
+ struct compat_vpudrv_buffer_t));
+ if (ret != 0)
+ ret = -EFAULT;
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_GET_REGISTER_INFO32 ");
+ enc_pr(LOG_ALL,
+ "s_vpu_register.phys_addr=0x%lx, ",
+ s_vpu_register.phys_addr);
+ enc_pr(LOG_ALL,
+ "s_vpu_register.virt_addr=0x%lx, ",
+ s_vpu_register.virt_addr);
+ enc_pr(LOG_ALL,
+ "s_vpu_register.size=0x%x\n",
+ s_vpu_register.size);
+ }
+ break;
+ case VDI_IOCTL_FLUSH_BUFFER32:
+ {
+ struct vpudrv_buffer_pool_t *pool, *n;
+ struct compat_vpudrv_buffer_t buf32;
+ struct vpudrv_buffer_t vb;
+ bool find = false;
+ u32 cached = 0;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_FLUSH_BUFFER32\n");
+
+ ret = copy_from_user(&buf32,
+ (struct compat_vpudrv_buffer_t *)arg,
+ sizeof(struct compat_vpudrv_buffer_t));
+ if (ret)
+ return -EFAULT;
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(pool, n,
+ &s_vbp_head, list)
+ {
+ if (pool->filp == filp) {
+ vb = pool->vb;
+ if (((compat_ulong_t)vb.phys_addr
+ == buf32.phys_addr)
+ && find == false){
+ cached = vb.cached;
+ find = true;
+ }
+ }
+ }
+ spin_unlock(&s_vpu_lock);
+ if (find && cached)
+ dma_flush(
+ (u32)buf32.phys_addr,
+ (u32)buf32.size);
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_FLUSH_BUFFER32\n");
+ }
+ break;
+ case VDI_IOCTL_FLUSH_BUFFER:
+ {
+ struct vpudrv_buffer_pool_t *pool, *n;
+ struct vpudrv_buffer_t vb, buf;
+ bool find = false;
+ u32 cached = 0;
+ enc_pr(LOG_ALL,
+ "[+]VDI_IOCTL_FLUSH_BUFFER\n");
+
+ ret = copy_from_user(&buf,
+ (struct vpudrv_buffer_t *)arg,
+ sizeof(struct vpudrv_buffer_t));
+ if (ret)
+ return -EFAULT;
+ spin_lock(&s_vpu_lock);
+ list_for_each_entry_safe(pool, n,
+ &s_vbp_head, list)
+ {
+ if (pool->filp == filp) {
+ vb = pool->vb;
+ if ((vb.phys_addr
+ == buf.phys_addr)
+ && find == false){
+ cached = vb.cached;
+ find = true;
+ }
+ }
+ }
+ spin_unlock(&s_vpu_lock);
+ if (find && cached)
+ dma_flush(
+ (u32)buf.phys_addr,
+ (u32)buf.size);
+ enc_pr(LOG_ALL,
+ "[-]VDI_IOCTL_FLUSH_BUFFER\n");
+ }
+ break;
+ default:
+ {
+ enc_pr(LOG_ERROR,
+ "No such IOCTL, cmd is %d\n", cmd);
+ }
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg)
+{
+ long ret;
+
+ arg = (ulong)compat_ptr(arg);
+ ret = vpu_ioctl(filp, cmd, arg);
+ return ret;
+}
+#endif
+
+static ssize_t vpu_write(struct file *filp,
+ const char *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len);
+
+ if (!buf) {
+ enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n");
+ return -EFAULT;
+ }
+
+ if (len == sizeof(struct vpu_bit_firmware_info_t)) {
+ struct vpu_bit_firmware_info_t *bit_firmware_info;
+ bit_firmware_info =
+ kmalloc(sizeof(struct vpu_bit_firmware_info_t),
+ GFP_KERNEL);
+ if (!bit_firmware_info) {
+ enc_pr(LOG_ERROR,
+ "vpu_write bit_firmware_info allocation error\n");
+ return -EFAULT;
+ }
+
+		if (copy_from_user(bit_firmware_info, buf, len)) {
+			enc_pr(LOG_ERROR,
+				"vpu_write copy_from_user error for bit_firmware_info\n");
+			kfree(bit_firmware_info);
+			return -EFAULT;
+		}
+
+ if (bit_firmware_info->size ==
+ sizeof(struct vpu_bit_firmware_info_t)) {
+ enc_pr(LOG_INFO,
+ "vpu_write set bit_firmware_info coreIdx=0x%x, ",
+ bit_firmware_info->core_idx);
+ enc_pr(LOG_INFO,
+ "reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n",
+ bit_firmware_info->reg_base_offset,
+ bit_firmware_info->size,
+ bit_firmware_info->bit_code[0]);
+
+			if (bit_firmware_info->core_idx
+				>= MAX_NUM_VPU_CORE) {
+				enc_pr(LOG_ERROR,
+					"vpu_write coreIdx[%d] ",
+					bit_firmware_info->core_idx);
+				enc_pr(LOG_ERROR,
+					"exceeds MAX_NUM_VPU_CORE[%d]\n",
+					MAX_NUM_VPU_CORE);
+				kfree(bit_firmware_info);
+				return -ENODEV;
+			}
+
+ memcpy((void *)&s_bit_firmware_info
+ [bit_firmware_info->core_idx],
+ bit_firmware_info,
+ sizeof(struct vpu_bit_firmware_info_t));
+ kfree(bit_firmware_info);
+ return len;
+ }
+ kfree(bit_firmware_info);
+ }
+	return -EINVAL;
+}
+
+static s32 vpu_release(struct inode *inode, struct file *filp)
+{
+ s32 ret = 0;
+ ulong flags;
+ enc_pr(LOG_DEBUG, "vpu_release\n");
+ ret = down_interruptible(&s_vpu_sem);
+ if (ret == 0) {
+ vpu_free_buffers(filp);
+ vpu_free_instances(filp);
+ s_vpu_drv_context.open_count--;
+ if (s_vpu_drv_context.open_count == 0) {
+ if (s_instance_pool.base) {
+ enc_pr(LOG_DEBUG, "free instance pool\n");
+ vfree((const void *)s_instance_pool.base);
+ s_instance_pool.base = 0;
+ }
+ if (s_common_memory.base) {
+ enc_pr(LOG_DEBUG, "free common memory\n");
+ vpu_free_dma_buffer(&s_common_memory);
+ s_common_memory.base = 0;
+ }
+
+ if (s_video_memory.base && !use_reserve) {
+ codec_mm_free_for_dma(
+ VPU_DEV_NAME,
+ (u32)s_video_memory.phys_addr);
+ vmem_exit(&s_vmem);
+ memset(&s_video_memory,
+ 0, sizeof(struct vpudrv_buffer_t));
+ memset(&s_vmem,
+ 0, sizeof(struct video_mm_t));
+ }
+ if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == true)) {
+ free_irq(s_vpu_irq, &s_vpu_drv_context);
+ s_vpu_irq_requested = false;
+ }
+ spin_lock_irqsave(&s_vpu_lock, flags);
+ WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
+ READ_AOREG(AO_RTI_GEN_PWR_ISO0) | (0x3<<12));
+ udelay(10);
+
+ WRITE_VREG(DOS_MEM_PD_WAVE420L, 0xffffffff);
+#ifndef VPU_SUPPORT_CLOCK_CONTROL
+ vpu_clk_config(0);
+#endif
+ WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
+ READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | (0x3<<24));
+ udelay(10);
+ spin_unlock_irqrestore(&s_vpu_lock, flags);
+ amports_switch_gate("vdec", 0);
+ }
+		up(&s_vpu_sem);
+	}
+ return 0;
+}
+
+static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode)
+{
+ struct vpu_drv_context_t *dev =
+ (struct vpu_drv_context_t *)filp->private_data;
+ return fasync_helper(fd, filp, mode, &dev->async_queue);
+}
+
+static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
+{
+ ulong pfn;
+ vm->vm_flags |= VM_IO | VM_RESERVED;
+ vm->vm_page_prot =
+ pgprot_noncached(vm->vm_page_prot);
+ pfn = s_vpu_register.phys_addr >> PAGE_SHIFT;
+ return remap_pfn_range(vm, vm->vm_start, pfn,
+ vm->vm_end - vm->vm_start,
+ vm->vm_page_prot) ? -EAGAIN : 0;
+}
+
+static s32 vpu_map_to_physical_memory(
+ struct file *fp, struct vm_area_struct *vm)
+{
+ vm->vm_flags |= VM_IO | VM_RESERVED;
+ if (vm->vm_pgoff ==
+ (s_common_memory.phys_addr >> PAGE_SHIFT)) {
+ vm->vm_page_prot =
+ pgprot_noncached(vm->vm_page_prot);
+ } else {
+ if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0)
+ vm->vm_page_prot =
+ pgprot_noncached(vm->vm_page_prot);
+ }
+ /* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */
+ return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
+ vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
+}
+
+static s32 vpu_map_to_instance_pool_memory(
+ struct file *fp, struct vm_area_struct *vm)
+{
+ s32 ret;
+ long length = vm->vm_end - vm->vm_start;
+ ulong start = vm->vm_start;
+ s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base;
+ ulong pfn;
+
+ vm->vm_flags |= VM_RESERVED;
+
+ /* loop over all pages, map it page individually */
+ while (length > 0) {
+ pfn = vmalloc_to_pfn(vmalloc_area_ptr);
+ ret = remap_pfn_range(vm, start, pfn,
+ PAGE_SIZE, PAGE_SHARED);
+ if (ret < 0)
+ return ret;
+ start += PAGE_SIZE;
+ vmalloc_area_ptr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ }
+ return 0;
+}
+
+/*
+ * @brief memory map interface for vpu file operation
+ * @return 0 on success or negative error code on error
+ */
+static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm)
+{
+ /* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */
+ if ((vm->vm_end - vm->vm_start == s_vpu_register.size + 1) &&
+ (vm->vm_pgoff == 0)) {
+ vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT);
+ return vpu_map_to_register(fp, vm);
+ }
+
+ if (vm->vm_pgoff == 0)
+ return vpu_map_to_instance_pool_memory(fp, vm);
+
+ return vpu_map_to_physical_memory(fp, vm);
+}
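
To make the dispatch above concrete, a hedged userspace sketch (not part of the patch): the node name follows VPU_DEV_NAME, and reg_size, pool_size, vb_size and vb_phys would come from the VDI_IOCTL_GET_REGISTER_INFO, VDI_IOCTL_GET_INSTANCE_POOL and VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY ioctls shown earlier:

	#include <fcntl.h>
	#include <sys/mman.h>

	static void map_vpu_regions(size_t reg_size, size_t pool_size,
			size_t vb_size, off_t vb_phys)
	{
		int fd = open("/dev/HevcEnc", O_RDWR);

		/* 1) registers: offset 0, length == register size + 1 */
		void *regs = mmap(NULL, reg_size + 1,
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		/* 2) instance pool: offset 0, any other length */
		void *pool = mmap(NULL, pool_size,
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

		/* 3) DMA buffer: offset is the buffer's physical address */
		void *vb = mmap(NULL, vb_size,
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, vb_phys);
	}
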
+
+static const struct file_operations vpu_fops = {
+ .owner = THIS_MODULE,
+ .open = vpu_open,
+ .release = vpu_release,
+ .write = vpu_write,
+ .unlocked_ioctl = vpu_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vpu_compat_ioctl,
+#endif
+ .fasync = vpu_fasync,
+ .mmap = vpu_mmap,
+};
+
+static ssize_t hevcenc_status_show(struct class *cla,
+ struct class_attribute *attr, char *buf)
+{
+ return snprintf(buf, 40, "hevcenc_status_show\n");
+}
+
+static struct class_attribute hevcenc_class_attrs[] = {
+ __ATTR(encode_status,
+	S_IRUGO,
+ hevcenc_status_show,
+ NULL),
+ __ATTR_NULL
+};
+
+static struct class hevcenc_class = {
+ .name = VPU_CLASS_NAME,
+ .class_attrs = hevcenc_class_attrs,
+};
+
+s32 init_HevcEnc_device(void)
+{
+ s32 r = 0;
+ r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops);
+ if (r <= 0) {
+ enc_pr(LOG_ERROR, "register hevcenc device error.\n");
+ return r;
+ }
+ s_vpu_major = r;
+
+ r = class_register(&hevcenc_class);
+ if (r < 0) {
+ enc_pr(LOG_ERROR, "error create hevcenc class.\n");
+ return r;
+ }
+
+ hevcenc_dev = device_create(&hevcenc_class, NULL,
+ MKDEV(s_vpu_major, 0), NULL,
+ VPU_DEV_NAME);
+
+ if (IS_ERR(hevcenc_dev)) {
+ enc_pr(LOG_ERROR, "create hevcenc device error.\n");
+ class_unregister(&hevcenc_class);
+ return -1;
+ }
+ return r;
+}
+
+s32 uninit_HevcEnc_device(void)
+{
+ if (hevcenc_dev)
+ device_destroy(&hevcenc_class, MKDEV(s_vpu_major, 0));
+
+ class_destroy(&hevcenc_class);
+
+ unregister_chrdev(s_vpu_major, VPU_DEV_NAME);
+ return 0;
+}
+
+static s32 hevc_mem_device_init(
+ struct reserved_mem *rmem, struct device *dev)
+{
+ s32 r;
+ if (!rmem) {
+ enc_pr(LOG_ERROR,
+ "Can not obtain I/O memory, will allocate hevc buffer!\n");
+ r = -EFAULT;
+ return r;
+ }
+
+ if ((!rmem->base) ||
+ (rmem->size < VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) {
+ enc_pr(LOG_ERROR,
+ "memory range error, 0x%lx - 0x%lx\n",
+ (ulong)rmem->base, (ulong)rmem->size);
+ r = -EFAULT;
+ return r;
+ }
+ r = 0;
+ s_video_memory.size = rmem->size;
+ s_video_memory.phys_addr = (ulong)rmem->base;
+ s_video_memory.base =
+ (ulong)phys_to_virt(s_video_memory.phys_addr);
+ if (!s_video_memory.base) {
+ enc_pr(LOG_ERROR, "fail to remap video memory ");
+ enc_pr(LOG_ERROR,
+ "physical phys_addr=0x%lx, base=0x%lx, size=0x%x\n",
+ (ulong)s_video_memory.phys_addr,
+ (ulong)s_video_memory.base,
+ (u32)s_video_memory.size);
+ s_video_memory.phys_addr = 0;
+ r = -EFAULT;
+ }
+ return r;
+}
+
+static s32 vpu_probe(struct platform_device *pdev)
+{
+ s32 err = 0, irq, reg_count, idx;
+ struct resource res;
+ struct device_node *np, *child;
+
+ enc_pr(LOG_DEBUG, "vpu_probe\n");
+
+ s_vpu_major = 0;
+ use_reserve = false;
+ s_vpu_irq = -1;
+ cma_pool_size = 0;
+ s_vpu_irq_requested = false;
+ s_vpu_open_ref_count = 0;
+ hevcenc_dev = NULL;
+ hevc_pdev = NULL;
+ memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
+ memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
+ memset(&s_vmem, 0, sizeof(struct video_mm_t));
+ memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info));
+ memset(&res, 0, sizeof(struct resource));
+
+ idx = of_reserved_mem_device_init(&pdev->dev);
+ if (idx != 0) {
+ enc_pr(LOG_DEBUG,
+ "HevcEnc reserved memory config fail.\n");
+ } else if (s_video_memory.phys_addr) {
+ use_reserve = true;
+ }
+
+ if (use_reserve == false) {
+#ifndef CONFIG_CMA
+ enc_pr(LOG_ERROR,
+			"HevcEnc reserved memory is invalid, probe failed!\n");
+ err = -EFAULT;
+	goto ERROR_PROBE_DEVICE;
+#else
+ cma_pool_size =
+ (codec_mm_get_total_size() >
+ (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) ?
+ (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE) :
+ codec_mm_get_total_size();
+ enc_pr(LOG_DEBUG,
+ "HevcEnc - cma memory pool size: %d MB\n",
+ (u32)cma_pool_size / SZ_1M);
+#endif
+ }
+
+ /* get interrupt resource */
+ irq = platform_get_irq_byname(pdev, "wave420l_irq");
+ if (irq < 0) {
+ enc_pr(LOG_ERROR, "get HevcEnc irq resource error\n");
+ err = -ENXIO;
+		goto ERROR_PROBE_DEVICE;
+ }
+ s_vpu_irq = irq;
+ enc_pr(LOG_DEBUG, "HevcEnc - wave420l_irq: %d\n", s_vpu_irq);
+#if 0
+ rstc = devm_reset_control_get(&pdev->dev, "HevcEnc");
+ if (IS_ERR(rstc)) {
+ enc_pr(LOG_ERROR,
+ "get HevcEnc rstc error: %lx\n", PTR_ERR(rstc));
+ rstc = NULL;
+ err = -ENOENT;
+		goto ERROR_PROBE_DEVICE;
+ }
+ reset_control_assert(rstc);
+ s_vpu_rstc = rstc;
+
+ clk = clk_get(&pdev->dev, "clk_HevcEnc");
+ if (IS_ERR(clk)) {
+ enc_pr(LOG_ERROR, "cannot get clock\n");
+ clk = NULL;
+ err = -ENOENT;
+		goto ERROR_PROBE_DEVICE;
+ }
+ s_vpu_clk = clk;
+#endif
+
+#ifdef VPU_SUPPORT_CLOCK_CONTROL
+#else
+ vpu_clk_config(1);
+#endif
+
+ np = pdev->dev.of_node;
+ reg_count = 0;
+ for_each_child_of_node(np, child) {
+ if (of_address_to_resource(child, 0, &res)
+ || (reg_count > 1)) {
+ enc_pr(LOG_ERROR,
+				"no reg range or too many reg ranges: %d\n",
+ reg_count);
+ err = -ENXIO;
+			goto ERROR_PROBE_DEVICE;
+ }
+ /* if platform driver is implemented */
+ if (res.start != 0) {
+ s_vpu_register.phys_addr = res.start;
+ s_vpu_register.virt_addr =
+ (ulong)ioremap_nocache(
+ res.start, resource_size(&res));
+ s_vpu_register.size = res.end - res.start;
+ enc_pr(LOG_DEBUG,
+ "vpu base address get from platform driver ");
+ enc_pr(LOG_DEBUG,
+ "physical base addr=0x%lx, virtual base=0x%lx\n",
+ s_vpu_register.phys_addr,
+ s_vpu_register.virt_addr);
+ } else {
+ s_vpu_register.phys_addr = VPU_REG_BASE_ADDR;
+ s_vpu_register.virt_addr =
+ (ulong)ioremap_nocache(
+ s_vpu_register.phys_addr, VPU_REG_SIZE);
+ s_vpu_register.size = VPU_REG_SIZE;
+ enc_pr(LOG_DEBUG,
+ "vpu base address get from defined value ");
+ enc_pr(LOG_DEBUG,
+ "physical base addr=0x%lx, virtual base=0x%lx\n",
+ s_vpu_register.phys_addr,
+ s_vpu_register.virt_addr);
+ }
+ reg_count++;
+ }
+
+ /* get the major number of the character device */
+ if (init_HevcEnc_device()) {
+ err = -EBUSY;
+ enc_pr(LOG_ERROR, "could not allocate major number\n");
+		goto ERROR_PROBE_DEVICE;
+ }
+	enc_pr(LOG_INFO, "alloc_chrdev_region succeeded\n");
+
+ init_waitqueue_head(&s_interrupt_wait_q);
+ tasklet_init(&hevc_tasklet,
+ hevcenc_isr_tasklet,
+ (ulong)&s_vpu_drv_context);
+ s_common_memory.base = 0;
+ s_instance_pool.base = 0;
+
+	if (use_reserve == true) {
+		if (vmem_init(&s_vmem, s_video_memory.phys_addr,
+			s_video_memory.size) < 0) {
+			enc_pr(LOG_ERROR, "failed to init vmem system\n");
+			err = -ENOMEM;
+			goto ERROR_PROBE_DEVICE;
+		}
+		enc_pr(LOG_DEBUG,
+			"successfully probed vpu device with reserved video memory, ");
+		enc_pr(LOG_DEBUG,
+			"phys_addr=0x%lx, base=0x%lx\n",
+			(ulong)s_video_memory.phys_addr,
+			(ulong)s_video_memory.base);
+	} else
+		enc_pr(LOG_DEBUG,
+			"successfully probed vpu device with video memory from cma\n");
+ hevc_pdev = pdev;
+ return 0;
+
+ERROR_PROBE_DEVICE:
+ if (s_vpu_register.virt_addr) {
+ iounmap((void *)s_vpu_register.virt_addr);
+ memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
+ }
+
+ if (s_video_memory.base) {
+ vmem_exit(&s_vmem);
+ memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
+ memset(&s_vmem, 0, sizeof(struct video_mm_t));
+ }
+
+ vpu_clk_config(0);
+
+ if (s_vpu_irq_requested == true) {
+ if (s_vpu_irq >= 0) {
+ free_irq(s_vpu_irq, &s_vpu_drv_context);
+ s_vpu_irq = -1;
+ }
+ s_vpu_irq_requested = false;
+ }
+ uninit_HevcEnc_device();
+ return err;
+}
+
+static s32 vpu_remove(struct platform_device *pdev)
+{
+ enc_pr(LOG_DEBUG, "vpu_remove\n");
+
+ if (s_instance_pool.base) {
+ vfree((const void *)s_instance_pool.base);
+ s_instance_pool.base = 0;
+ }
+
+ if (s_common_memory.base) {
+ vpu_free_dma_buffer(&s_common_memory);
+ s_common_memory.base = 0;
+ }
+
+ if (s_video_memory.base) {
+ if (!use_reserve)
+ codec_mm_free_for_dma(
+ VPU_DEV_NAME,
+ (u32)s_video_memory.phys_addr);
+ vmem_exit(&s_vmem);
+ memset(&s_video_memory,
+ 0, sizeof(struct vpudrv_buffer_t));
+ memset(&s_vmem,
+ 0, sizeof(struct video_mm_t));
+ }
+
+ if (s_vpu_irq_requested == true) {
+ if (s_vpu_irq >= 0) {
+ free_irq(s_vpu_irq, &s_vpu_drv_context);
+ s_vpu_irq = -1;
+ }
+ s_vpu_irq_requested = false;
+ }
+
+ if (s_vpu_register.virt_addr) {
+ iounmap((void *)s_vpu_register.virt_addr);
+ memset(&s_vpu_register,
+ 0, sizeof(struct vpudrv_buffer_t));
+ }
+ hevc_pdev = NULL;
+ vpu_clk_config(0);
+
+ uninit_HevcEnc_device();
+ return 0;
+}
+
+#ifdef CONFIG_PM
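+/*
+ * Command handshake sketch, inferred from the register writes below rather
+ * than from C&M documentation: the host marks the core busy, selects core 0
+ * (the core argument is currently unused), writes the command word, then
+ * raises W4_VPU_HOST_INT_REQ to kick the firmware; callers poll
+ * W4_VPU_BUSY_STATUS until the firmware clears it.
+ */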
+static void Wave4BitIssueCommand(u32 core, u32 cmd)
+{
+ WriteVpuRegister(W4_VPU_BUSY_STATUS, 1);
+ WriteVpuRegister(W4_CORE_INDEX, 0);
+ /* coreIdx = ReadVpuRegister(W4_VPU_BUSY_STATUS); */
+ /* coreIdx = 0; */
+ /* WriteVpuRegister(W4_INST_INDEX,
+ (instanceIndex & 0xffff) | (codecMode << 16)); */
+ WriteVpuRegister(W4_COMMAND, cmd);
+ WriteVpuRegister(W4_VPU_HOST_INT_REQ, 1);
+}
+
+static s32 vpu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ u32 core;
+	ulong timeout = jiffies + HZ; /* 1 s VPU wait timeout */
+
+ enc_pr(LOG_DEBUG, "vpu_suspend\n");
+
+ vpu_clk_config(1);
+
+ if (s_vpu_open_ref_count > 0) {
+ for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
+ if (s_bit_firmware_info[core].size == 0)
+ continue;
+ while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
+ if (time_after(jiffies, timeout)) {
+ enc_pr(LOG_ERROR,
+ "SLEEP_VPU BUSY timeout");
+ goto DONE_SUSPEND;
+ }
+ }
+ Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU);
+
+ while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
+ if (time_after(jiffies, timeout)) {
+ enc_pr(LOG_ERROR,
+ "SLEEP_VPU BUSY timeout");
+ goto DONE_SUSPEND;
+ }
+ }
+ if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
+ enc_pr(LOG_ERROR,
+ "SLEEP_VPU failed [0x%x]",
+ ReadVpuRegister(W4_RET_FAIL_REASON));
+ goto DONE_SUSPEND;
+ }
+ }
+ }
+
+ vpu_clk_config(0);
+ return 0;
+
+DONE_SUSPEND:
+ vpu_clk_config(0);
+ return -EAGAIN;
+}
+
+static s32 vpu_resume(struct platform_device *pdev)
+{
+ u32 i;
+ u32 core;
+ u32 val;
+	ulong timeout = jiffies + HZ; /* 1 s VPU wait timeout */
+ ulong code_base;
+ u32 code_size;
+ u32 remap_size;
+ u32 regVal;
+ u32 hwOption = 0;
+
+ enc_pr(LOG_DEBUG, "vpu_resume\n");
+
+ vpu_clk_config(1);
+
+ for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
+ if (s_bit_firmware_info[core].size == 0)
+ continue;
+ code_base = s_common_memory.phys_addr;
+ /* ALIGN TO 4KB */
+ code_size = (s_common_memory.size & ~0xfff);
+ if (code_size < s_bit_firmware_info[core].size * 2)
+ goto DONE_WAKEUP;
+
+ /*---- LOAD BOOT CODE */
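+		/*
+		 * The firmware image is held as 16-bit words; each iteration
+		 * packs two halfwords into one 32-bit write, so the loop
+		 * consumes all 512 halfwords (1 KB) of bit_code.
+		 */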
+ for (i = 0; i < 512; i += 2) {
+ val = s_bit_firmware_info[core].bit_code[i];
+ val |= (s_bit_firmware_info[core].bit_code[i+1] << 16);
+ WriteVpu(code_base+(i*2), val);
+ }
+
+ regVal = 0;
+ WriteVpuRegister(W4_PO_CONF, regVal);
+
+ /* Reset All blocks */
+ regVal = 0x7ffffff;
+ WriteVpuRegister(W4_VPU_RESET_REQ, regVal);
+
+		/* wait for reset to complete */
+ while (ReadVpuRegister(W4_VPU_RESET_STATUS)) {
+ if (time_after(jiffies, timeout))
+ goto DONE_WAKEUP;
+ }
+
+ WriteVpuRegister(W4_VPU_RESET_REQ, 0);
+
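+		/*
+		 * Remap programming sketch; the field meanings are inferred
+		 * from the values written, not from a datasheet: remap_size
+		 * appears to hold the code region size in 4 KB pages, and
+		 * the window maps VPU virtual address 0 onto code_base.
+		 */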
+ /* remap page size */
+ remap_size = (code_size >> 12) & 0x1ff;
+ regVal = 0x80000000 | (W4_REMAP_CODE_INDEX<<12)
+ | (0 << 16) | (1<<11) | remap_size;
+ WriteVpuRegister(W4_VPU_REMAP_CTRL, regVal);
+ /* DO NOT CHANGE! */
+ WriteVpuRegister(W4_VPU_REMAP_VADDR, 0x00000000);
+ WriteVpuRegister(W4_VPU_REMAP_PADDR, code_base);
+ WriteVpuRegister(W4_ADDR_CODE_BASE, code_base);
+ WriteVpuRegister(W4_CODE_SIZE, code_size);
+ WriteVpuRegister(W4_CODE_PARAM, 0);
+ WriteVpuRegister(W4_INIT_VPU_TIME_OUT_CNT, timeout);
+ WriteVpuRegister(W4_HW_OPTION, hwOption);
+
+ /* Interrupt */
+ regVal = (1 << W4_INT_DEC_PIC_HDR);
+ regVal |= (1 << W4_INT_DEC_PIC);
+ regVal |= (1 << W4_INT_QUERY_DEC);
+ regVal |= (1 << W4_INT_SLEEP_VPU);
+ regVal |= (1 << W4_INT_BSBUF_EMPTY);
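+		/*
+		 * Note: the full mask below overrides the per-bit value
+		 * composed above, enabling every interrupt source except
+		 * bits 0 (W4_INT_INIT_VPU) and 8 (W4_INT_GET_FW_VERSION).
+		 */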
+ regVal = 0xfffffefe;
+ WriteVpuRegister(W4_VPU_VINT_ENABLE, regVal);
+ Wave4BitIssueCommand(core, W4_CMD_INIT_VPU);
+ WriteVpuRegister(W4_VPU_REMAP_CORE_START, 1);
+ while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
+ if (time_after(jiffies, timeout))
+ goto DONE_WAKEUP;
+ }
+
+ if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
+ enc_pr(LOG_ERROR,
+ "WAKEUP_VPU failed [0x%x]",
+ ReadVpuRegister(W4_RET_FAIL_REASON));
+ goto DONE_WAKEUP;
+ }
+ }
+
+ if (s_vpu_open_ref_count == 0)
+ vpu_clk_config(0);
+DONE_WAKEUP:
+ if (s_vpu_open_ref_count > 0)
+ vpu_clk_config(1);
+ return 0;
+}
+#else
+#define vpu_suspend NULL
+#define vpu_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct of_device_id cnm_hevcenc_dt_match[] = {
+ {
+ .compatible = "cnm, HevcEnc",
+ },
+ {},
+};
+
+static struct platform_driver vpu_driver = {
+ .driver = {
+ .name = VPU_PLATFORM_DEVICE_NAME,
+ .of_match_table = cnm_hevcenc_dt_match,
+ },
+ .probe = vpu_probe,
+ .remove = vpu_remove,
+ .suspend = vpu_suspend,
+ .resume = vpu_resume,
+};
+
+static s32 __init vpu_init(void)
+{
+ s32 res;
+ enc_pr(LOG_DEBUG, "vpu_init\n");
+ if (get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) {
+		enc_pr(LOG_DEBUG,
+			"this chip does not support the hevc encoder\n");
+		return -ENODEV;
+ }
+ res = platform_driver_register(&vpu_driver);
+ enc_pr(LOG_INFO,
+ "end vpu_init result=0x%x\n", res);
+ return res;
+}
+
+static void __exit vpu_exit(void)
+{
+ enc_pr(LOG_DEBUG, "vpu_exit\n");
+ if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXM)
+ platform_driver_unregister(&vpu_driver);
+}
+
+static const struct reserved_mem_ops rmem_hevc_ops = {
+ .device_init = hevc_mem_device_init,
+};
+
+static s32 __init hevc_mem_setup(struct reserved_mem *rmem)
+{
+ rmem->ops = &rmem_hevc_ops;
+ enc_pr(LOG_DEBUG, "HevcEnc reserved mem setup.\n");
+ return 0;
+}
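+
+/*
+ * Illustrative device-tree reserved-memory node for the setup hook above.
+ * Only the compatible string is fixed by the RESERVEDMEM_OF_DECLARE() at
+ * the bottom of this file; the node name and size are assumptions:
+ *
+ *	reserved-memory {
+ *		hevc_enc_reserved: hevc-enc {
+ *			compatible = "cnm, HevcEnc-memory";
+ *			size = <0x0 0x800000>;
+ *		};
+ *	};
+ */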
+
+module_param(print_level, uint, 0664);
+MODULE_PARM_DESC(print_level, "\n print_level: driver debug print level\n");
+
+module_param(clock_level, uint, 0664);
+MODULE_PARM_DESC(clock_level, "\n clock_level: HevcEnc clock level (0-6)\n");
+
+MODULE_AUTHOR("Amlogic using C&M VPU, Inc.");
+MODULE_DESCRIPTION("Amlogic HEVC encoder VPU linux driver");
+MODULE_LICENSE("GPL");
+
+module_init(vpu_init);
+module_exit(vpu_exit);
+RESERVEDMEM_OF_DECLARE(cnm_hevc, "cnm, HevcEnc-memory", hevc_mem_setup);
diff --git a/drivers/frame_sink/encoder/h265/vpu.h b/drivers/frame_sink/encoder/h265/vpu.h
new file mode 100644
index 0000000..eaed0b7
--- a/dev/null
+++ b/drivers/frame_sink/encoder/h265/vpu.h
@@ -0,0 +1,288 @@
+/*
+ * vpu.h
+ *
+ * linux device driver for VPU.
+ *
+ * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+*/
+
+#ifndef __VPU_DRV_H__
+#define __VPU_DRV_H__
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+
+#define MAX_INST_HANDLE_SIZE (32*1024)
+#define MAX_NUM_INSTANCE 4
+#define MAX_NUM_VPU_CORE 1
+
+#define W4_CMD_INIT_VPU (0x0001)
+#define W4_CMD_SLEEP_VPU (0x0400)
+#define W4_CMD_WAKEUP_VPU (0x0800)
+
+/* GXM: 2000/10 = 200M */
+#define HevcEnc_L0() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (3 << 25) | (1 << 16) | (3 << 9) | (1 << 0))
+/* GXM: 2000/8 = 250M */
+#define HevcEnc_L1() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (1 << 25) | (1 << 16) | (1 << 9) | (1 << 0))
+/* GXM: 2000/7 = 285M */
+#define HevcEnc_L2() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (4 << 25) | (0 << 16) | (4 << 9) | (0 << 0))
+/* GXM: 2000/6 = 333M */
+#define HevcEnc_L3() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (2 << 25) | (1 << 16) | (2 << 9) | (1 << 0))
+/* GXM: 2000/5 = 400M */
+#define HevcEnc_L4() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (3 << 25) | (0 << 16) | (3 << 9) | (0 << 0))
+/* GXM: 2000/4 = 500M */
+#define HevcEnc_L5() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (1 << 25) | (0 << 16) | (1 << 9) | (0 << 0))
+/* GXM: 2000/3 = 667M */
+#define HevcEnc_L6() WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+ (2 << 25) | (0 << 16) | (2 << 9) | (0 << 0))
+
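+/*
+ * In the helpers below, bits 8 and 24 of HHI_WAVE420L_CLK_CNTL appear to
+ * gate the two clock branches: they are cleared while a rate level is
+ * being programmed and set again once it is in place.
+ */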
+#define HevcEnc_clock_enable(level) \
+	do { \
+		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
+			& (~(1 << 8)) & (~(1 << 24))); \
+		if ((level) == 0) \
+			HevcEnc_L0(); \
+		else if ((level) == 1) \
+			HevcEnc_L1(); \
+		else if ((level) == 2) \
+			HevcEnc_L2(); \
+		else if ((level) == 3) \
+			HevcEnc_L3(); \
+		else if ((level) == 4) \
+			HevcEnc_L4(); \
+		else if ((level) == 5) \
+			HevcEnc_L5(); \
+		else if ((level) == 6) \
+			HevcEnc_L6(); \
+		WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+			READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
+			| (1 << 8) | (1 << 24)); \
+	} while (0)
+
+#define HevcEnc_clock_disable() \
+	WRITE_HHI_REG(HHI_WAVE420L_CLK_CNTL, \
+		READ_HHI_REG(HHI_WAVE420L_CLK_CNTL) \
+		& (~(1 << 8)) & (~(1 << 24)))
+
+struct compat_vpudrv_buffer_t {
+ u32 size;
+ u32 cached;
+ compat_ulong_t phys_addr;
+	compat_ulong_t base; /* kernel logical address used in kernel space */
+ compat_ulong_t virt_addr; /* virtual user space address */
+};
+
+struct vpudrv_buffer_t {
+ u32 size;
+ u32 cached;
+ ulong phys_addr;
+	ulong base; /* kernel logical address used in kernel space */
+ ulong virt_addr; /* virtual user space address */
+};
+
+struct vpu_bit_firmware_info_t {
+	u32 size; /* size of this structure */
+ u32 core_idx;
+ u32 reg_base_offset;
+ u16 bit_code[512];
+};
+
+struct vpudrv_inst_info_t {
+ u32 core_idx;
+ u32 inst_idx;
+ s32 inst_open_count; /* for output only*/
+};
+
+struct vpudrv_intr_info_t {
+ u32 timeout;
+ s32 intr_reason;
+};
+
+struct vpu_drv_context_t {
+ struct fasync_struct *async_queue;
+ ulong interrupt_reason;
+ u32 open_count; /*!<< device reference count. Not instance count */
+};
+
+/* To track the allocated memory buffer */
+struct vpudrv_buffer_pool_t {
+ struct list_head list;
+ struct vpudrv_buffer_t vb;
+ struct file *filp;
+};
+
+/* To track the instance index and buffer in instance pool */
+struct vpudrv_instanace_list_t {
+ struct list_head list;
+ ulong inst_idx;
+ ulong core_idx;
+ struct file *filp;
+};
+
+struct vpudrv_instance_pool_t {
+ u8 codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
+};
+
+#define VPUDRV_BUF_LEN struct vpudrv_buffer_t
+#define VPUDRV_BUF_LEN32 struct compat_vpudrv_buffer_t
+#define VPUDRV_INST_LEN struct vpudrv_inst_info_t
+
+#define VDI_MAGIC 'V'
+#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY \
+ _IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_FREE_PHYSICALMEMORY \
+ _IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_WAIT_INTERRUPT \
+ _IOW(VDI_MAGIC, 2, struct vpudrv_intr_info_t)
+
+#define VDI_IOCTL_SET_CLOCK_GATE \
+ _IOW(VDI_MAGIC, 3, u32)
+
+#define VDI_IOCTL_RESET \
+ _IOW(VDI_MAGIC, 4, u32)
+
+#define VDI_IOCTL_GET_INSTANCE_POOL \
+ _IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_GET_COMMON_MEMORY \
+ _IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO \
+ _IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_OPEN_INSTANCE \
+ _IOW(VDI_MAGIC, 9, VPUDRV_INST_LEN)
+
+#define VDI_IOCTL_CLOSE_INSTANCE \
+ _IOW(VDI_MAGIC, 10, VPUDRV_INST_LEN)
+
+#define VDI_IOCTL_GET_INSTANCE_NUM \
+ _IOW(VDI_MAGIC, 11, VPUDRV_INST_LEN)
+
+#define VDI_IOCTL_GET_REGISTER_INFO \
+ _IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_FLUSH_BUFFER \
+ _IOW(VDI_MAGIC, 13, VPUDRV_BUF_LEN)
+
+#define VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32 \
+ _IOW(VDI_MAGIC, 0, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_FREE_PHYSICALMEMORY32 \
+ _IOW(VDI_MAGIC, 1, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_GET_INSTANCE_POOL32 \
+ _IOW(VDI_MAGIC, 5, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_GET_COMMON_MEMORY32 \
+ _IOW(VDI_MAGIC, 6, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32 \
+ _IOW(VDI_MAGIC, 8, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_GET_REGISTER_INFO32 \
+ _IOW(VDI_MAGIC, 12, VPUDRV_BUF_LEN32)
+
+#define VDI_IOCTL_FLUSH_BUFFER32 \
+ _IOW(VDI_MAGIC, 13, VPUDRV_BUF_LEN32)
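+
+/*
+ * Userspace usage sketch (hypothetical client; the device node name depends
+ * on how init_HevcEnc_device() registers the character device):
+ *
+ *	struct vpudrv_buffer_t vb = { .size = 1024 * 1024 };
+ *	int fd = open("/dev/HevcEnc", O_RDWR);
+ *
+ *	if (fd >= 0 &&
+ *	    ioctl(fd, VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY, &vb) == 0) {
+ *		... mmap() the buffer and run the encoder ...
+ *		ioctl(fd, VDI_IOCTL_FREE_PHYSICALMEMORY, &vb);
+ *	}
+ */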
+
+enum {
+ W4_INT_INIT_VPU = 0,
+ W4_INT_DEC_PIC_HDR = 1,
+ W4_INT_SET_PARAM = 1,
+ W4_INT_ENC_INIT_SEQ = 1,
+ W4_INT_FINI_SEQ = 2,
+ W4_INT_DEC_PIC = 3,
+ W4_INT_ENC_PIC = 3,
+ W4_INT_SET_FRAMEBUF = 4,
+ W4_INT_FLUSH_DEC = 5,
+ W4_INT_ENC_SLICE_INT = 7,
+ W4_INT_GET_FW_VERSION = 8,
+ W4_INT_QUERY_DEC = 9,
+ W4_INT_SLEEP_VPU = 10,
+ W4_INT_WAKEUP_VPU = 11,
+ W4_INT_CHANGE_INT = 12,
+ W4_INT_CREATE_INSTANCE = 14,
+ W4_INT_BSBUF_EMPTY = 15,
+ /*!<< Bitstream buffer empty[dec]/full[enc] */
+};
+
+/* WAVE4 registers */
+#define VPU_REG_BASE_ADDR 0xc8810000
+#define VPU_REG_SIZE (0x4000 * MAX_NUM_VPU_CORE)
+
+#define W4_REG_BASE 0x0000
+#define W4_VPU_BUSY_STATUS (W4_REG_BASE + 0x0070)
+#define W4_VPU_INT_REASON_CLEAR (W4_REG_BASE + 0x0034)
+#define W4_VPU_VINT_CLEAR (W4_REG_BASE + 0x003C)
+#define W4_VPU_VPU_INT_STS (W4_REG_BASE + 0x0044)
+#define W4_VPU_INT_REASON (W4_REG_BASE + 0x004c)
+
+#define W4_RET_SUCCESS (W4_REG_BASE + 0x0110)
+#define W4_RET_FAIL_REASON (W4_REG_BASE + 0x0114)
+
+/* WAVE4 INIT, WAKEUP */
+#define W4_PO_CONF (W4_REG_BASE + 0x0000)
+#define W4_VCPU_CUR_PC (W4_REG_BASE + 0x0004)
+
+#define W4_VPU_VINT_ENABLE (W4_REG_BASE + 0x0048)
+
+#define W4_VPU_RESET_REQ (W4_REG_BASE + 0x0050)
+#define W4_VPU_RESET_STATUS (W4_REG_BASE + 0x0054)
+
+#define W4_VPU_REMAP_CTRL (W4_REG_BASE + 0x0060)
+#define W4_VPU_REMAP_VADDR (W4_REG_BASE + 0x0064)
+#define W4_VPU_REMAP_PADDR (W4_REG_BASE + 0x0068)
+#define W4_VPU_REMAP_CORE_START (W4_REG_BASE + 0x006C)
+
+#define W4_HW_OPTION (W4_REG_BASE + 0x0124)
+#define W4_CODE_SIZE (W4_REG_BASE + 0x011C)
+/* Note: W4_ADDR_CODE_BASE should be aligned to 4KB */
+#define W4_ADDR_CODE_BASE (W4_REG_BASE + 0x0118)
+#define W4_CODE_PARAM (W4_REG_BASE + 0x0120)
+#define W4_INIT_VPU_TIME_OUT_CNT (W4_REG_BASE + 0x0134)
+
+/* WAVE4 Wave4BitIssueCommand */
+#define W4_CORE_INDEX (W4_REG_BASE + 0x0104)
+#define W4_INST_INDEX (W4_REG_BASE + 0x0108)
+#define W4_COMMAND (W4_REG_BASE + 0x0100)
+#define W4_VPU_HOST_INT_REQ (W4_REG_BASE + 0x0038)
+
+#define W4_BS_RD_PTR (W4_REG_BASE + 0x0130)
+#define W4_BS_WR_PTR (W4_REG_BASE + 0x0134)
+#define W4_RET_ENC_PIC_BYTE (W4_REG_BASE + 0x01C8)
+
+#define W4_REMAP_CODE_INDEX 0
+
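+/*
+ * These accessors intentionally rely on names from the surrounding scope:
+ * `core` must be a valid core index, and s_vpu_register /
+ * s_bit_firmware_info must be the globals defined in vpu.c.
+ */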
+#define ReadVpuRegister(addr) \
+	readl((void __iomem *)(s_vpu_register.virt_addr \
+		+ s_bit_firmware_info[core].reg_base_offset + (addr)))
+
+#define WriteVpuRegister(addr, val) \
+	writel((u32)(val), (void __iomem *)(s_vpu_register.virt_addr \
+		+ s_bit_firmware_info[core].reg_base_offset + (addr)))
+
+#define WriteVpu(addr, val) writel((u32)(val), (void __iomem *)(addr))
+#endif /* __VPU_DRV_H__ */