path: root/drivers/frame_sink/encoder/h265/vpu.c
blob: 26c40fd1c428c1d72aa4641c1bcf66490c55e595
/*
 * vpu.c
 *
 * linux device driver for VPU.
 *
 * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reset.h>
#include <linux/compat.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_address.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>

#include <linux/amlogic/media/utils/vdec_reg.h>
#include "../../../common/media_clock/switch/amports_gate.h"

#include "vpu.h"
#include "vmm.h"

/* definitions to be changed as customer configuration */
/* enable this for a frame-by-frame clock gating scheme */
/* #define VPU_SUPPORT_CLOCK_CONTROL */

#define VPU_PLATFORM_DEVICE_NAME "HevcEnc"
#define VPU_DEV_NAME "HevcEnc"
#define VPU_CLASS_NAME "HevcEnc"

#ifndef VM_RESERVED /* for kernels where VM_RESERVED is no longer defined */
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (64 * SZ_1M)

#define LOG_ALL 0
#define LOG_INFO 1
#define LOG_DEBUG 2
#define LOG_ERROR 3

/* messages at or above print_level are printed */
#define enc_pr(level, x...) \
    do { \
        if (level >= print_level) \
            printk(x); \
    } while (0)

static s32 print_level = LOG_DEBUG;
static s32 clock_level = 4;

static struct video_mm_t s_vmem;
static struct vpudrv_buffer_t s_video_memory = {0};
static bool use_reserve;
static ulong cma_pool_size;

/* end customer definition */
static struct vpudrv_buffer_t s_instance_pool = {0};
static struct vpudrv_buffer_t s_common_memory = {0};
static struct vpu_drv_context_t s_vpu_drv_context;
static s32 s_vpu_major;
static struct device *hevcenc_dev;

static s32 s_vpu_open_ref_count;
static s32 s_vpu_irq;
static bool s_vpu_irq_requested;

static struct vpudrv_buffer_t s_vpu_register = {0};

static s32 s_interrupt_flag;
static wait_queue_head_t s_interrupt_wait_q;

static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
static DEFINE_SEMAPHORE(s_vpu_sem);
static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
static struct tasklet_struct hevc_tasklet;
static struct platform_device *hevc_pdev;

static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];

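/*
 * CPU/device cache maintenance helpers for encoder buffers:
 * dma_flush() pushes CPU-written data out to memory before the VPU
 * reads it (DMA_TO_DEVICE); cache_flush() invalidates stale CPU cache
 * lines before the CPU reads VPU-written data (DMA_FROM_DEVICE).
 */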
static void dma_flush(u32 buf_start, u32 buf_size)
{
    if (hevc_pdev)
        dma_sync_single_for_device(
            &hevc_pdev->dev, buf_start,
            buf_size, DMA_TO_DEVICE);
}

static void cache_flush(u32 buf_start, u32 buf_size)
{
    if (hevc_pdev)
        dma_sync_single_for_cpu(
            &hevc_pdev->dev, buf_start,
            buf_size, DMA_FROM_DEVICE);
}

s32 vpu_hw_reset(void)
{
    enc_pr(LOG_DEBUG, "request vpu reset from application.\n");
    return 0;
}

s32 vpu_clk_config(u32 enable)
{
    if (enable)
        HevcEnc_clock_enable(clock_level);
    else
        HevcEnc_clock_disable();
    return 0;
}

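/*
 * Buffers are carved out of the single s_vmem pool (reserved memory or
 * the CMA allocation made on first open); the kernel virtual base is
 * derived from the buffer's offset within that pool.
 */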
static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb)
{
    if (!vb)
        return -1;

    vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0);
    if ((ulong)vb->phys_addr == (ulong)-1) {
        enc_pr(LOG_ERROR,
            "Physical memory allocation error size=%d\n", vb->size);
        return -1;
    }

    vb->base = (ulong)(s_video_memory.base +
        (vb->phys_addr - s_video_memory.phys_addr));
    return 0;
}

static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb)
{
    if (!vb)
        return;

    if (vb->base)
        vmem_free(&s_vmem, vb->phys_addr, 0);
}

static s32 vpu_free_instances(struct file *filp)
{
    struct vpudrv_instanace_list_t *vil, *n;
    struct vpudrv_instance_pool_t *vip;
    void *vip_base;

    enc_pr(LOG_DEBUG, "vpu_free_instances\n");

    list_for_each_entry_safe(vil, n, &s_inst_list_head, list) {
        if (vil->filp == filp) {
            vip_base = (void *)s_instance_pool.base;
            enc_pr(LOG_INFO,
                "free_instances instIdx=%d, coreIdx=%d, vip_base=%p\n",
                (s32)vil->inst_idx,
                (s32)vil->core_idx,
                vip_base);
            vip = (struct vpudrv_instance_pool_t *)vip_base;
            if (vip) {
                /* only the first 4 bytes (inUse of CodecInst
                 * in vpuapi) need to be cleared to free the
                 * corresponding instance.
                 */
                memset(&vip->codecInstPool[vil->inst_idx],
                    0x00, 4);
            }
            s_vpu_open_ref_count--;
            list_del(&vil->list);
            kfree(vil);
        }
    }
    return 1;
}

static s32 vpu_free_buffers(struct file *filp)
{
    struct vpudrv_buffer_pool_t *pool, *n;
    struct vpudrv_buffer_t vb;

    enc_pr(LOG_DEBUG, "vpu_free_buffers\n");

    list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
        if (pool->filp == filp) {
            vb = pool->vb;
            if (vb.base) {
                vpu_free_dma_buffer(&vb);
                list_del(&pool->list);
                kfree(pool);
            }
        }
    }
    return 0;
}

static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff)
{
    struct vpudrv_buffer_pool_t *pool, *n;
    struct vpudrv_buffer_t vb;
    bool find = false;
    u32 cached = 0;

    enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n");
    spin_lock(&s_vpu_lock);
    list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
        if (pool->filp == filp) {
            vb = pool->vb;
            if (((vb.phys_addr >> PAGE_SHIFT) == vm_pgoff)
                && find == false) {
                cached = vb.cached;
                find = true;
            }
        }
    }
    spin_unlock(&s_vpu_lock);
    enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached);
    return cached;
}

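/*
 * Bottom half of the encoder interrupt: runs in tasklet context,
 * notifies user space via SIGIO (fasync) and wakes any thread blocked
 * in VDI_IOCTL_WAIT_INTERRUPT.
 */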
static void hevcenc_isr_tasklet(ulong data)
{
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data;

    enc_pr(LOG_INFO, "hevcenc_isr_tasklet interrupt:0x%08lx\n",
        dev->interrupt_reason);
    if (dev->interrupt_reason) {
        /* notify the interrupt to user space */
        if (dev->async_queue) {
            enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__);
            kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
        }
        s_interrupt_flag = 1;
        wake_up_interruptible(&s_interrupt_wait_q);
    }
    enc_pr(LOG_ALL, "[-]%s\n", __func__);
}

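/*
 * Top half: latch and clear the hardware interrupt reason bits, then
 * defer the user-space notification to hevc_tasklet.
 */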
static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id)
{
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id;
    /* this check can be removed;
     * it is also handled in VPU_WaitInterrupt of the API
     */
    u32 core;
    ulong interrupt_reason = 0;

    enc_pr(LOG_ALL, "[+]%s\n", __func__);

    for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
        if (s_bit_firmware_info[core].size == 0) {
            /* the API layer has not provided firmware
             * information for this core; no core is activated.
             */
            enc_pr(LOG_ERROR,
                "s_bit_firmware_info[core].size is zero\n");
            continue;
        }
        if (ReadVpuRegister(W4_VPU_VPU_INT_STS)) {
            interrupt_reason = ReadVpuRegister(W4_VPU_INT_REASON);
            WriteVpuRegister(W4_VPU_INT_REASON_CLEAR,
                interrupt_reason);
            WriteVpuRegister(W4_VPU_VINT_CLEAR, 0x1);
            dev->interrupt_reason |= interrupt_reason;
        }
        enc_pr(LOG_INFO,
            "intr_reason: 0x%08lx\n", dev->interrupt_reason);
    }
    if (dev->interrupt_reason)
        tasklet_schedule(&hevc_tasklet);
    enc_pr(LOG_ALL, "[-]%s\n", __func__);
    return IRQ_HANDLED;
}

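/*
 * The device is exclusive-open: only the first opener proceeds (any
 * concurrent open gets -EBUSY). The first open also allocates the
 * video memory pool (when no reserved memory is configured), requests
 * the irq and powers up the wave420l block.
 */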
static s32 vpu_open(struct inode *inode, struct file *filp)
{
    bool alloc_buffer = false;
    s32 r = 0;

    enc_pr(LOG_DEBUG, "[+] %s\n", __func__);
    spin_lock(&s_vpu_lock);
    s_vpu_drv_context.open_count++;
    if (s_vpu_drv_context.open_count == 1) {
        alloc_buffer = true;
    } else {
        r = -EBUSY;
        s_vpu_drv_context.open_count--;
        spin_unlock(&s_vpu_lock);
        goto Err;
    }
    filp->private_data = (void *)(&s_vpu_drv_context);
    spin_unlock(&s_vpu_lock);
    if (alloc_buffer && !use_reserve) {
#ifdef CONFIG_CMA
        s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE;
        s_video_memory.phys_addr =
            (ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME,
            VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE >> PAGE_SHIFT, 0,
            CODEC_MM_FLAGS_CPU);
        if (s_video_memory.phys_addr)
            s_video_memory.base =
                (ulong)phys_to_virt(s_video_memory.phys_addr);
        else
            s_video_memory.base = 0;
        if (s_video_memory.base) {
            enc_pr(LOG_DEBUG,
                "allocating phys 0x%lx, virt addr 0x%lx, size %dk\n",
                s_video_memory.phys_addr,
                s_video_memory.base,
                s_video_memory.size >> 10);
            if (vmem_init(&s_vmem,
                s_video_memory.phys_addr,
                s_video_memory.size) < 0) {
                enc_pr(LOG_ERROR, "fail to init vmem system\n");
                r = -ENOMEM;
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
                vmem_exit(&s_vmem);
                memset(&s_video_memory, 0,
                    sizeof(struct vpudrv_buffer_t));
                memset(&s_vmem, 0,
                    sizeof(struct video_mm_t));
            }
        } else {
            enc_pr(LOG_ERROR,
                "CMA failed to allocate dma buffer for %s, phys: 0x%lx\n",
                VPU_DEV_NAME, s_video_memory.phys_addr);
            if (s_video_memory.phys_addr)
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
            s_video_memory.phys_addr = 0;
            r = -ENOMEM;
        }
#else
        enc_pr(LOG_ERROR,
            "No CMA and no reserved memory for HevcEnc!\n");
        r = -ENOMEM;
#endif
    } else if (!s_video_memory.base) {
        enc_pr(LOG_ERROR,
            "HevcEnc memory has not been allocated!\n");
        r = -ENOMEM;
    }
    if (r != 0) {
        /* roll back the open count so a later open can retry */
        spin_lock(&s_vpu_lock);
        s_vpu_drv_context.open_count--;
        spin_unlock(&s_vpu_lock);
        goto Err;
    }
    if (alloc_buffer) {
        ulong flags;
        u32 data32;

        if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) {
            s32 err;

            err = request_irq(s_vpu_irq, vpu_irq_handler, 0,
                "HevcEnc-irq", (void *)(&s_vpu_drv_context));
            if (err) {
                enc_pr(LOG_ERROR,
                    "fail to register interrupt handler\n");
                spin_lock(&s_vpu_lock);
                s_vpu_drv_context.open_count--;
                spin_unlock(&s_vpu_lock);
                return -EFAULT;
            }
            s_vpu_irq_requested = true;
        }
        amports_switch_gate("vdec", 1);
        spin_lock_irqsave(&s_vpu_lock, flags);
        WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
            READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~(0x3<<24));
        udelay(10);

        data32 = 0x700;
        data32 |= READ_VREG(DOS_SW_RESET4);
        WRITE_VREG(DOS_SW_RESET4, data32);
        data32 &= ~0x700;
        WRITE_VREG(DOS_SW_RESET4, data32);

        WRITE_MPEG_REG(RESET0_REGISTER, data32 & ~(1<<21));
        WRITE_MPEG_REG(RESET0_REGISTER, data32 | (1<<21));
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
#ifndef VPU_SUPPORT_CLOCK_CONTROL
        vpu_clk_config(1);
#endif
        /* Enable wave420l_vpu_idle_rise_irq,
         * disable wave420l_vpu_idle_fall_irq
         */
        WRITE_VREG(DOS_WAVE420L_CNTL_STAT, 0x1);
        WRITE_VREG(DOS_MEM_PD_WAVE420L, 0x0);

        WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
            READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~(0x3<<12));
        udelay(10);

        spin_unlock_irqrestore(&s_vpu_lock, flags);
    }
Err:
    enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r);
    return r;
}

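/*
 * Main ioctl dispatcher. The *32 variants mirror their native
 * counterparts for 32-bit user space, translating through
 * struct compat_vpudrv_buffer_t. Buffer allocations are tracked per
 * struct file in s_vbp_head so vpu_release() can reclaim them.
 */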
static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg)
{
    s32 ret = 0;
    struct vpu_drv_context_t *dev =
        (struct vpu_drv_context_t *)filp->private_data;

    switch (cmd) {
    case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
    {
        struct vpudrv_buffer_pool_t *vbp;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
            if (!vbp) {
                up(&s_vpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&(vbp->vb),
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                kfree(vbp);
                up(&s_vpu_sem);
                return -EFAULT;
            }

            ret = vpu_alloc_dma_buffer(&(vbp->vb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(vbp);
                up(&s_vpu_sem);
                break;
            }
            ret = copy_to_user((void __user *)arg,
                &(vbp->vb),
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                /* free the buffer we failed to report back */
                vpu_free_dma_buffer(&vbp->vb);
                kfree(vbp);
                ret = -EFAULT;
                up(&s_vpu_sem);
                break;
            }

            vbp->filp = filp;
            spin_lock(&s_vpu_lock);
            list_add(&vbp->list, &s_vbp_head);
            spin_unlock(&s_vpu_lock);

            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
    }
    break;
    case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32:
    {
        struct vpudrv_buffer_pool_t *vbp;
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
            if (!vbp) {
                up(&s_vpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                kfree(vbp);
                up(&s_vpu_sem);
                return -EFAULT;
            }

            vbp->vb.size = buf32.size;
            vbp->vb.cached = buf32.cached;
            vbp->vb.phys_addr = (ulong)buf32.phys_addr;
            vbp->vb.base = (ulong)buf32.base;
            vbp->vb.virt_addr = (ulong)buf32.virt_addr;
            ret = vpu_alloc_dma_buffer(&(vbp->vb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(vbp);
                up(&s_vpu_sem);
                break;
            }

            buf32.size = vbp->vb.size;
            buf32.phys_addr =
                (compat_ulong_t)vbp->vb.phys_addr;
            buf32.base =
                (compat_ulong_t)vbp->vb.base;
            buf32.virt_addr =
                (compat_ulong_t)vbp->vb.virt_addr;

            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                /* free the buffer we failed to report back */
                vpu_free_dma_buffer(&vbp->vb);
                kfree(vbp);
                ret = -EFAULT;
                up(&s_vpu_sem);
                break;
            }

            vbp->filp = filp;
            spin_lock(&s_vpu_lock);
            list_add(&vbp->list, &s_vbp_head);
            spin_unlock(&s_vpu_lock);

            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
    }
    break;
    case VDI_IOCTL_FREE_PHYSICALMEMORY:
    {
        struct vpudrv_buffer_pool_t *vbp, *n;
        struct vpudrv_buffer_t vb;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            ret = copy_from_user(&vb,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                up(&s_vpu_sem);
                return -EACCES;
            }

            if (vb.base)
                vpu_free_dma_buffer(&vb);

            spin_lock(&s_vpu_lock);
            list_for_each_entry_safe(vbp, n,
                &s_vbp_head, list) {
                if (vbp->vb.base == vb.base) {
                    list_del(&vbp->list);
                    kfree(vbp);
                    break;
                }
            }
            spin_unlock(&s_vpu_lock);
            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
    }
    break;
    case VDI_IOCTL_FREE_PHYSICALMEMORY32:
    {
        struct vpudrv_buffer_pool_t *vbp, *n;
        struct compat_vpudrv_buffer_t buf32;
        struct vpudrv_buffer_t vb;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                up(&s_vpu_sem);
                return -EACCES;
            }

            vb.size = buf32.size;
            vb.phys_addr = (ulong)buf32.phys_addr;
            vb.base = (ulong)buf32.base;
            vb.virt_addr = (ulong)buf32.virt_addr;

            if (vb.base)
                vpu_free_dma_buffer(&vb);

            spin_lock(&s_vpu_lock);
            list_for_each_entry_safe(vbp, n,
                &s_vbp_head, list) {
                if ((compat_ulong_t)vbp->vb.base
                    == buf32.base) {
                    list_del(&vbp->list);
                    kfree(vbp);
                    break;
                }
            }
            spin_unlock(&s_vpu_lock);
            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
    }
    break;
    case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
        if (s_video_memory.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_video_memory,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
    }
    break;
    case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");

        buf32.size = s_video_memory.size;
        buf32.phys_addr =
            (compat_ulong_t)s_video_memory.phys_addr;
        buf32.base =
            (compat_ulong_t)s_video_memory.base;
        buf32.virt_addr =
            (compat_ulong_t)s_video_memory.virt_addr;
        if (s_video_memory.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
    }
    break;
    case VDI_IOCTL_WAIT_INTERRUPT:
    {
        struct vpudrv_intr_info_t info;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_WAIT_INTERRUPT\n");
        ret = copy_from_user(&info,
            (struct vpudrv_intr_info_t *)arg,
            sizeof(struct vpudrv_intr_info_t));
        if (ret != 0)
            return -EFAULT;

        ret = wait_event_interruptible_timeout(
            s_interrupt_wait_q,
            s_interrupt_flag != 0,
            msecs_to_jiffies(info.timeout));
        if (!ret) {
            ret = -ETIME;
            break;
        }
        if (dev->interrupt_reason & (1 << W4_INT_ENC_PIC)) {
            u32 start, end, size, core = 0;

            start = ReadVpuRegister(W4_BS_RD_PTR);
            end = ReadVpuRegister(W4_BS_WR_PTR);
            size = ReadVpuRegister(W4_RET_ENC_PIC_BYTE);
            enc_pr(LOG_INFO, "flush output buffer, ");
            enc_pr(LOG_INFO,
                "start:0x%x, end:0x%x, size:0x%x\n",
                start, end, size);
            if (end - start > size && end > start)
                size = end - start;
            if (size > 0)
                cache_flush(start, size);
        }

        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }

        enc_pr(LOG_INFO,
            "s_interrupt_flag(%d), reason(0x%08lx)\n",
            s_interrupt_flag, dev->interrupt_reason);

        info.intr_reason = dev->interrupt_reason;
        s_interrupt_flag = 0;
        dev->interrupt_reason = 0;
        ret = copy_to_user((void __user *)arg,
            &info, sizeof(struct vpudrv_intr_info_t));
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_WAIT_INTERRUPT\n");
        if (ret != 0)
            return -EFAULT;
    }
    break;
    case VDI_IOCTL_SET_CLOCK_GATE:
    {
        u32 clkgate;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_SET_CLOCK_GATE\n");
        if (get_user(clkgate, (u32 __user *)arg))
            return -EFAULT;
#ifdef VPU_SUPPORT_CLOCK_CONTROL
        vpu_clk_config(clkgate);
#endif
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_SET_CLOCK_GATE\n");
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_POOL:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_POOL\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret != 0)
            break;

        if (s_instance_pool.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_instance_pool,
                sizeof(struct vpudrv_buffer_t));
            ret = (ret != 0) ? -EFAULT : 0;
        } else {
            ret = copy_from_user(&s_instance_pool,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret == 0) {
                s_instance_pool.size =
                    PAGE_ALIGN(s_instance_pool.size);
                s_instance_pool.base =
                    (ulong)vmalloc(s_instance_pool.size);
                s_instance_pool.phys_addr =
                    s_instance_pool.base;
                if (s_instance_pool.base == 0) {
                    ret = -EFAULT;
                    up(&s_vpu_sem);
                    break;
                }
                /* clear the pool */
                memset((void *)s_instance_pool.base,
                    0, s_instance_pool.size);
                ret = copy_to_user((void __user *)arg,
                    &s_instance_pool,
                    sizeof(struct vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        up(&s_vpu_sem);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_INSTANCE_POOL\n");
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_POOL32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_POOL32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret != 0)
            break;
        if (s_instance_pool.base != 0) {
            buf32.size = s_instance_pool.size;
            buf32.phys_addr =
                (compat_ulong_t)s_instance_pool.phys_addr;
            buf32.base =
                (compat_ulong_t)s_instance_pool.base;
            buf32.virt_addr =
                (compat_ulong_t)s_instance_pool.virt_addr;
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            ret = (ret != 0) ? -EFAULT : 0;
        } else {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret == 0) {
                s_instance_pool.size = buf32.size;
                s_instance_pool.size =
                    PAGE_ALIGN(s_instance_pool.size);
                s_instance_pool.base =
                    (ulong)vmalloc(s_instance_pool.size);
                s_instance_pool.phys_addr =
                    s_instance_pool.base;
                buf32.size = s_instance_pool.size;
                buf32.phys_addr =
                    (compat_ulong_t)s_instance_pool.phys_addr;
                buf32.base =
                    (compat_ulong_t)s_instance_pool.base;
                buf32.virt_addr =
                    (compat_ulong_t)s_instance_pool.virt_addr;
                if (s_instance_pool.base == 0) {
                    ret = -EFAULT;
                    up(&s_vpu_sem);
                    break;
                }
                /* clear the pool */
                memset((void *)s_instance_pool.base,
                    0x0, s_instance_pool.size);
                ret = copy_to_user((void __user *)arg,
                    &buf32,
                    sizeof(struct compat_vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        up(&s_vpu_sem);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_INSTANCE_POOL32\n");
    }
    break;
    case VDI_IOCTL_GET_COMMON_MEMORY:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_COMMON_MEMORY\n");
        if (s_common_memory.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_common_memory,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = copy_from_user(&s_common_memory,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0) {
                ret = -EFAULT;
                break;
            }
            if (vpu_alloc_dma_buffer(
                &s_common_memory) != -1) {
                ret = copy_to_user((void __user *)arg,
                    &s_common_memory,
                    sizeof(struct vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_COMMON_MEMORY\n");
    }
    break;
    case VDI_IOCTL_GET_COMMON_MEMORY32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_COMMON_MEMORY32\n");

        buf32.size = s_common_memory.size;
        buf32.phys_addr =
            (compat_ulong_t)s_common_memory.phys_addr;
        buf32.base =
            (compat_ulong_t)s_common_memory.base;
        buf32.virt_addr =
            (compat_ulong_t)s_common_memory.virt_addr;
        if (s_common_memory.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0) {
                ret = -EFAULT;
                break;
            }
            s_common_memory.size = buf32.size;
            if (vpu_alloc_dma_buffer(
                &s_common_memory) != -1) {
                buf32.size = s_common_memory.size;
                buf32.phys_addr =
                    (compat_ulong_t)s_common_memory.phys_addr;
                buf32.base =
                    (compat_ulong_t)s_common_memory.base;
                buf32.virt_addr =
                    (compat_ulong_t)s_common_memory.virt_addr;
                ret = copy_to_user((void __user *)arg,
                    &buf32,
                    sizeof(struct compat_vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_COMMON_MEMORY32\n");
    }
    break;
    case VDI_IOCTL_OPEN_INSTANCE:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *n, *entry;

        vil = kzalloc(sizeof(*vil), GFP_KERNEL);
        if (!vil)
            return -ENOMEM;

        if (copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t))) {
            kfree(vil);
            return -EFAULT;
        }

        vil->inst_idx = inst_info.inst_idx;
        vil->core_idx = inst_info.core_idx;
        vil->filp = filp;

        spin_lock(&s_vpu_lock);
        list_add(&vil->list, &s_inst_list_head);

        /* counting the current open instance number */
        inst_info.inst_open_count = 0;
        list_for_each_entry_safe(entry, n,
            &s_inst_list_head, list) {
            if (entry->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }

        /* flag for whether the vpu is opened or closed */
        s_vpu_open_ref_count++;
        spin_unlock(&s_vpu_lock);

        if (copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t))) {
            /* undo the registration before freeing the node */
            spin_lock(&s_vpu_lock);
            list_del(&vil->list);
            s_vpu_open_ref_count--;
            spin_unlock(&s_vpu_lock);
            kfree(vil);
            return -EFAULT;
        }

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_OPEN_INSTANCE ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, ",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx);
        enc_pr(LOG_DEBUG,
            "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
            s_vpu_open_ref_count,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_CLOSE_INSTANCE:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *n;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_CLOSE_INSTANCE\n");
        if (copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t)))
            return -EFAULT;

        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->inst_idx == inst_info.inst_idx &&
                vil->core_idx == inst_info.core_idx) {
                list_del(&vil->list);
                kfree(vil);
                break;
            }
        }

        /* counting the current open instance number */
        inst_info.inst_open_count = 0;
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }

        /* flag for whether the vpu is opened or closed */
        s_vpu_open_ref_count--;
        spin_unlock(&s_vpu_lock);

        if (copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t)))
            return -EFAULT;

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_CLOSE_INSTANCE ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, ",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx);
        enc_pr(LOG_DEBUG,
            "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
            s_vpu_open_ref_count,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_NUM:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *n;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_NUM\n");

        ret = copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t));
        if (ret != 0)
            break;

        inst_info.inst_open_count = 0;

        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }
        spin_unlock(&s_vpu_lock);

        ret = copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t));

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_GET_INSTANCE_NUM ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, open_count=%d\n",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_RESET:
    {
        vpu_hw_reset();
    }
    break;
    case VDI_IOCTL_GET_REGISTER_INFO:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_REGISTER_INFO\n");
        ret = copy_to_user((void __user *)arg,
            &s_vpu_register,
            sizeof(struct vpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_REGISTER_INFO ");
        enc_pr(LOG_ALL,
            "s_vpu_register.phys_addr=0x%lx, ",
            s_vpu_register.phys_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.virt_addr=0x%lx, ",
            s_vpu_register.virt_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.size=0x%x\n",
            s_vpu_register.size);
    }
    break;
    case VDI_IOCTL_GET_REGISTER_INFO32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_REGISTER_INFO32\n");

        buf32.size = s_vpu_register.size;
        buf32.phys_addr =
            (compat_ulong_t)s_vpu_register.phys_addr;
        buf32.base =
            (compat_ulong_t)s_vpu_register.base;
        buf32.virt_addr =
            (compat_ulong_t)s_vpu_register.virt_addr;
        ret = copy_to_user((void __user *)arg,
            &buf32,
            sizeof(struct compat_vpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_REGISTER_INFO32 ");
        enc_pr(LOG_ALL,
            "s_vpu_register.phys_addr=0x%lx, ",
            s_vpu_register.phys_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.virt_addr=0x%lx, ",
            s_vpu_register.virt_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.size=0x%x\n",
            s_vpu_register.size);
    }
    break;
    case VDI_IOCTL_FLUSH_BUFFER32:
    {
        struct vpudrv_buffer_pool_t *pool, *n;
        struct compat_vpudrv_buffer_t buf32;
        struct vpudrv_buffer_t vb;
        bool find = false;
        u32 cached = 0;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FLUSH_BUFFER32\n");

        ret = copy_from_user(&buf32,
            (struct compat_vpudrv_buffer_t *)arg,
            sizeof(struct compat_vpudrv_buffer_t));
        if (ret)
            return -EFAULT;
        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(pool, n,
            &s_vbp_head, list) {
            if (pool->filp == filp) {
                vb = pool->vb;
                if (((compat_ulong_t)vb.phys_addr
                    == buf32.phys_addr)
                    && find == false) {
                    cached = vb.cached;
                    find = true;
                }
            }
        }
        spin_unlock(&s_vpu_lock);
        if (find && cached)
            dma_flush(
                (u32)buf32.phys_addr,
                (u32)buf32.size);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FLUSH_BUFFER32\n");
    }
    break;
    case VDI_IOCTL_FLUSH_BUFFER:
    {
        struct vpudrv_buffer_pool_t *pool, *n;
        struct vpudrv_buffer_t vb, buf;
        bool find = false;
        u32 cached = 0;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FLUSH_BUFFER\n");

        ret = copy_from_user(&buf,
            (struct vpudrv_buffer_t *)arg,
            sizeof(struct vpudrv_buffer_t));
        if (ret)
            return -EFAULT;
        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(pool, n,
            &s_vbp_head, list) {
            if (pool->filp == filp) {
                vb = pool->vb;
                if ((vb.phys_addr == buf.phys_addr)
                    && find == false) {
                    cached = vb.cached;
                    find = true;
                }
            }
        }
        spin_unlock(&s_vpu_lock);
        if (find && cached)
            dma_flush(
                (u32)buf.phys_addr,
                (u32)buf.size);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FLUSH_BUFFER\n");
    }
    break;
    default:
    {
        enc_pr(LOG_ERROR,
            "No such ioctl, cmd is %d\n", cmd);
    }
    break;
    }
    return ret;
}

#ifdef CONFIG_COMPAT
static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg)
{
    long ret;

    arg = (ulong)compat_ptr(arg);
    ret = vpu_ioctl(filp, cmd, arg);
    return ret;
}
#endif

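/*
 * write() is repurposed as the firmware-info load path: user space
 * writes one struct vpu_bit_firmware_info_t per core, which is stashed
 * in s_bit_firmware_info[] and later used by suspend/resume and the
 * irq handler to decide whether a core is active.
 */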
static ssize_t vpu_write(struct file *filp,
    const char *buf,
    size_t len,
    loff_t *ppos)
{
    enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len);

    if (!buf) {
        enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n");
        return -EFAULT;
    }

    if (len == sizeof(struct vpu_bit_firmware_info_t)) {
        struct vpu_bit_firmware_info_t *bit_firmware_info;

        bit_firmware_info =
            kmalloc(sizeof(struct vpu_bit_firmware_info_t),
                GFP_KERNEL);
        if (!bit_firmware_info) {
            enc_pr(LOG_ERROR,
                "vpu_write bit_firmware_info allocation error\n");
            return -EFAULT;
        }

        if (copy_from_user(bit_firmware_info, buf, len)) {
            enc_pr(LOG_ERROR,
                "vpu_write copy_from_user error for bit_firmware_info\n");
            kfree(bit_firmware_info);
            return -EFAULT;
        }

        if (bit_firmware_info->size ==
            sizeof(struct vpu_bit_firmware_info_t)) {
            enc_pr(LOG_INFO,
                "vpu_write set bit_firmware_info coreIdx=0x%x, ",
                bit_firmware_info->core_idx);
            enc_pr(LOG_INFO,
                "reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n",
                bit_firmware_info->reg_base_offset,
                bit_firmware_info->size,
                bit_firmware_info->bit_code[0]);

            /* core_idx is an array index, so it must be
             * strictly less than MAX_NUM_VPU_CORE
             */
            if (bit_firmware_info->core_idx
                >= MAX_NUM_VPU_CORE) {
                enc_pr(LOG_ERROR,
                    "vpu_write coreIdx[%d] ",
                    bit_firmware_info->core_idx);
                enc_pr(LOG_ERROR,
                    "exceeds MAX_NUM_VPU_CORE[%d]\n",
                    MAX_NUM_VPU_CORE);
                kfree(bit_firmware_info);
                return -ENODEV;
            }

            memcpy((void *)&s_bit_firmware_info
                [bit_firmware_info->core_idx],
                bit_firmware_info,
                sizeof(struct vpu_bit_firmware_info_t));
            kfree(bit_firmware_info);
            return len;
        }
        kfree(bit_firmware_info);
    }
    return -1;
}

static s32 vpu_release(struct inode *inode, struct file *filp)
{
    s32 ret = 0;
    ulong flags;

    enc_pr(LOG_DEBUG, "vpu_release\n");
    ret = down_interruptible(&s_vpu_sem);
    if (ret == 0) {
        vpu_free_buffers(filp);
        vpu_free_instances(filp);
        s_vpu_drv_context.open_count--;
        if (s_vpu_drv_context.open_count == 0) {
            if (s_instance_pool.base) {
                enc_pr(LOG_DEBUG, "free instance pool\n");
                vfree((const void *)s_instance_pool.base);
                s_instance_pool.base = 0;
            }
            if (s_common_memory.base) {
                enc_pr(LOG_DEBUG, "free common memory\n");
                vpu_free_dma_buffer(&s_common_memory);
                s_common_memory.base = 0;
            }

            if (s_video_memory.base && !use_reserve) {
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
                vmem_exit(&s_vmem);
                memset(&s_video_memory,
                    0, sizeof(struct vpudrv_buffer_t));
                memset(&s_vmem,
                    0, sizeof(struct video_mm_t));
            }
            if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == true)) {
                free_irq(s_vpu_irq, &s_vpu_drv_context);
                s_vpu_irq_requested = false;
            }
            spin_lock_irqsave(&s_vpu_lock, flags);
            WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
                READ_AOREG(AO_RTI_GEN_PWR_ISO0) | (0x3<<12));
            udelay(10);

            WRITE_VREG(DOS_MEM_PD_WAVE420L, 0xffffffff);
#ifndef VPU_SUPPORT_CLOCK_CONTROL
            vpu_clk_config(0);
#endif
            WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
                READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | (0x3<<24));
            udelay(10);
            spin_unlock_irqrestore(&s_vpu_lock, flags);
            amports_switch_gate("vdec", 0);
        }
        /* only release the semaphore if we actually acquired it */
        up(&s_vpu_sem);
    }
    return 0;
}

static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode)
{
    struct vpu_drv_context_t *dev =
        (struct vpu_drv_context_t *)filp->private_data;

    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
{
    ulong pfn;

    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot =
        pgprot_noncached(vm->vm_page_prot);
    pfn = s_vpu_register.phys_addr >> PAGE_SHIFT;
    return remap_pfn_range(vm, vm->vm_start, pfn,
        vm->vm_end - vm->vm_start,
        vm->vm_page_prot) ? -EAGAIN : 0;
}

static s32 vpu_map_to_physical_memory(
    struct file *fp, struct vm_area_struct *vm)
{
    vm->vm_flags |= VM_IO | VM_RESERVED;
    if (vm->vm_pgoff ==
        (s_common_memory.phys_addr >> PAGE_SHIFT)) {
        vm->vm_page_prot =
            pgprot_noncached(vm->vm_page_prot);
    } else {
        if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0)
            vm->vm_page_prot =
                pgprot_noncached(vm->vm_page_prot);
    }
    /* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */
    return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
        vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}

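/*
 * The instance pool lives in vmalloc space, which is not physically
 * contiguous, so it cannot be mapped with a single remap_pfn_range();
 * each page's pfn must be looked up and mapped individually.
 */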
static s32 vpu_map_to_instance_pool_memory(
    struct file *fp, struct vm_area_struct *vm)
{
    s32 ret;
    long length = vm->vm_end - vm->vm_start;
    ulong start = vm->vm_start;
    s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base;
    ulong pfn;

    vm->vm_flags |= VM_RESERVED;

    /* loop over all pages, map each page individually */
    while (length > 0) {
        pfn = vmalloc_to_pfn(vmalloc_area_ptr);
        ret = remap_pfn_range(vm, start, pfn,
            PAGE_SIZE, PAGE_SHARED);
        if (ret < 0)
            return ret;
        start += PAGE_SIZE;
        vmalloc_area_ptr += PAGE_SIZE;
        length -= PAGE_SIZE;
    }
    return 0;
}

/*
 * @brief memory map interface for vpu file operation
 * @return 0 on success or negative error code on error
 */
static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
    /* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */
    if ((vm->vm_end - vm->vm_start == s_vpu_register.size + 1) &&
        (vm->vm_pgoff == 0)) {
        vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT);
        return vpu_map_to_register(fp, vm);
    }

    if (vm->vm_pgoff == 0)
        return vpu_map_to_instance_pool_memory(fp, vm);

    return vpu_map_to_physical_memory(fp, vm);
}

static const struct file_operations vpu_fops = {
    .owner = THIS_MODULE,
    .open = vpu_open,
    .release = vpu_release,
    .write = vpu_write,
    .unlocked_ioctl = vpu_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = vpu_compat_ioctl,
#endif
    .fasync = vpu_fasync,
    .mmap = vpu_mmap,
};

static ssize_t hevcenc_status_show(struct class *cla,
    struct class_attribute *attr, char *buf)
{
    return snprintf(buf, 40, "hevcenc_status_show\n");
}

static struct class_attribute hevcenc_class_attrs[] = {
    __ATTR(encode_status,
        S_IRUGO | S_IWUSR,
        hevcenc_status_show,
        NULL),
    __ATTR_NULL
};

static struct class hevcenc_class = {
    .name = VPU_CLASS_NAME,
    .class_attrs = hevcenc_class_attrs,
};

s32 init_HevcEnc_device(void)
{
    s32 r = 0;

    r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops);
    if (r <= 0) {
        enc_pr(LOG_ERROR, "register hevcenc device error.\n");
        return r;
    }
    s_vpu_major = r;

    r = class_register(&hevcenc_class);
    if (r < 0) {
        enc_pr(LOG_ERROR, "error creating hevcenc class.\n");
        return r;
    }

    hevcenc_dev = device_create(&hevcenc_class, NULL,
        MKDEV(s_vpu_major, 0), NULL,
        VPU_DEV_NAME);

    if (IS_ERR(hevcenc_dev)) {
        enc_pr(LOG_ERROR, "create hevcenc device error.\n");
        class_unregister(&hevcenc_class);
        return -1;
    }
    return r;
}

s32 uninit_HevcEnc_device(void)
{
    if (hevcenc_dev)
        device_destroy(&hevcenc_class, MKDEV(s_vpu_major, 0));

    class_destroy(&hevcenc_class);

    unregister_chrdev(s_vpu_major, VPU_DEV_NAME);
    return 0;
}

static s32 hevc_mem_device_init(
    struct reserved_mem *rmem, struct device *dev)
{
    s32 r;

    if (!rmem) {
        enc_pr(LOG_ERROR,
            "Cannot obtain I/O memory, will allocate hevc buffer!\n");
        r = -EFAULT;
        return r;
    }

    if ((!rmem->base) ||
        (rmem->size < VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) {
        enc_pr(LOG_ERROR,
            "memory range error, 0x%lx - 0x%lx\n",
            (ulong)rmem->base, (ulong)rmem->size);
        r = -EFAULT;
        return r;
    }
    r = 0;
    s_video_memory.size = rmem->size;
    s_video_memory.phys_addr = (ulong)rmem->base;
    s_video_memory.base =
        (ulong)phys_to_virt(s_video_memory.phys_addr);
    if (!s_video_memory.base) {
        enc_pr(LOG_ERROR, "fail to remap video memory ");
        enc_pr(LOG_ERROR,
            "physical phys_addr=0x%lx, base=0x%lx, size=0x%x\n",
            (ulong)s_video_memory.phys_addr,
            (ulong)s_video_memory.base,
            (u32)s_video_memory.size);
        s_video_memory.phys_addr = 0;
        r = -EFAULT;
    }
    return r;
}

static s32 vpu_probe(struct platform_device *pdev)
{
    s32 err = 0, irq, reg_count, idx;
    struct resource res;
    struct device_node *np, *child;

    enc_pr(LOG_DEBUG, "vpu_probe\n");

    s_vpu_major = 0;
    use_reserve = false;
    s_vpu_irq = -1;
    cma_pool_size = 0;
    s_vpu_irq_requested = false;
    s_vpu_open_ref_count = 0;
    hevcenc_dev = NULL;
    hevc_pdev = NULL;
    memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
    memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
    memset(&s_vmem, 0, sizeof(struct video_mm_t));
    memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info));
    memset(&res, 0, sizeof(struct resource));

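    /*
     * Memory strategy: prefer a DT reserved-memory region (mapped by
     * hevc_mem_device_init via of_reserved_mem_device_init); otherwise
     * fall back to a CMA pool, capped at
     * VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE, allocated on first open.
     */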
    idx = of_reserved_mem_device_init(&pdev->dev);
    if (idx != 0) {
        enc_pr(LOG_DEBUG,
            "HevcEnc reserved memory config fail.\n");
    } else if (s_video_memory.phys_addr) {
        use_reserve = true;
    }

    if (use_reserve == false) {
#ifndef CONFIG_CMA
        enc_pr(LOG_ERROR,
            "HevcEnc reserved memory is invalid, probe fail!\n");
        err = -EFAULT;
        goto ERROR_PROBE_DEVICE;
#else
        cma_pool_size =
            (codec_mm_get_total_size() >
            (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) ?
            (VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE) :
            codec_mm_get_total_size();
        enc_pr(LOG_DEBUG,
            "HevcEnc - cma memory pool size: %d MB\n",
            (u32)cma_pool_size / SZ_1M);
#endif
    }

    /* get interrupt resource */
    irq = platform_get_irq_byname(pdev, "wave420l_irq");
    if (irq < 0) {
        enc_pr(LOG_ERROR, "get HevcEnc irq resource error\n");
        err = -ENXIO;
        goto ERROR_PROBE_DEVICE;
    }
    s_vpu_irq = irq;
    enc_pr(LOG_DEBUG, "HevcEnc - wave420l_irq: %d\n", s_vpu_irq);
#if 0
    rstc = devm_reset_control_get(&pdev->dev, "HevcEnc");
    if (IS_ERR(rstc)) {
        enc_pr(LOG_ERROR,
            "get HevcEnc rstc error: %lx\n", PTR_ERR(rstc));
        rstc = NULL;
        err = -ENOENT;
        goto ERROR_PROBE_DEVICE;
    }
    reset_control_assert(rstc);
    s_vpu_rstc = rstc;

    clk = clk_get(&pdev->dev, "clk_HevcEnc");
    if (IS_ERR(clk)) {
        enc_pr(LOG_ERROR, "cannot get clock\n");
        clk = NULL;
        err = -ENOENT;
        goto ERROR_PROBE_DEVICE;
    }
    s_vpu_clk = clk;
#endif

#ifndef VPU_SUPPORT_CLOCK_CONTROL
    vpu_clk_config(1);
#endif

    np = pdev->dev.of_node;
    reg_count = 0;
    for_each_child_of_node(np, child) {
        if (of_address_to_resource(child, 0, &res)
            || (reg_count > 1)) {
            enc_pr(LOG_ERROR,
                "missing reg range or too many reg ranges (%d)\n",
                reg_count);
            err = -ENXIO;
            goto ERROR_PROBE_DEVICE;
        }
        /* if platform driver is implemented */
        if (res.start != 0) {
            s_vpu_register.phys_addr = res.start;
            s_vpu_register.virt_addr =
                (ulong)ioremap_nocache(
                res.start, resource_size(&res));
            /* note: size excludes the last byte;
             * vpu_mmap() compensates with size + 1
             */
            s_vpu_register.size = res.end - res.start;
            enc_pr(LOG_DEBUG,
                "vpu base address from platform driver ");
            enc_pr(LOG_DEBUG,
                "physical base addr=0x%lx, virtual base=0x%lx\n",
                s_vpu_register.phys_addr,
                s_vpu_register.virt_addr);
        } else {
            s_vpu_register.phys_addr = VPU_REG_BASE_ADDR;
            s_vpu_register.virt_addr =
                (ulong)ioremap_nocache(
                s_vpu_register.phys_addr, VPU_REG_SIZE);
            s_vpu_register.size = VPU_REG_SIZE;
            enc_pr(LOG_DEBUG,
                "vpu base address from default value ");
            enc_pr(LOG_DEBUG,
                "physical base addr=0x%lx, virtual base=0x%lx\n",
                s_vpu_register.phys_addr,
                s_vpu_register.virt_addr);
        }
        reg_count++;
    }

    /* get the major number of the character device */
    if (init_HevcEnc_device()) {
        err = -EBUSY;
        enc_pr(LOG_ERROR, "could not allocate major number\n");
        goto ERROR_PROBE_DEVICE;
    }
    enc_pr(LOG_INFO, "SUCCESS alloc_chrdev_region\n");

    init_waitqueue_head(&s_interrupt_wait_q);
    tasklet_init(&hevc_tasklet,
        hevcenc_isr_tasklet,
        (ulong)&s_vpu_drv_context);
    s_common_memory.base = 0;
    s_instance_pool.base = 0;

    if (use_reserve == true) {
        if (vmem_init(&s_vmem, s_video_memory.phys_addr,
            s_video_memory.size) < 0) {
            enc_pr(LOG_ERROR, "fail to init vmem system\n");
            err = -ENOMEM;
            goto ERROR_PROBE_DEVICE;
        }
        enc_pr(LOG_DEBUG,
            "successfully probed vpu device with reserved video memory ");
        enc_pr(LOG_DEBUG,
            "phys_addr=0x%lx, base = 0x%lx\n",
            (ulong)s_video_memory.phys_addr,
            (ulong)s_video_memory.base);
    } else
        enc_pr(LOG_DEBUG,
            "successfully probed vpu device with video memory from cma\n");
    hevc_pdev = pdev;
    return 0;

ERROR_PROBE_DEVICE:
    if (s_vpu_register.virt_addr) {
        iounmap((void *)s_vpu_register.virt_addr);
        memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
    }

    if (s_video_memory.base) {
        vmem_exit(&s_vmem);
        memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
        memset(&s_vmem, 0, sizeof(struct video_mm_t));
    }

    vpu_clk_config(0);

    if (s_vpu_irq_requested == true) {
        if (s_vpu_irq >= 0) {
            free_irq(s_vpu_irq, &s_vpu_drv_context);
            s_vpu_irq = -1;
        }
        s_vpu_irq_requested = false;
    }
    uninit_HevcEnc_device();
    return err;
}

static s32 vpu_remove(struct platform_device *pdev)
{
    enc_pr(LOG_DEBUG, "vpu_remove\n");

    if (s_instance_pool.base) {
        vfree((const void *)s_instance_pool.base);
        s_instance_pool.base = 0;
    }

    if (s_common_memory.base) {
        vpu_free_dma_buffer(&s_common_memory);
        s_common_memory.base = 0;
    }

    if (s_video_memory.base) {
        if (!use_reserve)
            codec_mm_free_for_dma(
                VPU_DEV_NAME,
                (u32)s_video_memory.phys_addr);
        vmem_exit(&s_vmem);
        memset(&s_video_memory,
            0, sizeof(struct vpudrv_buffer_t));
        memset(&s_vmem,
            0, sizeof(struct video_mm_t));
    }

    if (s_vpu_irq_requested == true) {
        if (s_vpu_irq >= 0) {
            free_irq(s_vpu_irq, &s_vpu_drv_context);
            s_vpu_irq = -1;
        }
        s_vpu_irq_requested = false;
    }

    if (s_vpu_register.virt_addr) {
        iounmap((void *)s_vpu_register.virt_addr);
        memset(&s_vpu_register,
            0, sizeof(struct vpudrv_buffer_t));
    }
    hevc_pdev = NULL;
    vpu_clk_config(0);

    uninit_HevcEnc_device();
    return 0;
}

#ifdef CONFIG_PM
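/*
 * Host->VPU command handshake used by suspend/resume: set BUSY, select
 * core 0, write the command word, then ring the host interrupt request;
 * completion is detected by polling W4_VPU_BUSY_STATUS back to 0.
 */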
static void Wave4BitIssueCommand(u32 core, u32 cmd)
{
    WriteVpuRegister(W4_VPU_BUSY_STATUS, 1);
    WriteVpuRegister(W4_CORE_INDEX, 0);
    /* coreIdx = ReadVpuRegister(W4_VPU_BUSY_STATUS); */
    /* coreIdx = 0; */
    /* WriteVpuRegister(W4_INST_INDEX,
     *	(instanceIndex & 0xffff) | (codecMode << 16));
     */
    WriteVpuRegister(W4_COMMAND, cmd);
    WriteVpuRegister(W4_VPU_HOST_INT_REQ, 1);
}

static s32 vpu_suspend(struct platform_device *pdev, pm_message_t state)
{
    u32 core;
    ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */

    enc_pr(LOG_DEBUG, "vpu_suspend\n");

    vpu_clk_config(1);

    if (s_vpu_open_ref_count > 0) {
        for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
            if (s_bit_firmware_info[core].size == 0)
                continue;
            while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
                if (time_after(jiffies, timeout)) {
                    enc_pr(LOG_ERROR,
                        "SLEEP_VPU BUSY timeout");
                    goto DONE_SUSPEND;
                }
            }
            Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU);

            while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
                if (time_after(jiffies, timeout)) {
                    enc_pr(LOG_ERROR,
                        "SLEEP_VPU BUSY timeout");
                    goto DONE_SUSPEND;
                }
            }
            if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
                enc_pr(LOG_ERROR,
                    "SLEEP_VPU failed [0x%x]",
                    ReadVpuRegister(W4_RET_FAIL_REASON));
                goto DONE_SUSPEND;
            }
        }
    }

    vpu_clk_config(0);
    return 0;

DONE_SUSPEND:
    vpu_clk_config(0);
    return -EAGAIN;
}

static s32 vpu_resume(struct platform_device *pdev)
{
    u32 i;
    u32 core;
    u32 val;
    ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */
    ulong code_base;
    u32 code_size;
    u32 remap_size;
    u32 regVal;
    u32 hwOption = 0;

    enc_pr(LOG_DEBUG, "vpu_resume\n");

    vpu_clk_config(1);

    for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
        if (s_bit_firmware_info[core].size == 0)
            continue;
        code_base = s_common_memory.phys_addr;
        /* ALIGN TO 4KB */
        code_size = (s_common_memory.size & ~0xfff);
        if (code_size < s_bit_firmware_info[core].size * 2)
            goto DONE_WAKEUP;

        /*---- LOAD BOOT CODE */
        for (i = 0; i < 512; i += 2) {
            val = s_bit_firmware_info[core].bit_code[i];
            val |= (s_bit_firmware_info[core].bit_code[i+1] << 16);
            WriteVpu(code_base+(i*2), val);
        }

        regVal = 0;
        WriteVpuRegister(W4_PO_CONF, regVal);

        /* Reset all blocks */
        regVal = 0x7ffffff;
        WriteVpuRegister(W4_VPU_RESET_REQ, regVal);

        /* Wait for reset done */
        while (ReadVpuRegister(W4_VPU_RESET_STATUS)) {
            if (time_after(jiffies, timeout))
                goto DONE_WAKEUP;
        }

        WriteVpuRegister(W4_VPU_RESET_REQ, 0);

        /* remap page size */
        remap_size = (code_size >> 12) & 0x1ff;
        regVal = 0x80000000 | (W4_REMAP_CODE_INDEX<<12)
            | (0 << 16) | (1<<11) | remap_size;
        WriteVpuRegister(W4_VPU_REMAP_CTRL, regVal);
        /* DO NOT CHANGE! */
        WriteVpuRegister(W4_VPU_REMAP_VADDR, 0x00000000);
        WriteVpuRegister(W4_VPU_REMAP_PADDR, code_base);
        WriteVpuRegister(W4_ADDR_CODE_BASE, code_base);
        WriteVpuRegister(W4_CODE_SIZE, code_size);
        WriteVpuRegister(W4_CODE_PARAM, 0);
        WriteVpuRegister(W4_INIT_VPU_TIME_OUT_CNT, timeout);
        WriteVpuRegister(W4_HW_OPTION, hwOption);

        /* Interrupt */
        regVal = (1 << W4_INT_DEC_PIC_HDR);
        regVal |= (1 << W4_INT_DEC_PIC);
        regVal |= (1 << W4_INT_QUERY_DEC);
        regVal |= (1 << W4_INT_SLEEP_VPU);
        regVal |= (1 << W4_INT_BSBUF_EMPTY);
        /* the mask built above is superseded here:
         * enable (almost) all interrupt bits
         */
        regVal = 0xfffffefe;
        WriteVpuRegister(W4_VPU_VINT_ENABLE, regVal);
        Wave4BitIssueCommand(core, W4_CMD_INIT_VPU);
        WriteVpuRegister(W4_VPU_REMAP_CORE_START, 1);
        while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
            if (time_after(jiffies, timeout))
                goto DONE_WAKEUP;
        }

        if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
            enc_pr(LOG_ERROR,
                "WAKEUP_VPU failed [0x%x]",
                ReadVpuRegister(W4_RET_FAIL_REASON));
            goto DONE_WAKEUP;
        }
    }

    if (s_vpu_open_ref_count == 0)
        vpu_clk_config(0);
DONE_WAKEUP:
    if (s_vpu_open_ref_count > 0)
        vpu_clk_config(1);
    return 0;
}
#else
#define vpu_suspend NULL
#define vpu_resume NULL
#endif /* !CONFIG_PM */

static const struct of_device_id cnm_hevcenc_dt_match[] = {
    {
        .compatible = "cnm, HevcEnc",
    },
    {},
};

static struct platform_driver vpu_driver = {
    .driver = {
        .name = VPU_PLATFORM_DEVICE_NAME,
        .of_match_table = cnm_hevcenc_dt_match,
    },
    .probe = vpu_probe,
    .remove = vpu_remove,
    .suspend = vpu_suspend,
    .resume = vpu_resume,
};

static s32 __init vpu_init(void)
{
    s32 res;

    enc_pr(LOG_DEBUG, "vpu_init\n");
    if (get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) {
        enc_pr(LOG_DEBUG,
            "This chip does not support the hevc encoder\n");
        return -1;
    }
    res = platform_driver_register(&vpu_driver);
    enc_pr(LOG_INFO,
        "end vpu_init result=0x%x\n", res);
    return res;
}

static void __exit vpu_exit(void)
{
    enc_pr(LOG_DEBUG, "vpu_exit\n");
    if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXM)
        platform_driver_unregister(&vpu_driver);
}

static const struct reserved_mem_ops rmem_hevc_ops = {
    .device_init = hevc_mem_device_init,
};

static s32 __init hevc_mem_setup(struct reserved_mem *rmem)
{
    rmem->ops = &rmem_hevc_ops;
    enc_pr(LOG_DEBUG, "HevcEnc reserved mem setup.\n");
    return 0;
}

/* param type matches the s32 definitions of print_level/clock_level */
module_param(print_level, int, 0664);
MODULE_PARM_DESC(print_level, "\n print_level\n");

module_param(clock_level, int, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

MODULE_AUTHOR("Amlogic using C&M VPU, Inc.");
MODULE_DESCRIPTION("VPU linux driver");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);
RESERVEDMEM_OF_DECLARE(cnm_hevc, "cnm, HevcEnc-memory", hevc_mem_setup);