/*
 * vpu.c
 *
 * Linux device driver for VPU.
 *
 * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reset.h>
#include <linux/compat.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_address.h>
#include <linux/amlogic/media/codec_mm/codec_mm.h>

#include <linux/amlogic/media/utils/vdec_reg.h>
#include "../../../common/media_clock/switch/amports_gate.h"

#include "vpu.h"
#include "vmm.h"

/* Definitions to be adjusted for customer configuration. */
/* Define to enable a frame-by-frame clock gating scheme. */
/* #define VPU_SUPPORT_CLOCK_CONTROL */

#define VPU_PLATFORM_DEVICE_NAME "HevcEnc"
#define VPU_DEV_NAME "HevcEnc"
#define VPU_CLASS_NAME "HevcEnc"

#ifndef VM_RESERVED /* removed from kernel 3.7.0 onwards */
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif

#define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (64 * SZ_1M)

#define LOG_ALL 0
#define LOG_INFO 1
#define LOG_DEBUG 2
#define LOG_ERROR 3

#define enc_pr(level, x...) \
    do { \
        if (level >= print_level) \
            printk(x); \
    } while (0)
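
/*
 * Example: with the default print_level = LOG_DEBUG, a call such as
 * enc_pr(LOG_INFO, ...) is compiled in but filtered out at run time,
 * while enc_pr(LOG_ERROR, ...) is printed. Lower print_level to
 * LOG_ALL to see every message.
 */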

static s32 print_level = LOG_DEBUG;
static s32 force_release;
static s32 clock_level = 4;

static struct video_mm_t s_vmem;
static struct vpudrv_buffer_t s_video_memory = {0};
static bool use_reserve;
static ulong cma_pool_size;

/* end customer definition */
static struct vpudrv_buffer_t s_instance_pool = {0};
static struct vpudrv_buffer_t s_common_memory = {0};
static struct vpu_drv_context_t s_vpu_drv_context;
static s32 s_vpu_major;
static struct device *hevcenc_dev;

static s32 s_vpu_open_ref_count;
static s32 s_vpu_irq;
static bool s_vpu_irq_requested;

static struct vpudrv_buffer_t s_vpu_register = {0};

static s32 s_interrupt_flag;
static wait_queue_head_t s_interrupt_wait_q;

static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
static DEFINE_SEMAPHORE(s_vpu_sem);
static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
static struct tasklet_struct hevc_tasklet;
static struct platform_device *hevc_pdev;

static struct vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];

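/* Per-plane dma-buf import state used by VDI_IOCTL_CONFIG_DMA:
 * index 0 = Y plane, 1 = U (or interleaved UV), 2 = V plane.
 */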
static struct vpu_dma_cfg dma_cfg[3];

static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t info);
static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg);

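/*
 * Cache maintenance helpers. dma_flush() cleans the CPU cache so the
 * device sees data the CPU has written (DMA_TO_DEVICE); cache_flush()
 * invalidates the CPU cache so the CPU sees data the device has
 * written (DMA_FROM_DEVICE).
 */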
static void dma_flush(u32 buf_start, u32 buf_size)
{
    if (hevc_pdev)
        dma_sync_single_for_device(
            &hevc_pdev->dev, buf_start,
            buf_size, DMA_TO_DEVICE);
}

static void cache_flush(u32 buf_start, u32 buf_size)
{
    if (hevc_pdev)
        dma_sync_single_for_cpu(
            &hevc_pdev->dev, buf_start,
            buf_size, DMA_FROM_DEVICE);
}

s32 vpu_hw_reset(void)
{
    enc_pr(LOG_DEBUG, "request vpu reset from application.\n");
    return 0;
}

s32 vpu_clk_config(u32 enable)
{
    if (enable) {
        if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
            HevcEnc_MoreClock_enable();
        HevcEnc_clock_enable(clock_level);
    } else {
        HevcEnc_clock_disable();
        if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
            HevcEnc_MoreClock_disable();
    }
    return 0;
}

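/*
 * Allocate vb->size bytes from the s_vmem video memory pool and store
 * the physical address in vb->phys_addr. Returns 0 on success or -1
 * if the pool is exhausted.
 */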
static s32 vpu_alloc_dma_buffer(struct vpudrv_buffer_t *vb)
{
    if (!vb)
        return -1;

    vb->phys_addr = (ulong)vmem_alloc(&s_vmem, vb->size, 0);
    if ((ulong)vb->phys_addr == (ulong)-1) {
        enc_pr(LOG_ERROR,
            "Physical memory allocation error size=%d\n", vb->size);
        return -1;
    }

    enc_pr(LOG_INFO, "vpu_alloc_dma_buffer: vb->phys_addr 0x%lx\n",
        vb->phys_addr);
    return 0;
}

static void vpu_free_dma_buffer(struct vpudrv_buffer_t *vb)
{
    if (!vb)
        return;
    enc_pr(LOG_INFO, "vpu_free_dma_buffer 0x%lx\n", vb->phys_addr);

    if (vb->phys_addr)
        vmem_free(&s_vmem, vb->phys_addr, 0);
}

static s32 vpu_free_instances(struct file *filp)
{
    struct vpudrv_instanace_list_t *vil, *n;
    struct vpudrv_instance_pool_t *vip;
    void *vip_base;

    enc_pr(LOG_DEBUG, "vpu_free_instances\n");

    list_for_each_entry_safe(vil, n, &s_inst_list_head, list) {
        if (vil->filp == filp) {
            vip_base = (void *)s_instance_pool.base;
            enc_pr(LOG_INFO,
                "free_instances instIdx=%d, coreIdx=%d, vip_base=%p\n",
                (s32)vil->inst_idx,
                (s32)vil->core_idx,
                vip_base);
            vip = (struct vpudrv_instance_pool_t *)vip_base;
            if (vip) {
                /* Only the first 4 bytes (the inUse field
                 * of CodecInst in vpuapi) need to be
                 * cleared to free the corresponding
                 * instance.
                 */
                memset(&vip->codecInstPool[vil->inst_idx],
                    0x00, 4);
            }
            s_vpu_open_ref_count--;
            list_del(&vil->list);
            kfree(vil);
        }
    }
    return 1;
}

static s32 vpu_free_buffers(struct file *filp)
{
    struct vpudrv_buffer_pool_t *pool, *n;
    struct vpudrv_buffer_t vb;

    enc_pr(LOG_DEBUG, "vpu_free_buffers\n");

    list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
        if (pool->filp == filp) {
            vb = pool->vb;
            if (vb.phys_addr) {
                vpu_free_dma_buffer(&vb);
                list_del(&pool->list);
                kfree(pool);
            }
        }
    }
    return 0;
}

static u32 vpu_is_buffer_cached(struct file *filp, ulong vm_pgoff)
{
    struct vpudrv_buffer_pool_t *pool, *n;
    struct vpudrv_buffer_t vb;
    bool find = false;
    u32 cached = 0;

    enc_pr(LOG_ALL, "[+]vpu_is_buffer_cached\n");
    spin_lock(&s_vpu_lock);
    list_for_each_entry_safe(pool, n, &s_vbp_head, list) {
        if (pool->filp == filp) {
            vb = pool->vb;
            if (((vb.phys_addr >> PAGE_SHIFT) == vm_pgoff)
                && find == false) {
                cached = vb.cached;
                find = true;
            }
        }
    }
    spin_unlock(&s_vpu_lock);
    enc_pr(LOG_ALL, "[-]vpu_is_buffer_cached, ret:%d\n", cached);
    return cached;
}

static void hevcenc_isr_tasklet(ulong data)
{
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)data;

    enc_pr(LOG_INFO, "hevcenc_isr_tasklet interrupt:0x%08lx\n",
        dev->interrupt_reason);
    if (dev->interrupt_reason) {
        /* notify the interrupt to user space */
        if (dev->async_queue) {
            enc_pr(LOG_ALL, "kill_fasync e %s\n", __func__);
            kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
        }
        s_interrupt_flag = 1;
        wake_up_interruptible(&s_interrupt_wait_q);
    }
    enc_pr(LOG_ALL, "[-]%s\n", __func__);
}

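/*
 * Top-half interrupt handler: latch and clear the W4 interrupt reason
 * registers, accumulate the reason bits in the driver context, and
 * defer waking user space to hevcenc_isr_tasklet().
 */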
static irqreturn_t vpu_irq_handler(s32 irq, void *dev_id)
{
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)dev_id;
    /* This check can be removed; the same condition is also
     * handled in VPU_WaitInterrupt of the API layer.
     */
    u32 core;
    ulong interrupt_reason = 0;

    enc_pr(LOG_ALL, "[+]%s\n", __func__);

    for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
        if (s_bit_firmware_info[core].size == 0) {
            /* The API layer has not yet provided firmware
             * information for this core, so no core is
             * activated.
             */
            enc_pr(LOG_ERROR,
                "s_bit_firmware_info[core].size is zero\n");
            continue;
        }
        if (ReadVpuRegister(W4_VPU_VPU_INT_STS)) {
            interrupt_reason = ReadVpuRegister(W4_VPU_INT_REASON);
            WriteVpuRegister(W4_VPU_INT_REASON_CLEAR,
                interrupt_reason);
            WriteVpuRegister(W4_VPU_VINT_CLEAR, 0x1);
            dev->interrupt_reason |= interrupt_reason;
        }
        enc_pr(LOG_INFO,
            "intr_reason: 0x%08lx\n", dev->interrupt_reason);
    }
    if (dev->interrupt_reason)
        tasklet_schedule(&hevc_tasklet);
    enc_pr(LOG_ALL, "[-]%s\n", __func__);
    return IRQ_HANDLED;
}

static s32 vpu_open(struct inode *inode, struct file *filp)
{
    bool alloc_buffer = false;
    s32 r = 0;

    enc_pr(LOG_DEBUG, "[+] %s, open_count=%d\n", __func__,
        s_vpu_drv_context.open_count);
    enc_pr(LOG_DEBUG, "vpu_open, calling process: %d:%s\n",
        current->pid, current->comm);
    spin_lock(&s_vpu_lock);
    s_vpu_drv_context.open_count++;
    if (s_vpu_drv_context.open_count == 1) {
        alloc_buffer = true;
    } else {
        r = -EBUSY;
        enc_pr(LOG_DEBUG,
            "vpu_open, device is busy, open_count=%d\n",
            s_vpu_drv_context.open_count);
        spin_unlock(&s_vpu_lock);
        /* the Err path below drops the reference taken above */
        goto Err;
    }
    filp->private_data = (void *)(&s_vpu_drv_context);
    spin_unlock(&s_vpu_lock);
    if (alloc_buffer && !use_reserve) {
#ifdef CONFIG_CMA
        s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE;
        s_video_memory.phys_addr =
            (ulong)codec_mm_alloc_for_dma(VPU_DEV_NAME,
            VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE >> PAGE_SHIFT, 0, 0);
        if (s_video_memory.phys_addr) {
            enc_pr(LOG_DEBUG,
                "allocating phys 0x%lx, virt addr 0x%lx, size %dk\n",
                s_video_memory.phys_addr,
                s_video_memory.base,
                s_video_memory.size >> 10);
            if (vmem_init(&s_vmem,
                s_video_memory.phys_addr,
                s_video_memory.size) < 0) {
                enc_pr(LOG_ERROR, "fail to init vmem system\n");
                r = -ENOMEM;
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
                vmem_exit(&s_vmem);
                memset(&s_video_memory, 0,
                    sizeof(struct vpudrv_buffer_t));
                memset(&s_vmem, 0,
                    sizeof(struct video_mm_t));
            }
        } else {
            enc_pr(LOG_ERROR,
                "CMA failed to allocate dma buffer for %s, phys: 0x%lx\n",
                VPU_DEV_NAME, s_video_memory.phys_addr);
            if (s_video_memory.phys_addr)
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
            s_video_memory.phys_addr = 0;
            r = -ENOMEM;
        }
#else
        enc_pr(LOG_ERROR,
            "No CMA and reserved memory for HevcEnc!!!\n");
        r = -ENOMEM;
#endif
    } else if (!s_video_memory.phys_addr) {
        enc_pr(LOG_ERROR,
            "HevcEnc memory is not malloced!!!\n");
        r = -ENOMEM;
    }
    if (alloc_buffer) {
        ulong flags;
        u32 data32;

        if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == false)) {
            s32 err;

            err = request_irq(s_vpu_irq, vpu_irq_handler, 0,
                "HevcEnc-irq", (void *)(&s_vpu_drv_context));
            if (err) {
                enc_pr(LOG_ERROR,
                    "fail to register interrupt handler\n");
                s_vpu_drv_context.open_count--;
                return -EFAULT;
            }
            s_vpu_irq_requested = true;
        }
        amports_switch_gate("vdec", 1);
        spin_lock_irqsave(&s_vpu_lock, flags);
        WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
            READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
            (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
            ? ~0x8 : ~(0x3<<24)));
        udelay(10);

        if (get_cpu_type() <= MESON_CPU_MAJOR_ID_TXLX) {
            data32 = 0x700;
            data32 |= READ_VREG(DOS_SW_RESET4);
            WRITE_VREG(DOS_SW_RESET4, data32);
            data32 &= ~0x700;
            WRITE_VREG(DOS_SW_RESET4, data32);
        } else {
            data32 = 0xf00;
            data32 |= READ_VREG(DOS_SW_RESET4);
            WRITE_VREG(DOS_SW_RESET4, data32);
            data32 &= ~0xf00;
            WRITE_VREG(DOS_SW_RESET4, data32);
        }

        WRITE_MPEG_REG(RESET0_REGISTER, data32 & ~(1<<21));
        WRITE_MPEG_REG(RESET0_REGISTER, data32 | (1<<21));
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
        READ_MPEG_REG(RESET0_REGISTER);
#ifndef VPU_SUPPORT_CLOCK_CONTROL
        vpu_clk_config(1);
#endif
        /* Enable wave420l_vpu_idle_rise_irq,
         * Disable wave420l_vpu_idle_fall_irq
         */
        WRITE_VREG(DOS_WAVE420L_CNTL_STAT, 0x1);
        WRITE_VREG(DOS_MEM_PD_WAVE420L, 0x0);

        WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
            READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
            (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
            ? ~0x8 : ~(0x3<<12)));
        udelay(10);

        spin_unlock_irqrestore(&s_vpu_lock, flags);
    }
    memset(dma_cfg, 0, sizeof(dma_cfg));
    dma_cfg[0].fd = -1;
    dma_cfg[1].fd = -1;
    dma_cfg[2].fd = -1;
Err:
    if (r != 0) {
        enc_pr(LOG_DEBUG,
            "vpu_open, error handling, r=%d, open_count=%d\n",
            r, s_vpu_drv_context.open_count);
        s_vpu_drv_context.open_count--;
    }
    enc_pr(LOG_DEBUG, "[-] %s, ret: %d\n", __func__, r);
    return r;
}

static long vpu_ioctl(struct file *filp, u32 cmd, ulong arg)
{
    s32 ret = 0;
    struct vpu_drv_context_t *dev =
        (struct vpu_drv_context_t *)filp->private_data;

    switch (cmd) {
    case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
    {
        struct vpudrv_buffer_pool_t *vbp;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
            if (!vbp) {
                up(&s_vpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&(vbp->vb),
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                kfree(vbp);
                up(&s_vpu_sem);
                return -EFAULT;
            }

            ret = vpu_alloc_dma_buffer(&(vbp->vb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(vbp);
                up(&s_vpu_sem);
                break;
            }
            ret = copy_to_user((void __user *)arg,
                &(vbp->vb),
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                /* do not leak the just-allocated buffer */
                vpu_free_dma_buffer(&vbp->vb);
                kfree(vbp);
                ret = -EFAULT;
                up(&s_vpu_sem);
                break;
            }

            vbp->filp = filp;
            spin_lock(&s_vpu_lock);
            list_add(&vbp->list, &s_vbp_head);
            spin_unlock(&s_vpu_lock);

            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32:
    {
        struct vpudrv_buffer_pool_t *vbp;
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
            if (!vbp) {
                up(&s_vpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                kfree(vbp);
                up(&s_vpu_sem);
                return -EFAULT;
            }

            vbp->vb.size = buf32.size;
            vbp->vb.cached = buf32.cached;
            vbp->vb.phys_addr =
                (ulong)buf32.phys_addr;
            vbp->vb.virt_addr =
                (ulong)buf32.virt_addr;
            ret = vpu_alloc_dma_buffer(&(vbp->vb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(vbp);
                up(&s_vpu_sem);
                break;
            }

            buf32.size = vbp->vb.size;
            buf32.phys_addr =
                (compat_ulong_t)vbp->vb.phys_addr;
            buf32.virt_addr =
                (compat_ulong_t)vbp->vb.virt_addr;

            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                /* do not leak the just-allocated buffer */
                vpu_free_dma_buffer(&vbp->vb);
                kfree(vbp);
                ret = -EFAULT;
                up(&s_vpu_sem);
                break;
            }

            vbp->filp = filp;
            spin_lock(&s_vpu_lock);
            list_add(&vbp->list, &s_vbp_head);
            spin_unlock(&s_vpu_lock);

            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY32\n");
    }
    break;
#endif
    case VDI_IOCTL_FREE_PHYSICALMEMORY:
    {
        struct vpudrv_buffer_pool_t *vbp, *n;
        struct vpudrv_buffer_t vb;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            ret = copy_from_user(&vb,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret) {
                up(&s_vpu_sem);
                return -EFAULT;
            }

            if (vb.phys_addr)
                vpu_free_dma_buffer(&vb);

            spin_lock(&s_vpu_lock);
            list_for_each_entry_safe(vbp, n,
                &s_vbp_head, list) {
                if (vbp->vb.phys_addr == vb.phys_addr) {
                    list_del(&vbp->list);
                    kfree(vbp);
                    break;
                }
            }
            spin_unlock(&s_vpu_lock);

            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_FREE_PHYSICALMEMORY32:
    {
        struct vpudrv_buffer_pool_t *vbp, *n;
        struct compat_vpudrv_buffer_t buf32;
        struct vpudrv_buffer_t vb;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret == 0) {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret) {
                up(&s_vpu_sem);
                return -EFAULT;
            }

            vb.size = buf32.size;
            vb.phys_addr =
                (ulong)buf32.phys_addr;
            vb.virt_addr =
                (ulong)buf32.virt_addr;

            if (vb.phys_addr)
                vpu_free_dma_buffer(&vb);

            spin_lock(&s_vpu_lock);
            list_for_each_entry_safe(vbp, n,
                &s_vbp_head, list) {
                if ((compat_ulong_t)vbp->vb.base
                    == buf32.base) {
                    list_del(&vbp->list);
                    kfree(vbp);
                    break;
                }
            }
            spin_unlock(&s_vpu_lock);
            up(&s_vpu_sem);
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FREE_PHYSICALMEMORY32\n");
    }
    break;
#endif
    case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
        if (s_video_memory.phys_addr != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_video_memory,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");

        buf32.size = s_video_memory.size;
        buf32.phys_addr =
            (compat_ulong_t)s_video_memory.phys_addr;
        buf32.virt_addr =
            (compat_ulong_t)s_video_memory.virt_addr;
        if (s_video_memory.phys_addr != 0) {
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO32\n");
    }
    break;
#endif
    case VDI_IOCTL_WAIT_INTERRUPT:
    {
        struct vpudrv_intr_info_t info;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_WAIT_INTERRUPT\n");
        ret = copy_from_user(&info,
            (struct vpudrv_intr_info_t *)arg,
            sizeof(struct vpudrv_intr_info_t));
        if (ret != 0)
            return -EFAULT;

        ret = wait_event_interruptible_timeout(
            s_interrupt_wait_q,
            s_interrupt_flag != 0,
            msecs_to_jiffies(info.timeout));
        if (!ret) {
            ret = -ETIME;
            break;
        }
        enc_pr(LOG_INFO,
            "s_interrupt_flag(%d), reason(0x%08lx)\n",
            s_interrupt_flag, dev->interrupt_reason);
        if (dev->interrupt_reason & (1 << W4_INT_ENC_PIC)) {
            u32 start, end, size, core = 0;

            start = ReadVpuRegister(W4_BS_RD_PTR);
            end = ReadVpuRegister(W4_BS_WR_PTR);
            size = ReadVpuRegister(W4_RET_ENC_PIC_BYTE);
            enc_pr(LOG_INFO, "flush output buffer, ");
            enc_pr(LOG_INFO,
                "start:0x%x, end:0x%x, size:0x%x\n",
                start, end, size);
            if (end > start && end - start > size)
                size = end - start;
            if (size > 0)
                cache_flush(start, size);
        }

        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }

        enc_pr(LOG_INFO,
            "s_interrupt_flag(%d), reason(0x%08lx)\n",
            s_interrupt_flag, dev->interrupt_reason);

        info.intr_reason = dev->interrupt_reason;
        s_interrupt_flag = 0;
        dev->interrupt_reason = 0;
        ret = copy_to_user((void __user *)arg,
            &info, sizeof(struct vpudrv_intr_info_t));
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_WAIT_INTERRUPT\n");
        if (ret != 0)
            return -EFAULT;
    }
    break;
    case VDI_IOCTL_SET_CLOCK_GATE:
    {
        u32 clkgate;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_SET_CLOCK_GATE\n");
        if (get_user(clkgate, (u32 __user *)arg))
            return -EFAULT;
#ifdef VPU_SUPPORT_CLOCK_CONTROL
        vpu_clk_config(clkgate);
#endif
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_SET_CLOCK_GATE\n");
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_POOL:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_POOL\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret != 0)
            break;

        if (s_instance_pool.base != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_instance_pool,
                sizeof(struct vpudrv_buffer_t));
            ret = (ret != 0) ? -EFAULT : 0;
        } else {
            ret = copy_from_user(&s_instance_pool,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret == 0) {
                s_instance_pool.size =
                    PAGE_ALIGN(s_instance_pool.size);
                s_instance_pool.base =
                    (ulong)vmalloc(s_instance_pool.size);
                s_instance_pool.phys_addr =
                    s_instance_pool.base;
                if (s_instance_pool.base == 0) {
                    ret = -EFAULT;
                    up(&s_vpu_sem);
                    break;
                }
                /* clearing memory */
                memset((void *)s_instance_pool.base,
                    0, s_instance_pool.size);
                ret = copy_to_user((void __user *)arg,
                    &s_instance_pool,
                    sizeof(struct vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        up(&s_vpu_sem);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_INSTANCE_POOL\n");
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_GET_INSTANCE_POOL32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_POOL32\n");
        ret = down_interruptible(&s_vpu_sem);
        if (ret != 0)
            break;
        if (s_instance_pool.base != 0) {
            buf32.size = s_instance_pool.size;
            buf32.phys_addr =
                (compat_ulong_t)s_instance_pool.phys_addr;
            buf32.virt_addr =
                (compat_ulong_t)s_instance_pool.virt_addr;
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            ret = (ret != 0) ? -EFAULT : 0;
        } else {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret == 0) {
                s_instance_pool.size = buf32.size;
                s_instance_pool.size =
                    PAGE_ALIGN(s_instance_pool.size);
                s_instance_pool.base =
                    (ulong)vmalloc(s_instance_pool.size);
                s_instance_pool.phys_addr =
                    s_instance_pool.base;
                buf32.size =
                    s_instance_pool.size;
                buf32.phys_addr =
                    (compat_ulong_t)s_instance_pool.phys_addr;
                buf32.base =
                    (compat_ulong_t)s_instance_pool.base;
                buf32.virt_addr =
                    (compat_ulong_t)s_instance_pool.virt_addr;
                if (s_instance_pool.base == 0) {
                    ret = -EFAULT;
                    up(&s_vpu_sem);
                    break;
                }
                /* clearing memory */
                memset((void *)s_instance_pool.base,
                    0x0, s_instance_pool.size);
                ret = copy_to_user((void __user *)arg,
                    &buf32,
                    sizeof(struct compat_vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        up(&s_vpu_sem);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_INSTANCE_POOL32\n");
    }
    break;
#endif
    case VDI_IOCTL_GET_COMMON_MEMORY:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_COMMON_MEMORY\n");
        if (s_common_memory.phys_addr != 0) {
            ret = copy_to_user((void __user *)arg,
                &s_common_memory,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = copy_from_user(&s_common_memory,
                (struct vpudrv_buffer_t *)arg,
                sizeof(struct vpudrv_buffer_t));
            if (ret != 0) {
                ret = -EFAULT;
                break;
            }
            if (vpu_alloc_dma_buffer(
                &s_common_memory) != -1) {
                ret = copy_to_user((void __user *)arg,
                    &s_common_memory,
                    sizeof(struct vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_COMMON_MEMORY\n");
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_GET_COMMON_MEMORY32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_COMMON_MEMORY32\n");

        buf32.size = s_common_memory.size;
        buf32.phys_addr =
            (compat_ulong_t)s_common_memory.phys_addr;
        buf32.virt_addr =
            (compat_ulong_t)s_common_memory.virt_addr;
        if (s_common_memory.phys_addr != 0) {
            ret = copy_to_user((void __user *)arg,
                &buf32,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = copy_from_user(&buf32,
                (struct compat_vpudrv_buffer_t *)arg,
                sizeof(struct compat_vpudrv_buffer_t));
            if (ret != 0) {
                ret = -EFAULT;
                break;
            }
            s_common_memory.size = buf32.size;
            if (vpu_alloc_dma_buffer(
                &s_common_memory) != -1) {
                buf32.size =
                    s_common_memory.size;
                buf32.phys_addr =
                    (compat_ulong_t)s_common_memory.phys_addr;
                buf32.virt_addr =
                    (compat_ulong_t)s_common_memory.virt_addr;
                ret = copy_to_user((void __user *)arg,
                    &buf32,
                    sizeof(struct compat_vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else
                ret = -EFAULT;
        }
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_COMMON_MEMORY32\n");
    }
    break;
#endif
    case VDI_IOCTL_OPEN_INSTANCE:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *entry, *n;

        vil = kzalloc(sizeof(*vil), GFP_KERNEL);
        if (!vil)
            return -ENOMEM;

        if (copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t))) {
            kfree(vil);
            return -EFAULT;
        }

        vil->inst_idx = inst_info.inst_idx;
        vil->core_idx = inst_info.core_idx;
        vil->filp = filp;

        spin_lock(&s_vpu_lock);
        list_add(&vil->list, &s_inst_list_head);

        /* counting the current open instance number */
        inst_info.inst_open_count = 0;
        list_for_each_entry_safe(entry, n,
            &s_inst_list_head, list) {
            if (entry->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }

        /* flag just for whether the vpu is opened or closed */
        s_vpu_open_ref_count++;
        spin_unlock(&s_vpu_lock);

        if (copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t))) {
            /* roll back the instance added above */
            spin_lock(&s_vpu_lock);
            list_del(&vil->list);
            s_vpu_open_ref_count--;
            spin_unlock(&s_vpu_lock);
            kfree(vil);
            return -EFAULT;
        }

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_OPEN_INSTANCE ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, ",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx);
        enc_pr(LOG_DEBUG,
            "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
            s_vpu_open_ref_count,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_CLOSE_INSTANCE:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *n;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_CLOSE_INSTANCE\n");
        if (copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t)))
            return -EFAULT;

        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->inst_idx == inst_info.inst_idx &&
                vil->core_idx == inst_info.core_idx) {
                list_del(&vil->list);
                kfree(vil);
                break;
            }
        }

        /* counting the current open instance number */
        inst_info.inst_open_count = 0;
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }

        /* flag just for whether the vpu is opened or closed */
        s_vpu_open_ref_count--;
        spin_unlock(&s_vpu_lock);

        if (copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t)))
            return -EFAULT;

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_CLOSE_INSTANCE ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, ",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx);
        enc_pr(LOG_DEBUG,
            "s_vpu_open_ref_count=%d, inst_open_count=%d\n",
            s_vpu_open_ref_count,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_NUM:
    {
        struct vpudrv_inst_info_t inst_info;
        struct vpudrv_instanace_list_t *vil, *n;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_INSTANCE_NUM\n");

        ret = copy_from_user(&inst_info,
            (struct vpudrv_inst_info_t *)arg,
            sizeof(struct vpudrv_inst_info_t));
        if (ret != 0) {
            ret = -EFAULT;
            break;
        }

        inst_info.inst_open_count = 0;

        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(vil, n,
            &s_inst_list_head, list) {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }
        spin_unlock(&s_vpu_lock);

        ret = copy_to_user((void __user *)arg,
            &inst_info,
            sizeof(struct vpudrv_inst_info_t));
        if (ret != 0)
            ret = -EFAULT;

        enc_pr(LOG_DEBUG,
            "VDI_IOCTL_GET_INSTANCE_NUM ");
        enc_pr(LOG_DEBUG,
            "core_idx=%d, inst_idx=%d, open_count=%d\n",
            (u32)inst_info.core_idx,
            (u32)inst_info.inst_idx,
            inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_RESET:
    {
        vpu_hw_reset();
    }
    break;
    case VDI_IOCTL_GET_REGISTER_INFO:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_REGISTER_INFO\n");
        ret = copy_to_user((void __user *)arg,
            &s_vpu_register,
            sizeof(struct vpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_REGISTER_INFO ");
        enc_pr(LOG_ALL,
            "s_vpu_register.phys_addr=0x%lx, ",
            s_vpu_register.phys_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.virt_addr=0x%lx, ",
            s_vpu_register.virt_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.size=0x%x\n",
            s_vpu_register.size);
    }
    break;
#ifdef CONFIG_COMPAT
    case VDI_IOCTL_GET_REGISTER_INFO32:
    {
        struct compat_vpudrv_buffer_t buf32;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_GET_REGISTER_INFO32\n");

        buf32.size = s_vpu_register.size;
        buf32.phys_addr =
            (compat_ulong_t)s_vpu_register.phys_addr;
        buf32.virt_addr =
            (compat_ulong_t)s_vpu_register.virt_addr;
        ret = copy_to_user((void __user *)arg,
            &buf32,
            sizeof(struct compat_vpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_GET_REGISTER_INFO32 ");
        enc_pr(LOG_ALL,
            "s_vpu_register.phys_addr=0x%lx, ",
            s_vpu_register.phys_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.virt_addr=0x%lx, ",
            s_vpu_register.virt_addr);
        enc_pr(LOG_ALL,
            "s_vpu_register.size=0x%x\n",
            s_vpu_register.size);
    }
    break;
    case VDI_IOCTL_FLUSH_BUFFER32:
    {
        struct vpudrv_buffer_pool_t *pool, *n;
        struct compat_vpudrv_buffer_t buf32;
        struct vpudrv_buffer_t vb;
        bool find = false;
        u32 cached = 0;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FLUSH_BUFFER32\n");

        ret = copy_from_user(&buf32,
            (struct compat_vpudrv_buffer_t *)arg,
            sizeof(struct compat_vpudrv_buffer_t));
        if (ret)
            return -EFAULT;
        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(pool, n,
            &s_vbp_head, list) {
            if (pool->filp == filp) {
                vb = pool->vb;
                if (((compat_ulong_t)vb.phys_addr
                    == buf32.phys_addr)
                    && find == false) {
                    cached = vb.cached;
                    find = true;
                }
            }
        }
        spin_unlock(&s_vpu_lock);
        if (find && cached)
            dma_flush(
                (u32)buf32.phys_addr,
                (u32)buf32.size);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FLUSH_BUFFER32\n");
    }
    break;
#endif
    case VDI_IOCTL_FLUSH_BUFFER:
    {
        struct vpudrv_buffer_pool_t *pool, *n;
        struct vpudrv_buffer_t vb, buf;
        bool find = false;
        u32 cached = 0;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_FLUSH_BUFFER\n");

        ret = copy_from_user(&buf,
            (struct vpudrv_buffer_t *)arg,
            sizeof(struct vpudrv_buffer_t));
        if (ret)
            return -EFAULT;
        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(pool, n,
            &s_vbp_head, list) {
            if (pool->filp == filp) {
                vb = pool->vb;
                if ((vb.phys_addr
                    == buf.phys_addr)
                    && find == false) {
                    cached = vb.cached;
                    find = true;
                }
            }
        }
        spin_unlock(&s_vpu_lock);
        if (find && cached)
            dma_flush(
                (u32)buf.phys_addr,
                (u32)buf.size);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_FLUSH_BUFFER\n");
    }
    break;
    case VDI_IOCTL_CONFIG_DMA:
    {
        struct vpu_dma_buf_info_t dma_info;

        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_CONFIG_DMA\n");
        if (copy_from_user(&dma_info,
            (struct vpu_dma_buf_info_t *)arg,
            sizeof(struct vpu_dma_buf_info_t)))
            return -EFAULT;

        if (vpu_src_addr_config(dma_info)) {
            enc_pr(LOG_ERROR,
                "src addr config error\n");
            return -EFAULT;
        }

        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_CONFIG_DMA %d, %d, %d\n",
            dma_info.fd[0],
            dma_info.fd[1],
            dma_info.fd[2]);
    }
    break;
    case VDI_IOCTL_UNMAP_DMA:
    {
        enc_pr(LOG_ALL,
            "[+]VDI_IOCTL_UNMAP_DMA\n");

        vpu_dma_buffer_unmap(&dma_cfg[0]);
        if (dma_cfg[1].paddr != 0)
            vpu_dma_buffer_unmap(&dma_cfg[1]);
        if (dma_cfg[2].paddr != 0)
            vpu_dma_buffer_unmap(&dma_cfg[2]);
        enc_pr(LOG_ALL,
            "[-]VDI_IOCTL_UNMAP_DMA\n");
    }
    break;
    default:
    {
        enc_pr(LOG_ERROR,
            "No such IOCTL, cmd is 0x%x\n", cmd);
        ret = -EFAULT;
    }
    break;
    }
    return ret;
}

#ifdef CONFIG_COMPAT
static long vpu_compat_ioctl(struct file *filp, u32 cmd, ulong arg)
{
    long ret;

    arg = (ulong)compat_ptr(arg);
    ret = vpu_ioctl(filp, cmd, arg);
    return ret;
}
#endif

static ssize_t vpu_write(struct file *filp,
    const char *buf,
    size_t len,
    loff_t *ppos)
{
    enc_pr(LOG_INFO, "vpu_write len=%d\n", (int)len);

    if (!buf) {
        enc_pr(LOG_ERROR, "vpu_write buf = NULL error\n");
        return -EFAULT;
    }

    if (len == sizeof(struct vpu_bit_firmware_info_t)) {
        struct vpu_bit_firmware_info_t *bit_firmware_info;

        bit_firmware_info =
            kmalloc(sizeof(struct vpu_bit_firmware_info_t),
                GFP_KERNEL);
        if (!bit_firmware_info) {
            enc_pr(LOG_ERROR,
                "vpu_write bit_firmware_info allocation error\n");
            return -ENOMEM;
        }

        if (copy_from_user(bit_firmware_info, buf, len)) {
            enc_pr(LOG_ERROR,
                "vpu_write copy_from_user error for bit_firmware_info\n");
            kfree(bit_firmware_info);
            return -EFAULT;
        }

        if (bit_firmware_info->size ==
            sizeof(struct vpu_bit_firmware_info_t)) {
            enc_pr(LOG_INFO,
                "vpu_write set bit_firmware_info coreIdx=0x%x, ",
                bit_firmware_info->core_idx);
            enc_pr(LOG_INFO,
                "reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n",
                bit_firmware_info->reg_base_offset,
                bit_firmware_info->size,
                bit_firmware_info->bit_code[0]);

            if (bit_firmware_info->core_idx
                >= MAX_NUM_VPU_CORE) {
                enc_pr(LOG_ERROR,
                    "vpu_write coreIdx[%d] is out of range, ",
                    bit_firmware_info->core_idx);
                enc_pr(LOG_ERROR,
                    "MAX_NUM_VPU_CORE is %d\n",
                    MAX_NUM_VPU_CORE);
                kfree(bit_firmware_info);
                return -ENODEV;
            }

            memcpy((void *)&s_bit_firmware_info
                [bit_firmware_info->core_idx],
                bit_firmware_info,
                sizeof(struct vpu_bit_firmware_info_t));
            kfree(bit_firmware_info);
            return len;
        }
        kfree(bit_firmware_info);
    }
    return -EINVAL;
}

static s32 vpu_release(struct inode *inode, struct file *filp)
{
    s32 ret = 0;
    ulong flags;

    enc_pr(LOG_DEBUG, "vpu_release, calling process: %d:%s\n",
        current->pid, current->comm);
    ret = down_interruptible(&s_vpu_sem);
    enc_pr(LOG_DEBUG, "vpu_release, ret = %d\n", ret);

    if (s_vpu_drv_context.open_count <= 0) {
        enc_pr(LOG_DEBUG,
            "vpu_release, open_count=%d, already released or not yet initialized\n",
            s_vpu_drv_context.open_count);
        s_vpu_drv_context.open_count = 0;
        goto exit_release;
    }

    if (ret == 0) {
        vpu_free_buffers(filp);
        vpu_free_instances(filp);

        enc_pr(LOG_DEBUG, "vpu_release, decrease open_count from %d\n",
            s_vpu_drv_context.open_count);

        s_vpu_drv_context.open_count--;
        if (s_vpu_drv_context.open_count == 0) {
            enc_pr(LOG_DEBUG,
                "vpu_release: s_interrupt_flag(%d), reason(0x%08lx)\n",
                s_interrupt_flag, s_vpu_drv_context.interrupt_reason);
            s_vpu_drv_context.interrupt_reason = 0;
            s_interrupt_flag = 0;
            if (s_instance_pool.base) {
                enc_pr(LOG_DEBUG, "free instance pool\n");
                vfree((const void *)s_instance_pool.base);
                s_instance_pool.base = 0;
            }
            if (s_common_memory.phys_addr) {
                enc_pr(LOG_INFO,
                    "vpu_release, s_common_memory 0x%lx\n",
                    s_common_memory.phys_addr);
                vpu_free_dma_buffer(&s_common_memory);
                s_common_memory.phys_addr = 0;
            }

            if (s_video_memory.phys_addr && !use_reserve) {
                enc_pr(LOG_DEBUG,
                    "vpu_release, s_video_memory 0x%lx\n",
                    s_video_memory.phys_addr);
                codec_mm_free_for_dma(
                    VPU_DEV_NAME,
                    (u32)s_video_memory.phys_addr);
                vmem_exit(&s_vmem);
                memset(&s_video_memory,
                    0, sizeof(struct vpudrv_buffer_t));
                memset(&s_vmem,
                    0, sizeof(struct video_mm_t));
            }
            if ((s_vpu_irq >= 0) && (s_vpu_irq_requested == true)) {
                free_irq(s_vpu_irq, &s_vpu_drv_context);
                s_vpu_irq_requested = false;
            }
            spin_lock_irqsave(&s_vpu_lock, flags);
            WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
                READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
                (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
                ? 0x8 : (0x3<<12)));
            udelay(10);

            WRITE_VREG(DOS_MEM_PD_WAVE420L, 0xffffffff);
#ifndef VPU_SUPPORT_CLOCK_CONTROL
            vpu_clk_config(0);
#endif
            WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
                READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
                (get_cpu_type() == MESON_CPU_MAJOR_ID_SM1
                ? 0x8 : (0x3<<24)));
            udelay(10);
            spin_unlock_irqrestore(&s_vpu_lock, flags);
            amports_switch_gate("vdec", 0);
        }
    }
exit_release:
    /* only release the semaphore if it was actually acquired */
    if (ret == 0)
        up(&s_vpu_sem);
    return 0;
}

static s32 vpu_fasync(s32 fd, struct file *filp, s32 mode)
{
    struct vpu_drv_context_t *dev =
        (struct vpu_drv_context_t *)filp->private_data;
    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

static s32 vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
{
    ulong pfn;

    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot =
        pgprot_noncached(vm->vm_page_prot);
    pfn = s_vpu_register.phys_addr >> PAGE_SHIFT;
    return remap_pfn_range(vm, vm->vm_start, pfn,
        vm->vm_end - vm->vm_start,
        vm->vm_page_prot) ? -EAGAIN : 0;
}

static s32 vpu_map_to_physical_memory(
    struct file *fp, struct vm_area_struct *vm)
{
    vm->vm_flags |= VM_IO | VM_RESERVED;
    if (vm->vm_pgoff ==
        (s_common_memory.phys_addr >> PAGE_SHIFT)) {
        vm->vm_page_prot =
            pgprot_noncached(vm->vm_page_prot);
    } else {
        if (vpu_is_buffer_cached(fp, vm->vm_pgoff) == 0)
            vm->vm_page_prot =
                pgprot_noncached(vm->vm_page_prot);
    }
    /* vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot); */
    if (!pfn_valid(vm->vm_pgoff)) {
        enc_pr(LOG_ERROR, "%s invalid pfn\n", __func__);
        return -EAGAIN;
    }
    return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
        vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}

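/*
 * The instance pool lives in vmalloc()ed memory, which is virtually
 * but not physically contiguous, so it must be remapped into user
 * space one page at a time via vmalloc_to_pfn().
 */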
static s32 vpu_map_to_instance_pool_memory(
    struct file *fp, struct vm_area_struct *vm)
{
    s32 ret;
    long length = vm->vm_end - vm->vm_start;
    ulong start = vm->vm_start;
    s8 *vmalloc_area_ptr = (s8 *)s_instance_pool.base;
    ulong pfn;

    vm->vm_flags |= VM_RESERVED;

    /* loop over all pages, mapping each page individually */
    while (length > 0) {
        pfn = vmalloc_to_pfn(vmalloc_area_ptr);
        ret = remap_pfn_range(vm, start, pfn,
            PAGE_SIZE, PAGE_SHARED);
        if (ret < 0)
            return ret;
        start += PAGE_SIZE;
        vmalloc_area_ptr += PAGE_SIZE;
        length -= PAGE_SIZE;
    }
    return 0;
}

/*
 * @brief memory map interface for vpu file operation
 * @return 0 on success or negative error code on error
 */
static s32 vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
    /* if (vm->vm_pgoff == (s_vpu_register.phys_addr >> PAGE_SHIFT)) */
    if ((vm->vm_end - vm->vm_start == s_vpu_register.size + 1) &&
        (vm->vm_pgoff == 0)) {
        vm->vm_pgoff = (s_vpu_register.phys_addr >> PAGE_SHIFT);
        return vpu_map_to_register(fp, vm);
    }

    if (vm->vm_pgoff == 0)
        return vpu_map_to_instance_pool_memory(fp, vm);

    return vpu_map_to_physical_memory(fp, vm);
}
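
/*
 * Import a user-supplied dma-buf fd for device access: take a
 * reference (dma_buf_get), attach our device (dma_buf_attach), then
 * map the attachment to obtain the scatter-gather table. The teardown
 * in vpu_dma_buffer_unmap() is the exact reverse.
 */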
static int vpu_dma_buffer_map(struct vpu_dma_cfg *cfg)
{
    int ret = -1;
    int fd = -1;
    struct dma_buf *dbuf = NULL;
    struct dma_buf_attachment *d_att = NULL;
    struct sg_table *sg = NULL;
    void *vaddr = NULL;
    struct device *dev = NULL;
    enum dma_data_direction dir;

    if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
        enc_pr(LOG_ERROR, "error dma param\n");
        return -EINVAL;
    }
    fd = cfg->fd;
    dev = cfg->dev;
    dir = cfg->dir;

    /* dma_buf_get()/attach()/map_attachment() return ERR_PTR()
     * on failure, so plain NULL checks would miss errors.
     */
    dbuf = dma_buf_get(fd);
    if (IS_ERR_OR_NULL(dbuf)) {
        enc_pr(LOG_ERROR, "failed to get dma buffer, fd %d\n", fd);
        return -EINVAL;
    }

    d_att = dma_buf_attach(dbuf, dev);
    if (IS_ERR_OR_NULL(d_att)) {
        enc_pr(LOG_ERROR, "failed to set dma attach\n");
        goto attach_err;
    }

    sg = dma_buf_map_attachment(d_att, dir);
    if (IS_ERR_OR_NULL(sg)) {
        enc_pr(LOG_ERROR, "failed to get dma sg\n");
        goto map_attach_err;
    }
    cfg->dbuf = dbuf;
    cfg->attach = d_att;
    cfg->vaddr = vaddr;
    cfg->sg = sg;

    return 0;

map_attach_err:
    dma_buf_detach(dbuf, d_att);
attach_err:
    dma_buf_put(dbuf);

    return ret;
}

static void vpu_dma_buffer_unmap(struct vpu_dma_cfg *cfg)
{
    int fd = -1;
    struct dma_buf *dbuf = NULL;
    struct dma_buf_attachment *d_att = NULL;
    struct sg_table *sg = NULL;
    /*void *vaddr = NULL;*/
    struct device *dev = NULL;
    enum dma_data_direction dir;

    if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
        || cfg->dbuf == NULL /*|| cfg->vaddr == NULL*/
        || cfg->attach == NULL || cfg->sg == NULL) {
        enc_pr(LOG_ERROR, "unmap: Error dma param\n");
        return;
    }

    fd = cfg->fd;
    dev = cfg->dev;
    dir = cfg->dir;
    dbuf = cfg->dbuf;
    d_att = cfg->attach;
    sg = cfg->sg;

    dma_buf_unmap_attachment(d_att, sg, dir);
    dma_buf_detach(dbuf, d_att);
    dma_buf_put(dbuf);

    enc_pr(LOG_INFO, "vpu_dma_buffer_unmap fd %d\n", fd);
}

static int vpu_dma_buffer_get_phys(struct vpu_dma_cfg *cfg, unsigned long *addr)
{
    struct sg_table *sg_table;
    struct page *page;
    int ret;

    ret = vpu_dma_buffer_map(cfg);
    if (ret < 0) {
        enc_pr(LOG_ERROR, "vpu_dma_buffer_map failed\n");
        return ret;
    }
    if (cfg->sg) {
        sg_table = cfg->sg;
        page = sg_page(sg_table->sgl);
        *addr = PFN_PHYS(page_to_pfn(page));
        ret = 0;
    }
    enc_pr(LOG_INFO, "vpu_dma_buffer_get_phys\n");

    return ret;
}

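/*
 * Program the per-plane source addresses. For a 4:2:0 frame,
 * Ysize = width * height luma bytes and each chroma plane is a
 * quarter of that. Worked example (illustrative): a single-plane
 * 1920x1080 AMVENC_YUV420 buffer at physical address P uses
 * Y = P, U = P + 1920*1080, V = P + 1920*1080 + (1920*1080)/4.
 */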
static u32 vpu_src_addr_config(struct vpu_dma_buf_info_t info)
{
    unsigned long phy_addr_y = 0;
    unsigned long phy_addr_u = 0;
    unsigned long phy_addr_v = 0;
    unsigned long Ysize = info.width * info.height;
    unsigned long Usize = Ysize >> 2;
    s32 ret = 0;
    u32 core = 0;

    /* Y plane */
    dma_cfg[0].dir = DMA_TO_DEVICE;
    dma_cfg[0].fd = info.fd[0];
    dma_cfg[0].dev = &(hevc_pdev->dev);
    ret = vpu_dma_buffer_get_phys(&dma_cfg[0], &phy_addr_y);
    if (ret < 0) {
        enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[0]);
        return -1;
    }

    /* U plane */
    if (info.num_planes >= 2) {
        dma_cfg[1].dir = DMA_TO_DEVICE;
        dma_cfg[1].fd = info.fd[1];
        dma_cfg[1].dev = &(hevc_pdev->dev);
        ret = vpu_dma_buffer_get_phys(&dma_cfg[1], &phy_addr_u);
        if (ret < 0) {
            enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[1]);
            return -1;
        }
    }

    /* V plane */
    if (info.num_planes >= 3) {
        dma_cfg[2].dir = DMA_TO_DEVICE;
        dma_cfg[2].fd = info.fd[2];
        dma_cfg[2].dev = &(hevc_pdev->dev);
        ret = vpu_dma_buffer_get_phys(&dma_cfg[2], &phy_addr_v);
        if (ret < 0) {
            enc_pr(LOG_ERROR, "import fd %d failed\n", info.fd[2]);
            return -1;
        }
    }

    enc_pr(LOG_INFO, "vpu_src_addr_config phy_addr 0x%lx, 0x%lx, 0x%lx\n",
        phy_addr_y, phy_addr_u, phy_addr_v);

    dma_cfg[0].paddr = (void *)phy_addr_y;
    dma_cfg[1].paddr = (void *)phy_addr_u;
    dma_cfg[2].paddr = (void *)phy_addr_v;

    enc_pr(LOG_INFO, "info.num_planes %d, info.fmt %d\n",
        info.num_planes, info.fmt);

    WriteVpuRegister(W4_SRC_ADDR_Y, phy_addr_y);
    if (info.num_planes == 1) {
        if (info.fmt == AMVENC_YUV420) {
            WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
            WriteVpuRegister(W4_SRC_ADDR_V,
                phy_addr_y + Ysize + Usize);
        } else if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21) {
            WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_y + Ysize);
            WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_y + Ysize);
        } else {
            enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
        }
    } else if (info.num_planes == 2) {
        if (info.fmt == AMVENC_NV12 || info.fmt == AMVENC_NV21) {
            WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
            WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_u);
        } else {
            enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
        }
    } else if (info.num_planes == 3) {
        if (info.fmt == AMVENC_YUV420) {
            WriteVpuRegister(W4_SRC_ADDR_U, phy_addr_u);
            WriteVpuRegister(W4_SRC_ADDR_V, phy_addr_v);
        } else {
            enc_pr(LOG_ERROR, "unsupported fmt %d\n", info.fmt);
        }
    }
    return 0;
}

static const struct file_operations vpu_fops = {
    .owner = THIS_MODULE,
    .open = vpu_open,
    .release = vpu_release,
    .write = vpu_write,
    .unlocked_ioctl = vpu_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = vpu_compat_ioctl,
#endif
    .fasync = vpu_fasync,
    .mmap = vpu_mmap,
};
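
/*
 * Typical user-space call sequence (illustrative sketch only; the
 * payload layouts are defined in vpu.h and the exact usage is owned
 * by the vpuapi layer):
 *
 *   int fd = open("/dev/HevcEnc", O_RDWR);
 *   struct vpudrv_buffer_t vb = { .size = buf_size, .cached = 1 };
 *   ioctl(fd, VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY, &vb);
 *   void *p = mmap(NULL, vb.size, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED, fd, vb.phys_addr);
 *   struct vpudrv_intr_info_t intr = { .timeout = 100 };
 *   ioctl(fd, VDI_IOCTL_WAIT_INTERRUPT, &intr);
 */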

static ssize_t hevcenc_status_show(struct class *cla,
    struct class_attribute *attr, char *buf)
{
    return snprintf(buf, 40, "hevcenc_status_show\n");
}

static struct class_attribute hevcenc_class_attrs[] = {
    __ATTR(encode_status,
        S_IRUGO | S_IWUSR,
        hevcenc_status_show,
        NULL),
    __ATTR_NULL
};

static struct class hevcenc_class = {
    .name = VPU_CLASS_NAME,
    .class_attrs = hevcenc_class_attrs,
};

s32 init_HevcEnc_device(void)
{
    s32 r = 0;

    r = register_chrdev(0, VPU_DEV_NAME, &vpu_fops);
    if (r <= 0) {
        enc_pr(LOG_ERROR, "register hevcenc device error.\n");
        return r;
    }
    s_vpu_major = r;

    r = class_register(&hevcenc_class);
    if (r < 0) {
        enc_pr(LOG_ERROR, "failed to create hevcenc class.\n");
        return r;
    }

    hevcenc_dev = device_create(&hevcenc_class, NULL,
        MKDEV(s_vpu_major, 0), NULL,
        VPU_DEV_NAME);

    if (IS_ERR(hevcenc_dev)) {
        enc_pr(LOG_ERROR, "create hevcenc device error.\n");
        class_unregister(&hevcenc_class);
        return -1;
    }
    return r;
}

s32 uninit_HevcEnc_device(void)
{
    if (hevcenc_dev)
        device_destroy(&hevcenc_class, MKDEV(s_vpu_major, 0));

    class_destroy(&hevcenc_class);

    unregister_chrdev(s_vpu_major, VPU_DEV_NAME);
    return 0;
}

static s32 hevc_mem_device_init(
    struct reserved_mem *rmem, struct device *dev)
{
    s32 r;

    if (!rmem) {
        enc_pr(LOG_ERROR,
            "Cannot obtain I/O memory, will allocate hevc buffer!\n");
        r = -EFAULT;
        return r;
    }

    if ((!rmem->base) ||
        (rmem->size < VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE)) {
        enc_pr(LOG_ERROR,
            "memory range error, 0x%lx - 0x%lx\n",
            (ulong)rmem->base, (ulong)rmem->size);
        r = -EFAULT;
        return r;
    }
    r = 0;
    s_video_memory.size = rmem->size;
    s_video_memory.phys_addr = (ulong)rmem->base;
    enc_pr(LOG_DEBUG, "hevc_mem_device_init %d, 0x%lx\n",
        s_video_memory.size, s_video_memory.phys_addr);

    return r;
}
1841 | |
static s32 vpu_probe(struct platform_device *pdev)
{
	s32 err = 0, irq, reg_count, idx;
	struct resource res;
	struct device_node *np, *child;

	enc_pr(LOG_DEBUG, "vpu_probe\n");

	s_vpu_major = 0;
	use_reserve = false;
	s_vpu_irq = -1;
	cma_pool_size = 0;
	s_vpu_irq_requested = false;
	s_vpu_open_ref_count = 0;
	hevcenc_dev = NULL;
	hevc_pdev = NULL;
	memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
	memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
	memset(&s_vmem, 0, sizeof(struct video_mm_t));
	memset(&s_bit_firmware_info[0], 0, sizeof(s_bit_firmware_info));
	memset(&res, 0, sizeof(struct resource));

	idx = of_reserved_mem_device_init(&pdev->dev);
	if (idx != 0) {
		enc_pr(LOG_DEBUG,
			"HevcEnc reserved memory config failed.\n");
	} else if (s_video_memory.phys_addr) {
		use_reserve = true;
	}

	if (use_reserve == false) {
#ifndef CONFIG_CMA
		enc_pr(LOG_ERROR,
			"HevcEnc reserved memory is invalid, probe fail!\n");
		err = -EFAULT;
		goto ERROR_PROBE_DEVICE;
#else
		cma_pool_size =
			(codec_mm_get_total_size() >
			 VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE) ?
			VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE :
			codec_mm_get_total_size();
		enc_pr(LOG_DEBUG,
			"HevcEnc - cma memory pool size: %u MB\n",
			(u32)(cma_pool_size / SZ_1M));
#endif
	}

	/* get interrupt resource */
	irq = platform_get_irq_byname(pdev, "wave420l_irq");
	if (irq < 0) {
		enc_pr(LOG_ERROR, "get HevcEnc irq resource error\n");
		err = -ENXIO;
		goto ERROR_PROBE_DEVICE;
	}
	s_vpu_irq = irq;
	enc_pr(LOG_DEBUG, "HevcEnc - wave420l_irq: %d\n", s_vpu_irq);
#if 0
	rstc = devm_reset_control_get(&pdev->dev, "HevcEnc");
	if (IS_ERR(rstc)) {
		enc_pr(LOG_ERROR,
			"get HevcEnc rstc error: %lx\n", PTR_ERR(rstc));
		rstc = NULL;
		err = -ENOENT;
		goto ERROR_PROBE_DEVICE;
	}
	reset_control_assert(rstc);
	s_vpu_rstc = rstc;

	clk = clk_get(&pdev->dev, "clk_HevcEnc");
	if (IS_ERR(clk)) {
		enc_pr(LOG_ERROR, "cannot get clock\n");
		clk = NULL;
		err = -ENOENT;
		goto ERROR_PROBE_DEVICE;
	}
	s_vpu_clk = clk;
#endif

#ifndef VPU_SUPPORT_CLOCK_CONTROL
	vpu_clk_config(1);
#endif

	np = pdev->dev.of_node;
	reg_count = 0;
	for_each_child_of_node(np, child) {
		if (of_address_to_resource(child, 0, &res)
			|| (reg_count > 1)) {
			enc_pr(LOG_ERROR,
				"missing reg range or too many reg ranges %d\n",
				reg_count);
			err = -ENXIO;
			goto ERROR_PROBE_DEVICE;
		}
		/* if platform driver is implemented */
		if (res.start != 0) {
			s_vpu_register.phys_addr = res.start;
			s_vpu_register.virt_addr =
				(ulong)ioremap_nocache(
					res.start, resource_size(&res));
			s_vpu_register.size = resource_size(&res);
			enc_pr(LOG_DEBUG,
				"vpu base address from platform driver ");
			enc_pr(LOG_DEBUG,
				"physical base addr=0x%lx, virtual base=0x%lx\n",
				s_vpu_register.phys_addr,
				s_vpu_register.virt_addr);
		} else {
			s_vpu_register.phys_addr = VPU_REG_BASE_ADDR;
			s_vpu_register.virt_addr =
				(ulong)ioremap_nocache(
					s_vpu_register.phys_addr, VPU_REG_SIZE);
			s_vpu_register.size = VPU_REG_SIZE;
			enc_pr(LOG_DEBUG,
				"vpu base address from defined value ");
			enc_pr(LOG_DEBUG,
				"physical base addr=0x%lx, virtual base=0x%lx\n",
				s_vpu_register.phys_addr,
				s_vpu_register.virt_addr);
		}
		reg_count++;
	}

	/* get the major number of the character device */
	if (init_HevcEnc_device()) {
		err = -EBUSY;
		enc_pr(LOG_ERROR, "could not allocate major number\n");
		goto ERROR_PROBE_DEVICE;
	}
	enc_pr(LOG_INFO, "SUCCESS register_chrdev\n");

	init_waitqueue_head(&s_interrupt_wait_q);
	tasklet_init(&hevc_tasklet,
		hevcenc_isr_tasklet,
		(ulong)&s_vpu_drv_context);
	s_common_memory.base = 0;
	s_instance_pool.base = 0;

	if (use_reserve == true) {
		if (vmem_init(&s_vmem, s_video_memory.phys_addr,
			s_video_memory.size) < 0) {
			enc_pr(LOG_ERROR, "failed to init vmem system\n");
			err = -ENOMEM;
			goto ERROR_PROBE_DEVICE;
		}
		enc_pr(LOG_DEBUG,
			"successfully probed vpu device with reserved video memory ");
		enc_pr(LOG_DEBUG,
			"phys_addr=0x%lx, base = 0x%lx\n",
			(ulong)s_video_memory.phys_addr,
			(ulong)s_video_memory.base);
	} else
		enc_pr(LOG_DEBUG,
			"successfully probed vpu device, video memory from cma\n");
	hevc_pdev = pdev;
	return 0;

ERROR_PROBE_DEVICE:
	if (s_vpu_register.virt_addr) {
		iounmap((void *)s_vpu_register.virt_addr);
		memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
	}

	if (s_video_memory.phys_addr) {
		vmem_exit(&s_vmem);
		memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
		memset(&s_vmem, 0, sizeof(struct video_mm_t));
	}

	vpu_clk_config(0);

	if (s_vpu_irq_requested == true) {
		if (s_vpu_irq >= 0) {
			free_irq(s_vpu_irq, &s_vpu_drv_context);
			s_vpu_irq = -1;
		}
		s_vpu_irq_requested = false;
	}
	uninit_HevcEnc_device();
	return err;
}

static s32 vpu_remove(struct platform_device *pdev)
{
	enc_pr(LOG_DEBUG, "vpu_remove\n");

	if (s_instance_pool.base) {
		vfree((const void *)s_instance_pool.base);
		s_instance_pool.base = 0;
	}

	if (s_common_memory.phys_addr) {
		vpu_free_dma_buffer(&s_common_memory);
		s_common_memory.phys_addr = 0;
	}

	if (s_video_memory.phys_addr) {
		if (!use_reserve) {
			codec_mm_free_for_dma(
				VPU_DEV_NAME,
				(u32)s_video_memory.phys_addr);
		}
		vmem_exit(&s_vmem);
		memset(&s_video_memory, 0, sizeof(struct vpudrv_buffer_t));
		memset(&s_vmem, 0, sizeof(struct video_mm_t));
	}

	if (s_vpu_irq_requested == true) {
		if (s_vpu_irq >= 0) {
			free_irq(s_vpu_irq, &s_vpu_drv_context);
			s_vpu_irq = -1;
		}
		s_vpu_irq_requested = false;
	}

	if (s_vpu_register.virt_addr) {
		iounmap((void *)s_vpu_register.virt_addr);
		memset(&s_vpu_register, 0, sizeof(struct vpudrv_buffer_t));
	}
	hevc_pdev = NULL;
	vpu_clk_config(0);

	uninit_HevcEnc_device();
	return 0;
}

#ifdef CONFIG_PM
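/*
 * Kick one host command to the Wave4 firmware: mark the core busy,
 * select core 0, write the command word, then raise the host interrupt
 * request.  Completion is detected by the callers below, which poll
 * W4_VPU_BUSY_STATUS and then check W4_RET_SUCCESS.
 */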
static void Wave4BitIssueCommand(u32 core, u32 cmd)
{
	WriteVpuRegister(W4_VPU_BUSY_STATUS, 1);
	WriteVpuRegister(W4_CORE_INDEX, 0);
	/* coreIdx = ReadVpuRegister(W4_VPU_BUSY_STATUS); */
	/* coreIdx = 0; */
	/* WriteVpuRegister(W4_INST_INDEX,
	 * (instanceIndex & 0xffff) | (codecMode << 16));
	 */
	WriteVpuRegister(W4_COMMAND, cmd);
	WriteVpuRegister(W4_VPU_HOST_INT_REQ, 1);
}

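/*
 * On suspend, put every core that has firmware loaded to sleep: wait
 * until the core is idle, issue W4_CMD_SLEEP_VPU, and verify the
 * result; returns -EAGAIN on any timeout or failure.
 */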
static s32 vpu_suspend(struct platform_device *pdev, pm_message_t state)
{
	u32 core;
	ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */

	enc_pr(LOG_DEBUG, "vpu_suspend\n");

	vpu_clk_config(1);

	if (s_vpu_open_ref_count > 0) {
		for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
			if (s_bit_firmware_info[core].size == 0)
				continue;
			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout)) {
					enc_pr(LOG_ERROR,
						"SLEEP_VPU BUSY timeout\n");
					goto DONE_SUSPEND;
				}
			}
			Wave4BitIssueCommand(core, W4_CMD_SLEEP_VPU);

			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout)) {
					enc_pr(LOG_ERROR,
						"SLEEP_VPU BUSY timeout\n");
					goto DONE_SUSPEND;
				}
			}
			if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
				enc_pr(LOG_ERROR,
					"SLEEP_VPU failed [0x%x]\n",
					ReadVpuRegister(W4_RET_FAIL_REASON));
				goto DONE_SUSPEND;
			}
		}
	}

	vpu_clk_config(0);
	return 0;

DONE_SUSPEND:
	vpu_clk_config(0);
	return -EAGAIN;
}
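
/*
 * On resume, walk the same cores and redo the cold-boot sequence:
 * reload the 512-halfword boot code into the common memory, reset all
 * blocks, remap VPU address 0 onto the code base, and re-issue
 * W4_CMD_INIT_VPU before polling for completion.
 */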
static s32 vpu_resume(struct platform_device *pdev)
{
	u32 i;
	u32 core;
	u32 val;
	ulong timeout = jiffies + HZ; /* vpu wait timeout: 1 sec */
	ulong code_base;
	u32 code_size;
	u32 remap_size;
	u32 regVal;
	u32 hwOption = 0;

	enc_pr(LOG_DEBUG, "vpu_resume\n");

	vpu_clk_config(1);
	if (s_vpu_open_ref_count > 0) {
		for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
			if (s_bit_firmware_info[core].size == 0)
				continue;
			code_base = s_common_memory.phys_addr;
			/* ALIGN TO 4KB */
			code_size = (s_common_memory.size & ~0xfff);
			if (code_size < s_bit_firmware_info[core].size * 2)
				goto DONE_WAKEUP;

			/*---- LOAD BOOT CODE */
			for (i = 0; i < 512; i += 2) {
				val = s_bit_firmware_info[core].bit_code[i];
				val |= (s_bit_firmware_info[core].bit_code[i + 1] << 16);
				WriteVpu(code_base + (i * 2), val);
			}

			regVal = 0;
			WriteVpuRegister(W4_PO_CONF, regVal);

			/* Reset all blocks */
			regVal = 0x7ffffff;
			WriteVpuRegister(W4_VPU_RESET_REQ, regVal);

			/* Wait for reset done */
			while (ReadVpuRegister(W4_VPU_RESET_STATUS)) {
				if (time_after(jiffies, timeout))
					goto DONE_WAKEUP;
			}

			WriteVpuRegister(W4_VPU_RESET_REQ, 0);

			/* remap page size */
			remap_size = (code_size >> 12) & 0x1ff;
			regVal = 0x80000000 | (W4_REMAP_CODE_INDEX << 12)
				| (0 << 16) | (1 << 11) | remap_size;
			WriteVpuRegister(W4_VPU_REMAP_CTRL, regVal);
			/* DO NOT CHANGE! */
			WriteVpuRegister(W4_VPU_REMAP_VADDR, 0x00000000);
			WriteVpuRegister(W4_VPU_REMAP_PADDR, code_base);
			WriteVpuRegister(W4_ADDR_CODE_BASE, code_base);
			WriteVpuRegister(W4_CODE_SIZE, code_size);
			WriteVpuRegister(W4_CODE_PARAM, 0);
			WriteVpuRegister(W4_INIT_VPU_TIME_OUT_CNT, timeout);
			WriteVpuRegister(W4_HW_OPTION, hwOption);

			/* Interrupt */
			regVal = (1 << W4_INT_DEC_PIC_HDR);
			regVal |= (1 << W4_INT_DEC_PIC);
			regVal |= (1 << W4_INT_QUERY_DEC);
			regVal |= (1 << W4_INT_SLEEP_VPU);
			regVal |= (1 << W4_INT_BSBUF_EMPTY);
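			/*
			 * NOTE: the mask below supersedes the individual
			 * bits assembled above; 0xfffffefe enables every
			 * interrupt source except bits 0 and 8.
			 */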
			regVal = 0xfffffefe;
			WriteVpuRegister(W4_VPU_VINT_ENABLE, regVal);
			Wave4BitIssueCommand(core, W4_CMD_INIT_VPU);
			WriteVpuRegister(W4_VPU_REMAP_CORE_START, 1);
			while (ReadVpuRegister(W4_VPU_BUSY_STATUS)) {
				if (time_after(jiffies, timeout))
					goto DONE_WAKEUP;
			}

			if (ReadVpuRegister(W4_RET_SUCCESS) == 0) {
				enc_pr(LOG_ERROR,
					"WAKEUP_VPU failed [0x%x]\n",
					ReadVpuRegister(W4_RET_FAIL_REASON));
				goto DONE_WAKEUP;
			}
		}
	}

	if (s_vpu_open_ref_count == 0)
		vpu_clk_config(0);
DONE_WAKEUP:
	if (s_vpu_open_ref_count > 0)
		vpu_clk_config(1);
	return 0;
}
#else
#define vpu_suspend NULL
#define vpu_resume NULL
#endif /* CONFIG_PM */

static const struct of_device_id cnm_hevcenc_dt_match[] = {
	{
		.compatible = "cnm, HevcEnc",
	},
	{},
};
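
/*
 * Illustrative device-tree fragment matching the strings used in this
 * file; the exact node layout is an assumption, not a verified binding:
 *
 *	HevcEnc {
 *		compatible = "cnm, HevcEnc";
 *		interrupt-names = "wave420l_irq";
 *		(the first child node carries the register range parsed
 *		 by the for_each_child_of_node() loop in vpu_probe())
 *	};
 */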

static struct platform_driver vpu_driver = {
	.driver = {
		.name = VPU_PLATFORM_DEVICE_NAME,
		.of_match_table = cnm_hevcenc_dt_match,
	},
	.probe = vpu_probe,
	.remove = vpu_remove,
	.suspend = vpu_suspend,
	.resume = vpu_resume,
};

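/*
 * Module entry: the encoder exists only on GXM/GXLX/G12A/G12B/SM1, and
 * on G12A an efuse bit (EFUSE_LIC2 bit 12) can fuse off H.265 encode,
 * so both are checked before the platform driver is registered.
 */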
static s32 __init vpu_init(void)
{
	s32 res;

	enc_pr(LOG_DEBUG, "vpu_init\n");

	if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_G12A)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_G12B)
		&& (get_cpu_type() != MESON_CPU_MAJOR_ID_SM1)) {
		enc_pr(LOG_DEBUG,
			"This chip does not support the hevc encoder\n");
		return -1;
	}
	if (get_cpu_type() == MESON_CPU_MAJOR_ID_G12A) {
		if ((READ_EFUSE_REG(EFUSE_LIC2) >> 12) & 1) {
			enc_pr(LOG_DEBUG,
				"Chip efuse disabled H265\n");
			return -1;
		}
	}

	res = platform_driver_register(&vpu_driver);
	enc_pr(LOG_INFO,
		"end vpu_init result=0x%x\n", res);
	return res;
}

static void __exit vpu_exit(void)
{
	enc_pr(LOG_DEBUG, "vpu_exit\n");
	if ((get_cpu_type() != MESON_CPU_MAJOR_ID_GXM) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_G12A) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_GXLX) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_G12B) &&
		(get_cpu_type() != MESON_CPU_MAJOR_ID_SM1)) {
		enc_pr(LOG_INFO,
			"This chip does not support the hevc encoder\n");
		return;
	}
	platform_driver_unregister(&vpu_driver);
}

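/*
 * RESERVEDMEM_OF_DECLARE() below ties the "cnm, HevcEnc-memory" node to
 * hevc_mem_setup(), which installs rmem_hevc_ops so that
 * hevc_mem_device_init() runs when vpu_probe() binds the device to its
 * reserved region via of_reserved_mem_device_init().
 */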
static const struct reserved_mem_ops rmem_hevc_ops = {
	.device_init = hevc_mem_device_init,
};

static s32 __init hevc_mem_setup(struct reserved_mem *rmem)
{
	rmem->ops = &rmem_hevc_ops;
	enc_pr(LOG_DEBUG, "HevcEnc reserved mem setup.\n");
	return 0;
}

module_param(print_level, int, 0664);
MODULE_PARM_DESC(print_level, "\n print_level\n");

module_param(force_release, int, 0664);
MODULE_PARM_DESC(force_release, "\n force_release\n");

module_param(clock_level, int, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

MODULE_AUTHOR("Amlogic using C&M VPU, Inc.");
MODULE_DESCRIPTION("VPU linux driver");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);
RESERVEDMEM_OF_DECLARE(cnm_hevc, "cnm, HevcEnc-memory", hevc_mem_setup);