path: root/drivers/frame_provider/decoder/utils/vdec.c (plain)
blob: e219e44a09050bf3c5b2a6063fb4eac8867865de
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#ifdef CONFIG_AMLOGIC_V4L_VIDEO3
37#include <linux/amlogic/media/video_sink/v4lvideo_ext.h>
38#endif
39#include <linux/amlogic/media/vfm/vfm_ext.h>
40#include <linux/sched/clock.h>
41#include <uapi/linux/sched/types.h>
42#include <linux/signal.h>
43/*for VDEC_DEBUG_SUPPORT*/
44#include <linux/time.h>
45#include <linux/amlogic/media/utils/vdec_reg.h>
46#include "vdec.h"
47#include "vdec_trace.h"
48#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
49#include "vdec_profile.h"
50#endif
51#include <linux/sched/clock.h>
52#include <linux/of.h>
53#include <linux/of_fdt.h>
54#include <linux/libfdt_env.h>
55#include <linux/of_reserved_mem.h>
56#include <linux/dma-contiguous.h>
57#include <linux/cma.h>
58#include <linux/module.h>
59#include <linux/slab.h>
60#include <linux/dma-mapping.h>
61#include <linux/dma-contiguous.h>
62#include "../../../stream_input/amports/amports_priv.h"
63
64#include <linux/amlogic/media/utils/amports_config.h>
65#include "../utils/amvdec.h"
66#include "vdec_input.h"
67
68#include "../../../common/media_clock/clk/clk.h"
69#include <linux/reset.h>
70#include <linux/amlogic/cpu_version.h>
71#include <linux/amlogic/media/codec_mm/codec_mm.h>
72#include <linux/amlogic/media/video_sink/video_keeper.h>
73#include <linux/amlogic/media/codec_mm/configs.h>
74#include <linux/amlogic/media/frame_sync/ptsserv.h>
75#include "secprot.h"
76#include "../../../common/chips/decoder_cpu_ver_info.h"
77#include "frame_check.h"
78
79#ifdef CONFIG_AMLOGIC_POWER
80#include <linux/amlogic/power_ctrl.h>
81#endif
82
83#ifdef CONFIG_AMLOGIC_IONVIDEO
84#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
85#endif
86//#include <dt-bindings/power/sc2-pd.h>
87#include <linux/amlogic/pwr_ctrl.h>
88#include <linux/of_device.h>
89#include "vdec_power_ctrl.h"
90
91static DEFINE_MUTEX(vdec_mutex);
92
93#define MC_SIZE (4096 * 4)
94#define CMA_ALLOC_SIZE SZ_64M
95#define MEM_NAME "vdec_prealloc"
96static int inited_vcodec_num;
97#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
98static int poweron_clock_level;
99static int keep_vdec_mem;
100static unsigned int debug_trace_num = 16 * 20;
101static int step_mode;
102static unsigned int clk_config;
103/*
104 &1: sched_priority to MAX_RT_PRIO -1.
105 &2: always reload firmware.
106 &4: vdec canvas debug enable
107 */
108static unsigned int debug;
109
110int hevc_max_reset_count;
111EXPORT_SYMBOL(hevc_max_reset_count);
112
113int no_powerdown;
114EXPORT_SYMBOL(no_powerdown);
115static int parallel_decode = 1;
116static int fps_detection;
117static int fps_clear;
118
119static int force_nosecure_even_drm;
120static int disable_switch_single_to_mult;
121
122static DEFINE_SPINLOCK(vdec_spin_lock);
123
124#define HEVC_TEST_LIMIT 100
125#define GXBB_REV_A_MINOR 0xA
126
127#define PRINT_FRAME_INFO 1
128#define DISABLE_FRAME_INFO 2
129
130static int frameinfo_flag = 0;
131static int v4lvideo_add_di = 1;
132static int max_di_instance = 2;
133//static int path_debug = 0;
134
135static int enable_mvdec_info = 1;
136
137int decode_underflow = 0;
138
139#define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)
140
141struct am_reg {
142 char *name;
143 int offset;
144};
145
146struct vdec_isr_context_s {
147 int index;
148 int irq;
149 irq_handler_t dev_isr;
150 irq_handler_t dev_threaded_isr;
151 void *dev_id;
152 struct vdec_s *vdec;
153};
154
155struct decode_fps_s {
156 u32 frame_count;
157 u64 start_timestamp;
158 u64 last_timestamp;
159 u32 fps;
160};
161
162struct vdec_core_s {
163 struct list_head connected_vdec_list;
164 spinlock_t lock;
165 spinlock_t canvas_lock;
166 spinlock_t fps_lock;
167 spinlock_t input_lock;
168 struct ida ida;
169 atomic_t vdec_nr;
170 struct vdec_s *vfm_vdec;
171 struct vdec_s *active_vdec;
172 struct vdec_s *active_hevc;
173 struct vdec_s *hint_fr_vdec;
174 struct platform_device *vdec_core_platform_device;
175 struct device *cma_dev;
176 struct semaphore sem;
177 struct task_struct *thread;
178 struct workqueue_struct *vdec_core_wq;
179
180 unsigned long sched_mask;
181 struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
182 int power_ref_count[VDEC_MAX];
183 struct vdec_s *last_vdec;
184 int parallel_dec;
185 unsigned long power_ref_mask;
186 int vdec_combine_flag;
187 struct decode_fps_s decode_fps[MAX_INSTANCE_MUN];
188 unsigned long buff_flag;
189 unsigned long stream_buff_flag;
190 struct power_manager_s *pm;
191};
192
193struct canvas_status_s {
194 int type;
195 int canvas_used_flag;
196 int id;
197};
198
199
200static struct vdec_core_s *vdec_core;
201
202static const char * const vdec_status_string[] = {
203 "VDEC_STATUS_UNINITIALIZED",
204 "VDEC_STATUS_DISCONNECTED",
205 "VDEC_STATUS_CONNECTED",
206 "VDEC_STATUS_ACTIVE"
207};
208
209static int debugflags;
210
211static struct canvas_status_s canvas_stat[AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1];
212
213
214int vdec_get_debug_flags(void)
215{
216 return debugflags;
217}
218EXPORT_SYMBOL(vdec_get_debug_flags);
219
220void VDEC_PRINT_FUN_LINENO(const char *fun, int line)
221{
222 if (debugflags & 0x10000000)
223 pr_info("%s, %d\n", fun, line);
224}
225EXPORT_SYMBOL(VDEC_PRINT_FUN_LINENO);
226
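/*
 * Decide whether a stream port should take the multi-instance decoder
 * path.  Ports flagged PORT_TYPE_DECODER_SCHED use it by default; when
 * any of the debugflags bits 0xf000 are set, bit 0x1000 forces the
 * result for debugging.
 */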
227unsigned char is_mult_inc(unsigned int type)
228{
229 unsigned char ret = 0;
230 if (vdec_get_debug_flags() & 0xf000)
231 ret = (vdec_get_debug_flags() & 0x1000)
232 ? 1 : 0;
233 else if (type & PORT_TYPE_DECODER_SCHED)
234 ret = 1;
235 return ret;
236}
237EXPORT_SYMBOL(is_mult_inc);
238
239static const bool cores_with_input[VDEC_MAX] = {
240 true, /* VDEC_1 */
241 false, /* VDEC_HCODEC */
242 false, /* VDEC_2 */
243 true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
244 false, /* VDEC_HEVC_BACK */
245};
246
247static const int cores_int[VDEC_MAX] = {
248 VDEC_IRQ_1,
249 VDEC_IRQ_2,
250 VDEC_IRQ_0,
251 VDEC_IRQ_0,
252 VDEC_IRQ_HEVC_BACK
253};
254
255unsigned long vdec_canvas_lock(struct vdec_core_s *core)
256{
257 unsigned long flags;
258 spin_lock_irqsave(&core->canvas_lock, flags);
259
260 return flags;
261}
262
263void vdec_canvas_unlock(struct vdec_core_s *core, unsigned long flags)
264{
265 spin_unlock_irqrestore(&core->canvas_lock, flags);
266}
267
268unsigned long vdec_fps_lock(struct vdec_core_s *core)
269{
270 unsigned long flags;
271 spin_lock_irqsave(&core->fps_lock, flags);
272
273 return flags;
274}
275
276void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags)
277{
278 spin_unlock_irqrestore(&core->fps_lock, flags);
279}
280
281unsigned long vdec_core_lock(struct vdec_core_s *core)
282{
283 unsigned long flags;
284
285 spin_lock_irqsave(&core->lock, flags);
286
287 return flags;
288}
289
290void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
291{
292 spin_unlock_irqrestore(&core->lock, flags);
293}
294
295unsigned long vdec_inputbuff_lock(struct vdec_core_s *core)
296{
297 unsigned long flags;
298
299 spin_lock_irqsave(&core->input_lock, flags);
300
301 return flags;
302}
303
304void vdec_inputbuff_unlock(struct vdec_core_s *core, unsigned long flags)
305{
306 spin_unlock_irqrestore(&core->input_lock, flags);
307}
308
309
310static bool vdec_is_input_frame_empty(struct vdec_s *vdec) {
311 struct vdec_core_s *core = vdec_core;
312 bool ret;
313 unsigned long flags;
314
315 flags = vdec_inputbuff_lock(core);
316 ret = !(vdec->core_mask & core->buff_flag);
317 vdec_inputbuff_unlock(core, flags);
318
319 return ret;
320}
321
322static void vdec_up(struct vdec_s *vdec)
323{
324 struct vdec_core_s *core = vdec_core;
325
326 if (debug & 8)
327 pr_info("vdec_up, id:%d\n", vdec->id);
328 up(&core->sem);
329}
330
331static u64 vdec_get_us_time_system(void)
332{
333 return div64_u64(local_clock(), 1000);
334}
335
336static void vdec_fps_clear(int id)
337{
338 if (id >= MAX_INSTANCE_MUN)
339 return;
340
341 vdec_core->decode_fps[id].frame_count = 0;
342 vdec_core->decode_fps[id].start_timestamp = 0;
343 vdec_core->decode_fps[id].last_timestamp = 0;
344 vdec_core->decode_fps[id].fps = 0;
345}
346
347static void vdec_fps_clearall(void)
348{
349 int i;
350
351 for (i = 0; i < MAX_INSTANCE_MUN; i++) {
352 vdec_core->decode_fps[i].frame_count = 0;
353 vdec_core->decode_fps[i].start_timestamp = 0;
354 vdec_core->decode_fps[i].last_timestamp = 0;
355 vdec_core->decode_fps[i].fps = 0;
356 }
357}
358
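/*
 * Per-instance decode fps measurement.  Timestamps come from
 * vdec_get_us_time_system() in microseconds, so the stored fps value
 * is frame_count * 10^10 / elapsed_us, i.e. fps scaled by 10^4.
 */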
359static void vdec_fps_detec(int id)
360{
361 unsigned long flags;
362
363 if (fps_detection == 0)
364 return;
365
366 if (id >= MAX_INSTANCE_MUN)
367 return;
368
369 flags = vdec_fps_lock(vdec_core);
370
371 if (fps_clear == 1) {
372 vdec_fps_clearall();
373 fps_clear = 0;
374 }
375
376 vdec_core->decode_fps[id].frame_count++;
377 if (vdec_core->decode_fps[id].frame_count == 1) {
378 vdec_core->decode_fps[id].start_timestamp =
379 vdec_get_us_time_system();
380 vdec_core->decode_fps[id].last_timestamp =
381 vdec_core->decode_fps[id].start_timestamp;
382 } else {
383 vdec_core->decode_fps[id].last_timestamp =
384 vdec_get_us_time_system();
385 vdec_core->decode_fps[id].fps =
386 (u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) *
387 10000000000),
388 (vdec_core->decode_fps[id].last_timestamp -
389 vdec_core->decode_fps[id].start_timestamp));
390 }
391 vdec_fps_unlock(vdec_core, flags);
392}
393
394
395
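/*
 * Hand out @base consecutive canvas indices for decoder instance
 * @index and pack them into one return value, one index per byte with
 * the first index in the lowest byte; for example base == 2 with a
 * start index of 0x10 returns 0x111110.  Returns -1 when @base is
 * invalid or the canvas range is exhausted.
 */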
396static int get_canvas(unsigned int index, unsigned int base)
397{
398 int start;
399 int canvas_index = index * base;
400 int ret;
401
402 if ((base > 4) || (base == 0))
403 return -1;
404
405 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
406 <= AMVDEC_CANVAS_MAX1) {
407 start = AMVDEC_CANVAS_START_INDEX + base * index;
408 } else {
409 canvas_index -= (AMVDEC_CANVAS_MAX1 -
410 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
411 if (canvas_index <= AMVDEC_CANVAS_MAX2)
412 start = canvas_index / base;
413 else
414 return -1;
415 }
416
417 if (base == 1) {
418 ret = start;
419 } else if (base == 2) {
420 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
421 } else if (base == 3) {
422 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
423 } else if (base == 4) {
424 ret = (((start + 3) << 24) | (start + 2) << 16) |
425 ((start + 1) << 8) | start;
426 }
427
428 return ret;
429}
430
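/*
 * get_canvas_ex()/free_canvas_ex() manage the shared canvas_stat[]
 * pool: a canvas of the same @type is reference counted across
 * decoder instances, with users tracked as a per-instance bit in the
 * id field.  Indices 0x10-0x15 are skipped because rdma owns them.
 */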
431static int get_canvas_ex(int type, int id)
432{
433 int i;
434 unsigned long flags;
435
436 flags = vdec_canvas_lock(vdec_core);
437
438 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
439 /*0x10-0x15 are used by rdma*/
440 if ((i >= 0x10) && (i <= 0x15))
441 continue;
442 if ((canvas_stat[i].type == type) &&
443 (canvas_stat[i].id & (1 << id)) == 0) {
444 canvas_stat[i].canvas_used_flag++;
445 canvas_stat[i].id |= (1 << id);
446 if (debug & 4)
447 pr_debug("get used canvas %d\n", i);
448 vdec_canvas_unlock(vdec_core, flags);
449 if (i < AMVDEC_CANVAS_MAX2 + 1)
450 return i;
451 else
452 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
453 }
454 }
455
456 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
457 /*0x10-0x15 are used by rdma*/
458 if ((i >= 0x10) && (i <= 0x15))
459 continue;
460 if (canvas_stat[i].type == 0) {
461 canvas_stat[i].type = type;
462 canvas_stat[i].canvas_used_flag = 1;
463 canvas_stat[i].id = (1 << id);
464 if (debug & 4) {
465 pr_debug("get canvas %d\n", i);
466 pr_debug("canvas_used_flag %d\n",
467 canvas_stat[i].canvas_used_flag);
468 pr_debug("canvas_stat[i].id %d\n",
469 canvas_stat[i].id);
470 }
471 vdec_canvas_unlock(vdec_core, flags);
472 if (i < AMVDEC_CANVAS_MAX2 + 1)
473 return i;
474 else
475 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
476 }
477 }
478 vdec_canvas_unlock(vdec_core, flags);
479
480 pr_info("cannot get canvas\n");
481
482 return -1;
483}
484
485static void free_canvas_ex(int index, int id)
486{
487 unsigned long flags;
488 int offset;
489
490 flags = vdec_canvas_lock(vdec_core);
491 if (index >= 0 &&
492 index < AMVDEC_CANVAS_MAX2 + 1)
493 offset = index;
494 else if ((index >= AMVDEC_CANVAS_START_INDEX) &&
495 (index <= AMVDEC_CANVAS_MAX1))
496 offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX;
497 else {
498 vdec_canvas_unlock(vdec_core, flags);
499 return;
500 }
501
502 if ((canvas_stat[offset].canvas_used_flag > 0) &&
503 (canvas_stat[offset].id & (1 << id))) {
504 canvas_stat[offset].canvas_used_flag--;
505 canvas_stat[offset].id &= ~(1 << id);
506 if (canvas_stat[offset].canvas_used_flag == 0) {
507 canvas_stat[offset].type = 0;
508 canvas_stat[offset].id = 0;
509 }
510 if (debug & 4) {
511 pr_debug("free index %d used_flag %d, type = %d, id = %d\n",
512 offset,
513 canvas_stat[offset].canvas_used_flag,
514 canvas_stat[offset].type,
515 canvas_stat[offset].id);
516 }
517 }
518 vdec_canvas_unlock(vdec_core, flags);
519
520 return;
521
522}
523
524static void vdec_dmc_pipeline_reset(void)
525{
526
527 WRITE_RESET_REG(RESET7_REGISTER,
528 (1 << 15) | (1 << 14) | (1 << 13) |
529 (1 << 12) | (1 << 11));
530}
531
532static void vdec_stop_armrisc(int hw)
533{
534 ulong timeout = jiffies + HZ;
535
536 if (hw == VDEC_INPUT_TARGET_VLD) {
537 WRITE_VREG(MPSR, 0);
538 WRITE_VREG(CPSR, 0);
539
540 while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
541 if (time_after(jiffies, timeout))
542 break;
543 }
544
545 timeout = jiffies + HZ;
546 while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) {
547 if (time_after(jiffies, timeout))
548 break;
549 }
550 } else if (hw == VDEC_INPUT_TARGET_HEVC) {
551 WRITE_VREG(HEVC_MPSR, 0);
552 WRITE_VREG(HEVC_CPSR, 0);
553
554 while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
555 if (time_after(jiffies, timeout))
556 break;
557 }
558
559 timeout = jiffies + HZ/10;
560 while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) {
561 if (time_after(jiffies, timeout))
562 break;
563 }
564 }
565}
566
567static void vdec_disable_DMC(struct vdec_s *vdec)
568{
569 /* close first, then wait for pending transfers to end; timing suggestion from vlsi */
570 struct vdec_input_s *input = &vdec->input;
571 unsigned long flags;
572 unsigned int mask = 0;
573
574 if (input->target == VDEC_INPUT_TARGET_VLD) {
575 mask = (1 << 13);
576 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
577 mask = (1 << 21);
578 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
579 mask = (1 << 4); /*hevc*/
580 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
581 mask |= (1 << 8); /*hevcb */
582 }
583
584 /* need to stop armrisc. */
585 if (!IS_ERR_OR_NULL(vdec->dev))
586 vdec_stop_armrisc(input->target);
587
588 spin_lock_irqsave(&vdec_spin_lock, flags);
589 codec_dmcbus_write(DMC_REQ_CTRL,
590 codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
591 spin_unlock_irqrestore(&vdec_spin_lock, flags);
592
593 while (!(codec_dmcbus_read(DMC_CHAN_STS)
594 & mask))
595 ;
596
597 pr_debug("%s input->target= 0x%x\n", __func__, input->target);
598}
599
600static void vdec_enable_DMC(struct vdec_s *vdec)
601{
602 struct vdec_input_s *input = &vdec->input;
603 unsigned long flags;
604 unsigned int mask = 0;
605
606 if (input->target == VDEC_INPUT_TARGET_VLD) {
607 mask = (1 << 13);
608 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
609 mask = (1 << 21);
610 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
611 mask = (1 << 4); /*hevc*/
612 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
613 mask |= (1 << 8); /*hevcb */
614 }
615
616 /* the dmc pipeline must be reset if it's g12b. */
617 if (get_cpu_type() == AM_MESON_CPU_MAJOR_ID_G12B)
618 vdec_dmc_pipeline_reset();
619
620 spin_lock_irqsave(&vdec_spin_lock, flags);
621 codec_dmcbus_write(DMC_REQ_CTRL,
622 codec_dmcbus_read(DMC_REQ_CTRL) | mask);
623 spin_unlock_irqrestore(&vdec_spin_lock, flags);
624 pr_debug("%s input->target= 0x%x\n", __func__, input->target);
625}
626
627
628
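/*
 * Map a stream format to the decoder core it runs on: HEVC, VP9 and
 * AVS2 use the HEVC core, all other supported formats use VDEC_1.
 */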
629static int vdec_get_hw_type(int value)
630{
631 int type;
632 switch (value) {
633 case VFORMAT_HEVC:
634 case VFORMAT_VP9:
635 case VFORMAT_AVS2:
636 type = CORE_MASK_HEVC;
637 break;
638
639 case VFORMAT_MPEG12:
640 case VFORMAT_MPEG4:
641 case VFORMAT_H264:
642 case VFORMAT_MJPEG:
643 case VFORMAT_REAL:
644 case VFORMAT_JPEG:
645 case VFORMAT_VC1:
646 case VFORMAT_AVS:
647 case VFORMAT_YUV:
648 case VFORMAT_H264MVC:
649 case VFORMAT_H264_4K2K:
650 case VFORMAT_H264_ENC:
651 case VFORMAT_JPEG_ENC:
652 type = CORE_MASK_VDEC_1;
653 break;
654
655 default:
656 type = -1;
657 }
658
659 return type;
660}
661
662
663static void vdec_save_active_hw(struct vdec_s *vdec)
664{
665 int type;
666
667 type = vdec_get_hw_type(vdec->port->vformat);
668
669 if (type == CORE_MASK_HEVC) {
670 vdec_core->active_hevc = vdec;
671 } else if (type == CORE_MASK_VDEC_1) {
672 vdec_core->active_vdec = vdec;
673 } else {
674 pr_info("save_active_fw wrong\n");
675 }
676}
677
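/*
 * Rebuild the scheduler's input status masks: buff_flag collects the
 * core masks of frame based vdecs with data (or EOS) pending, while
 * stream_buff_flag collects the core masks of stream based vdecs.
 */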
678static void vdec_update_buff_status(void)
679{
680 struct vdec_core_s *core = vdec_core;
681 unsigned long flags;
682 struct vdec_s *vdec;
683
684 flags = vdec_inputbuff_lock(core);
685 core->buff_flag = 0;
686 core->stream_buff_flag = 0;
687 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
688 struct vdec_input_s *input = &vdec->input;
689 if (input_frame_based(input)) {
690 if (input->have_frame_num || input->eos)
691 core->buff_flag |= vdec->core_mask;
692 } else if (input_stream_based(input)) {
693 core->stream_buff_flag |= vdec->core_mask;
694 }
695 }
696 vdec_inputbuff_unlock(core, flags);
697}
698
699#if 0
700void vdec_update_streambuff_status(void)
701{
702 struct vdec_core_s *core = vdec_core;
703 struct vdec_s *vdec;
704
705 /* check streaming prepare level threshold if not EOS */
706 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
707 struct vdec_input_s *input = &vdec->input;
708 if (input && input_stream_based(input) && !input->eos &&
709 (vdec->need_more_data & VDEC_NEED_MORE_DATA)) {
710 u32 rp, wp, level;
711
712 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
713 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
714 if (wp < rp)
715 level = input->size + wp - rp;
716 else
717 level = wp - rp;
718 if ((level < input->prepare_level) &&
719 (pts_get_rec_num(PTS_TYPE_VIDEO,
720 vdec->input.total_rd_count) < 2)) {
721 break;
722 } else if (level > input->prepare_level) {
723 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
724 if (debug & 8)
725 pr_info("vdec_flush_streambuff_status up\n");
726 vdec_up(vdec);
727 }
728 break;
729 }
730 }
731}
732EXPORT_SYMBOL(vdec_update_streambuff_status);
733#endif
734
735int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
736{
737 if (vdec && vdec->dec_status &&
738 ((vdec->status == VDEC_STATUS_CONNECTED ||
739 vdec->status == VDEC_STATUS_ACTIVE)))
740 return vdec->dec_status(vdec, vstatus);
741
742 return 0;
743}
744EXPORT_SYMBOL(vdec_status);
745
746int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
747{
748 int r;
749
750 if (vdec->set_trickmode) {
751 r = vdec->set_trickmode(vdec, trickmode);
752
753 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
754 r = vdec->slave->set_trickmode(vdec->slave,
755 trickmode);
756 return r;
757 }
758
759 return -1;
760}
761EXPORT_SYMBOL(vdec_set_trickmode);
762
763int vdec_set_isreset(struct vdec_s *vdec, int isreset)
764{
765 vdec->is_reset = isreset;
766 pr_info("is_reset=%d\n", isreset);
767 if (vdec->set_isreset)
768 return vdec->set_isreset(vdec, isreset);
769 return 0;
770}
771EXPORT_SYMBOL(vdec_set_isreset);
772
773int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
774{
775 vdec->dolby_meta_with_el = isdvmetawithel;
776 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
777 return 0;
778}
779EXPORT_SYMBOL(vdec_set_dv_metawithel);
780
781void vdec_set_no_powerdown(int flag)
782{
783 no_powerdown = flag;
784 pr_info("no_powerdown=%d\n", no_powerdown);
785 return;
786}
787EXPORT_SYMBOL(vdec_set_no_powerdown);
788
789void vdec_count_info(struct vdec_info *vs, unsigned int err,
790 unsigned int offset)
791{
792 if (err)
793 vs->error_frame_count++;
794 if (offset) {
795 if (0 == vs->frame_count) {
796 vs->offset = 0;
797 vs->samp_cnt = 0;
798 }
799 vs->frame_data = offset > vs->total_data ?
800 offset - vs->total_data : vs->total_data - offset;
801 vs->total_data = offset;
802 if (vs->samp_cnt < 96000 * 2) { /* 2s */
803 if (0 == vs->samp_cnt)
804 vs->offset = offset;
805 vs->samp_cnt += vs->frame_dur;
806 } else {
807 vs->bit_rate = (offset - vs->offset) / 2;
808 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
809 vs->samp_cnt = 0;
810 }
811 vs->frame_count++;
812 }
813 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
814 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
815 return;
816}
817EXPORT_SYMBOL(vdec_count_info);
818int vdec_is_support_4k(void)
819{
820 return !is_meson_gxl_package_805X();
821}
822EXPORT_SYMBOL(vdec_is_support_4k);
823
824/*
825 * clk_config:
826 * 0: default
827 * 1: no gp0_pll;
828 * 2: always use gp0_pll;
829 * >=10: fixed n MHz clock;
830 * e.g. == 100 selects a 100 MHz clock;
831 */
832unsigned int get_vdec_clk_config_settings(void)
833{
834 return clk_config;
835}
836void update_vdec_clk_config_settings(unsigned int config)
837{
838 clk_config = config;
839}
840EXPORT_SYMBOL(update_vdec_clk_config_settings);
841
842struct device *get_codec_cma_device(void)
843{
844 return vdec_core->cma_dev;
845}
846
847#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
848static const char * const vdec_device_name[] = {
849 "amvdec_mpeg12", "ammvdec_mpeg12",
850 "amvdec_mpeg4", "ammvdec_mpeg4",
851 "amvdec_h264", "ammvdec_h264",
852 "amvdec_mjpeg", "ammvdec_mjpeg",
853 "amvdec_real", "ammvdec_real",
854 "amjpegdec", "ammjpegdec",
855 "amvdec_vc1", "ammvdec_vc1",
856 "amvdec_avs", "ammvdec_avs",
857 "amvdec_yuv", "ammvdec_yuv",
858 "amvdec_h264mvc", "ammvdec_h264mvc",
859 "amvdec_h264_4k2k", "ammvdec_h264_4k2k",
860 "amvdec_h265", "ammvdec_h265",
861 "amvenc_avc", "amvenc_avc",
862 "jpegenc", "jpegenc",
863 "amvdec_vp9", "ammvdec_vp9",
864 "amvdec_avs2", "ammvdec_avs2"
865};
866
867
868#else
869
870static const char * const vdec_device_name[] = {
871 "amvdec_mpeg12",
872 "amvdec_mpeg4",
873 "amvdec_h264",
874 "amvdec_mjpeg",
875 "amvdec_real",
876 "amjpegdec",
877 "amvdec_vc1",
878 "amvdec_avs",
879 "amvdec_yuv",
880 "amvdec_h264mvc",
881 "amvdec_h264_4k2k",
882 "amvdec_h265",
883 "amvenc_avc",
884 "jpegenc",
885 "amvdec_vp9",
886 "amvdec_avs2"
887};
888
889#endif
890
891/*
892 * Time sliced decoding is only supported for frame based input,
893 * so the legacy decoder can coexist with the time sliced decoder.
894 */
895static const char *get_dev_name(bool use_legacy_vdec, int format)
896{
897#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
898 if (use_legacy_vdec && (debugflags & 0x8) == 0)
899 return vdec_device_name[format * 2];
900 else
901 return vdec_device_name[format * 2 + 1];
902#else
903 return vdec_device_name[format];
904#endif
905}
906
907#ifdef VDEC_DEBUG_SUPPORT
908static u64 get_current_clk(void)
909{
910 /*struct timespec xtime = current_kernel_time();
911 u64 usec = xtime.tv_sec * 1000000;
912 usec += xtime.tv_nsec / 1000;
913 */
914 u64 usec = sched_clock();
915 return usec;
916}
917
918static void inc_profi_count(unsigned long mask, u32 *count)
919{
920 enum vdec_type_e type;
921
922 for (type = VDEC_1; type < VDEC_MAX; type++) {
923 if (mask & (1 << type))
924 count[type]++;
925 }
926}
927
928static void update_profi_clk_run(struct vdec_s *vdec,
929 unsigned long mask, u64 clk)
930{
931 enum vdec_type_e type;
932
933 for (type = VDEC_1; type < VDEC_MAX; type++) {
934 if (mask & (1 << type)) {
935 vdec->start_run_clk[type] = clk;
936 if (vdec->profile_start_clk[type] == 0)
937 vdec->profile_start_clk[type] = clk;
938 vdec->total_clk[type] = clk
939 - vdec->profile_start_clk[type];
940 /*pr_info("set start_run_clk %ld\n",
941 vdec->start_run_clk);*/
942
943 }
944 }
945}
946
947static void update_profi_clk_stop(struct vdec_s *vdec,
948 unsigned long mask, u64 clk)
949{
950 enum vdec_type_e type;
951
952 for (type = VDEC_1; type < VDEC_MAX; type++) {
953 if (mask & (1 << type)) {
954 if (vdec->start_run_clk[type] == 0)
955 pr_info("error, start_run_clk[%d] not set\n", type);
956
957 /*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
958 type,
959 clk,
960 vdec->start_run_clk[type],
961 vdec->run_clk[type]);*/
962 vdec->run_clk[type] +=
963 (clk - vdec->start_run_clk[type]);
964 }
965 }
966}
967
968#endif
969
970int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
971{
972 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
973 sizeof(struct dec_sysinfo)))
974 return -EFAULT;
975
976 /* force switch to mult instance if supports this profile. */
977 if ((vdec->type == VDEC_TYPE_SINGLE) &&
978 !disable_switch_single_to_mult) {
979 const char *str = NULL;
980 char fmt[16] = {0};
981
982 str = strchr(get_dev_name(false, vdec->format), '_');
983 if (!str)
984 return -1;
985
986 sprintf(fmt, "m%s", ++str);
987 if (is_support_profile(fmt) &&
988 vdec->sys_info->format != VIDEO_DEC_FORMAT_H263)
989 vdec->type = VDEC_TYPE_STREAM_PARSER;
990 }
991
992 return 0;
993}
994EXPORT_SYMBOL(vdec_set_decinfo);
995
996/* construct vdec structure */
997struct vdec_s *vdec_create(struct stream_port_s *port,
998 struct vdec_s *master)
999{
1000 struct vdec_s *vdec;
1001 int type = VDEC_TYPE_SINGLE;
1002 int id;
1003 if (is_mult_inc(port->type))
1004 type = (port->type & PORT_TYPE_FRAME) ?
1005 VDEC_TYPE_FRAME_BLOCK :
1006 VDEC_TYPE_STREAM_PARSER;
1007
1008 id = ida_simple_get(&vdec_core->ida,
1009 0, MAX_INSTANCE_MUN, GFP_KERNEL);
1010 if (id < 0) {
1011 pr_info("vdec_create request id failed!ret =%d\n", id);
1012 return NULL;
1013 }
1014 vdec = vzalloc(sizeof(struct vdec_s));
1015
1016 /* TBD */
1017 if (vdec) {
1018 vdec->magic = 0x43454456;
1019 vdec->id = -1;
1020 vdec->type = type;
1021 vdec->port = port;
1022 vdec->sys_info = &vdec->sys_info_store;
1023
1024 INIT_LIST_HEAD(&vdec->list);
1025
1026 atomic_inc(&vdec_core->vdec_nr);
1027 vdec->id = id;
1028 vdec_input_init(&vdec->input, vdec);
1029 vdec->input.vdec_is_input_frame_empty = vdec_is_input_frame_empty;
1030 vdec->input.vdec_up = vdec_up;
1031 if (master) {
1032 vdec->master = master;
1033 master->slave = vdec;
1034 master->sched = 1;
1035 }
1036 if (enable_mvdec_info) {
1037 vdec->mvfrm = (struct vdec_frames_s *)
1038 vzalloc(sizeof(struct vdec_frames_s));
1039 if (!vdec->mvfrm)
1040 pr_err("vzalloc: vdec_frames_s failed\n");
1041 }
1042 }
1043
1044 pr_debug("vdec_create instance %p, total %d, PM: %s\n", vdec,
1045 atomic_read(&vdec_core->vdec_nr),
1046 get_pm_name(vdec_core->pm->pm_type));
1047
1048 //trace_vdec_create(vdec); /*DEBUG_TMP*/
1049
1050 return vdec;
1051}
1052EXPORT_SYMBOL(vdec_create);
1053
1054int vdec_set_format(struct vdec_s *vdec, int format)
1055{
1056 vdec->format = format;
1057 vdec->port_flag |= PORT_FLAG_VFORMAT;
1058
1059 if (vdec->slave) {
1060 vdec->slave->format = format;
1061 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
1062 }
1063
1064 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
1065
1066 return 0;
1067}
1068EXPORT_SYMBOL(vdec_set_format);
1069
1070int vdec_set_pts(struct vdec_s *vdec, u32 pts)
1071{
1072 vdec->pts = pts;
1073 vdec->pts64 = div64_u64((u64)pts * 100, 9);
1074 vdec->pts_valid = true;
1075 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
1076 return 0;
1077}
1078EXPORT_SYMBOL(vdec_set_pts);
1079
1080void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
1081{
1082 vdec->timestamp = timestamp;
1083 vdec->timestamp_valid = true;
1084}
1085EXPORT_SYMBOL(vdec_set_timestamp);
1086
1087int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
1088{
1089 vdec->pts64 = pts64;
1090 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
1091 vdec->pts_valid = true;
1092
1093 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
1094 return 0;
1095}
1096EXPORT_SYMBOL(vdec_set_pts64);
1097
1098int vdec_get_status(struct vdec_s *vdec)
1099{
1100 return vdec->status;
1101}
1102EXPORT_SYMBOL(vdec_get_status);
1103
1104int vdec_get_frame_num(struct vdec_s *vdec)
1105{
1106 return vdec->input.have_frame_num;
1107}
1108EXPORT_SYMBOL(vdec_get_frame_num);
1109
1110void vdec_set_status(struct vdec_s *vdec, int status)
1111{
1112 //trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
1113 vdec->status = status;
1114}
1115EXPORT_SYMBOL(vdec_set_status);
1116
1117void vdec_set_next_status(struct vdec_s *vdec, int status)
1118{
1119 //trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
1120 vdec->next_status = status;
1121}
1122EXPORT_SYMBOL(vdec_set_next_status);
1123
1124int vdec_set_video_path(struct vdec_s *vdec, int video_path)
1125{
1126 vdec->frame_base_video_path = video_path;
1127 return 0;
1128}
1129EXPORT_SYMBOL(vdec_set_video_path);
1130
1131int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
1132{
1133 vdec->vf_receiver_inst = receive_id;
1134 return 0;
1135}
1136EXPORT_SYMBOL(vdec_set_receive_id);
1137
1138/* add frame data to input chain */
1139int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
1140{
1141 return vdec_input_add_frame(&vdec->input, buf, count);
1142}
1143EXPORT_SYMBOL(vdec_write_vframe);
1144
1145int vdec_write_vframe_with_dma(struct vdec_s *vdec,
1146 ulong addr, size_t count, u32 handle)
1147{
1148 return vdec_input_add_frame_with_dma(&vdec->input, addr, count, handle);
1149}
1150EXPORT_SYMBOL(vdec_write_vframe_with_dma);
1151
1152/* queue work on the vdec core workqueue, falling back to the system workqueue */
1153void vdec_schedule_work(struct work_struct *work)
1154{
1155 if (vdec_core->vdec_core_wq)
1156 queue_work(vdec_core->vdec_core_wq, work);
1157 else
1158 schedule_work(work);
1159}
1160EXPORT_SYMBOL(vdec_schedule_work);
1161
1162static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
1163{
1164 if (vdec->master)
1165 return vdec->master;
1166 else if (vdec->slave)
1167 return vdec->slave;
1168 return NULL;
1169}
1170
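/*
 * For stream based input, propagate the decoder's hardware read
 * pointer back to the parser (PARSER_VIDEO_RP).  In dual (master/
 * slave) mode the lower of the two layers' read positions is written
 * so data is not recycled before both decoders have consumed it.
 */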
1171static void vdec_sync_input_read(struct vdec_s *vdec)
1172{
1173 if (!vdec_stream_based(vdec))
1174 return;
1175
1176 if (vdec_dual(vdec)) {
1177 u32 me, other;
1178 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1179 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1180 other =
1181 vdec_get_associate(vdec)->input.stream_cookie;
1182 if (me > other)
1183 return;
1184 else if (me == other) {
1185 me = READ_VREG(VLD_MEM_VIFIFO_RP);
1186 other =
1187 vdec_get_associate(vdec)->input.swap_rp;
1188 if (me > other) {
1189 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1190 vdec_get_associate(vdec)->
1191 input.swap_rp);
1192 return;
1193 }
1194 }
1195 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1196 READ_VREG(VLD_MEM_VIFIFO_RP));
1197 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1198 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1199 if (((me & 0x80000000) == 0) &&
1200 (vdec->input.streaming_rp & 0x80000000))
1201 me += 1ULL << 32;
1202 other = vdec_get_associate(vdec)->input.streaming_rp;
1203 if (me > other) {
1204 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1205 vdec_get_associate(vdec)->
1206 input.swap_rp);
1207 return;
1208 }
1209
1210 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1211 READ_VREG(HEVC_STREAM_RD_PTR));
1212 }
1213 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1214 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1215 READ_VREG(VLD_MEM_VIFIFO_RP));
1216 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1217 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1218 READ_VREG(HEVC_STREAM_RD_PTR));
1219 }
1220}
1221
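/*
 * For stream based input, copy the parser's write pointer into the
 * decoder's stream FIFO so newly parsed data becomes visible to the
 * hardware.
 */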
1222static void vdec_sync_input_write(struct vdec_s *vdec)
1223{
1224 if (!vdec_stream_based(vdec))
1225 return;
1226
1227 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1228 WRITE_VREG(VLD_MEM_VIFIFO_WP,
1229 READ_PARSER_REG(PARSER_VIDEO_WP));
1230 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1231 WRITE_VREG(HEVC_STREAM_WR_PTR,
1232 READ_PARSER_REG(PARSER_VIDEO_WP));
1233 }
1234}
1235
1236/*
1237 *get next frame from input chain
1238 */
1239/*
1240 *The VLD_FIFO is 512 bytes and the video buffer level
1241 * empty interrupt is set to a 0x80 byte threshold
1242 */
1243#define VLD_PADDING_SIZE 1024
1244#define HEVC_PADDING_SIZE (1024*16)
1245int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
1246{
1247 struct vdec_input_s *input = &vdec->input;
1248 struct vframe_chunk_s *chunk = NULL;
1249 struct vframe_block_list_s *block = NULL;
1250 int dummy;
1251
1252 /* full reset to HW input */
1253 if (input->target == VDEC_INPUT_TARGET_VLD) {
1254 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1255
1256 /* reset VLD fifo for all vdec */
1257 WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
1258 WRITE_VREG(DOS_SW_RESET0, 0);
1259
1260 dummy = READ_RESET_REG(RESET0_REGISTER);
1261 WRITE_VREG(POWER_CTL_VLD, 1 << 4);
1262 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1263#if 0
1264 /*move to driver*/
1265 if (input_frame_based(input))
1266 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
1267
1268 /*
1269 * 2: assist
1270 * 3: parser
1271 * 4: parser_state
1272 * 8: dblk
1273 * 11:mcpu
1274 * 12:ccpu
1275 * 13:ddr
1276 * 14:iqit
1277 * 15:ipp
1278 * 17:qdct
1279 * 18:mpred
1280 * 19:sao
1281 * 24:hevc_afifo
1282 */
1283 WRITE_VREG(DOS_SW_RESET3,
1284 (1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
1285 (1<<17)|(1<<18)|(1<<19));
1286 WRITE_VREG(DOS_SW_RESET3, 0);
1287#endif
1288 }
1289
1290 /*
1291 *setup HW decoder input buffer (VLD context)
1292 * based on input->type and input->target
1293 */
1294 if (input_frame_based(input)) {
1295 chunk = vdec_input_next_chunk(&vdec->input);
1296
1297 if (chunk == NULL) {
1298 *p = NULL;
1299 return -1;
1300 }
1301
1302 block = chunk->block;
1303
1304 if (input->target == VDEC_INPUT_TARGET_VLD) {
1305 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
1306 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
1307 block->size - 8);
1308 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
1309 round_down(block->start + chunk->offset,
1310 VDEC_FIFO_ALIGN));
1311
1312 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
1313 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1314
1315 /* set to manual mode */
1316 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1317 WRITE_VREG(VLD_MEM_VIFIFO_RP,
1318 round_down(block->start + chunk->offset,
1319 VDEC_FIFO_ALIGN));
1320 dummy = chunk->offset + chunk->size +
1321 VLD_PADDING_SIZE;
1322 if (dummy >= block->size)
1323 dummy -= block->size;
1324 WRITE_VREG(VLD_MEM_VIFIFO_WP,
1325 round_down(block->start + dummy,
1326 VDEC_FIFO_ALIGN));
1327
1328 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
1329 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1330
1331 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1332 (0x11 << 16) | (1<<10) | (7<<3));
1333
1334 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1335 WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
1336 WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
1337 block->size);
1338 WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
1339 chunk->offset);
1340 dummy = chunk->offset + chunk->size +
1341 HEVC_PADDING_SIZE;
1342 if (dummy >= block->size)
1343 dummy -= block->size;
1344 WRITE_VREG(HEVC_STREAM_WR_PTR,
1345 round_down(block->start + dummy,
1346 VDEC_FIFO_ALIGN));
1347
1348 /* set endian */
1349 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1350 }
1351
1352 *p = chunk;
1353 return chunk->size;
1354
1355 } else {
1356 /* stream based */
1357 u32 rp = 0, wp = 0, fifo_len = 0;
1358 int size;
1359 bool swap_valid = input->swap_valid;
1360 unsigned long swap_page_phys = input->swap_page_phys;
1361
1362 if (vdec_dual(vdec) &&
1363 ((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
1364 /* keep using previous input context */
1365 struct vdec_s *master = (vdec->slave) ?
1366 vdec : vdec->master;
1367 if (master->input.last_swap_slave) {
1368 swap_valid = master->slave->input.swap_valid;
1369 swap_page_phys =
1370 master->slave->input.swap_page_phys;
1371 } else {
1372 swap_valid = master->input.swap_valid;
1373 swap_page_phys = master->input.swap_page_phys;
1374 }
1375 }
1376
1377 if (swap_valid) {
1378 if (input->target == VDEC_INPUT_TARGET_VLD) {
1379 if (vdec->format == VFORMAT_H264)
1380 SET_VREG_MASK(POWER_CTL_VLD,
1381 (1 << 9));
1382
1383 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1384
1385 /* restore read side */
1386 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1387 swap_page_phys);
1388 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
1389
1390 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1391 ;
1392 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1393
1394 /* restore wrap count */
1395 WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
1396 input->stream_cookie);
1397
1398 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1399 fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);
1400
1401 /* enable */
1402 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1403 (0x11 << 16) | (1<<10));
1404
1405 /* sync with front end */
1406 vdec_sync_input_read(vdec);
1407 vdec_sync_input_write(vdec);
1408
1409 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1410 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1411 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
1412
1413 /* restore read side */
1414 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1415 swap_page_phys);
1416 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
1417
1418 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
1419 & (1<<7))
1420 ;
1421 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1422
1423 /* restore stream offset */
1424 WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
1425 input->stream_cookie);
1426
1427 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1428 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1429 >> 16) & 0x7f;
1430
1431
1432 /* enable */
1433
1434 /* sync with front end */
1435 vdec_sync_input_read(vdec);
1436 vdec_sync_input_write(vdec);
1437
1438 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1439
1440 /*pr_info("vdec: restore context\r\n");*/
1441 }
1442
1443 } else {
1444 if (input->target == VDEC_INPUT_TARGET_VLD) {
1445 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
1446 input->start);
1447 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
1448 input->start + input->size - 8);
1449 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
1450 input->start);
1451
1452 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
1453 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1454
1455 /* set to manual mode */
1456 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1457 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
1458 WRITE_VREG(VLD_MEM_VIFIFO_WP,
1459 READ_PARSER_REG(PARSER_VIDEO_WP));
1460
1461 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1462
1463 /* enable */
1464 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
1465 (0x11 << 16) | (1<<10));
1466
1467 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1468
1469 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1470 WRITE_VREG(HEVC_STREAM_START_ADDR,
1471 input->start);
1472 WRITE_VREG(HEVC_STREAM_END_ADDR,
1473 input->start + input->size);
1474 WRITE_VREG(HEVC_STREAM_RD_PTR,
1475 input->start);
1476 WRITE_VREG(HEVC_STREAM_WR_PTR,
1477 READ_PARSER_REG(PARSER_VIDEO_WP));
1478
1479 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1480 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1481 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1482 >> 16) & 0x7f;
1483
1484 /* enable */
1485 }
1486 }
1487 *p = NULL;
1488 if (wp >= rp)
1489 size = wp - rp + fifo_len;
1490 else
1491 size = wp + input->size - rp + fifo_len;
1492 if (size < 0) {
1493 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1494 __func__, input->size, wp, rp, fifo_len, size);
1495 size = 0;
1496 }
1497 return size;
1498 }
1499}
1500EXPORT_SYMBOL(vdec_prepare_input);
1501
1502void vdec_enable_input(struct vdec_s *vdec)
1503{
1504 struct vdec_input_s *input = &vdec->input;
1505
1506 if (vdec->status != VDEC_STATUS_ACTIVE)
1507 return;
1508
1509 if (input->target == VDEC_INPUT_TARGET_VLD)
1510 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
1511 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1512 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
1513 if (vdec_stream_based(vdec))
1514 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1515 else
1516 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1517 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
1518 }
1519}
1520EXPORT_SYMBOL(vdec_enable_input);
1521
1522int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
1523{
1524 int r = vdec_input_set_buffer(&vdec->input, start, size);
1525
1526 if (r)
1527 return r;
1528
1529 if (vdec->slave)
1530 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
1531
1532 return r;
1533}
1534EXPORT_SYMBOL(vdec_set_input_buffer);
1535
1536/*
1537 * vdec_has_more_input() returns whether more input may still be
1538 * consumed by the decoder through vdec_prepare_input().
1539 * Note: this function should be called prior to vdec_vframe_dirty
1540 * by the decoder driver to determine if EOS happened for stream based
1541 * decoding when there is not sufficient data for a frame
1542 */
1543bool vdec_has_more_input(struct vdec_s *vdec)
1544{
1545 struct vdec_input_s *input = &vdec->input;
1546
1547 if (!input->eos)
1548 return true;
1549
1550 if (input_frame_based(input))
1551 return vdec_input_next_input_chunk(input) != NULL;
1552 else {
1553 if (input->target == VDEC_INPUT_TARGET_VLD)
1554 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
1555 READ_PARSER_REG(PARSER_VIDEO_WP);
1556 else {
1557 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
1558 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
1559 }
1560 }
1561}
1562EXPORT_SYMBOL(vdec_has_more_input);
1563
1564void vdec_set_prepare_level(struct vdec_s *vdec, int level)
1565{
1566 vdec->input.prepare_level = level;
1567}
1568EXPORT_SYMBOL(vdec_set_prepare_level);
1569
1570void vdec_set_flag(struct vdec_s *vdec, u32 flag)
1571{
1572 vdec->flag = flag;
1573}
1574EXPORT_SYMBOL(vdec_set_flag);
1575
1576void vdec_set_eos(struct vdec_s *vdec, bool eos)
1577{
1578 struct vdec_core_s *core = vdec_core;
1579
1580 vdec->input.eos = eos;
1581
1582 if (vdec->slave)
1583 vdec->slave->input.eos = eos;
1584 up(&core->sem);
1585}
1586EXPORT_SYMBOL(vdec_set_eos);
1587
1588#ifdef VDEC_DEBUG_SUPPORT
1589void vdec_set_step_mode(void)
1590{
1591 step_mode = 0x1ff;
1592}
1593EXPORT_SYMBOL(vdec_set_step_mode);
1594#endif
1595
1596void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1597{
1598 if (vdec && next_vdec) {
1599 vdec->sched = 0;
1600 next_vdec->sched = 1;
1601 }
1602}
1603EXPORT_SYMBOL(vdec_set_next_sched);
1604
1605/*
1606 * Swap Context:       S0      S1      S2      S3      S4
1607 * Sample sequence:    M       S       M       M       S
1608 * Master Context:     S0      S0      S2      S3      S3
1609 * Slave context:      NA      S1      S1      S2      S4
1610 *                                             ^
1611 *                                             ^
1612 *                                             ^
1613 *                                      the tricky part
1614 * If there are back to back decoding of master or slave
1615 * then the context of the counter part should be updated
1616 * with current decoder. In this example, S1 should be
1617 * updated to S2.
1618 * This is done by swap the swap_page and related info
1619 * between two layers.
1620 */
1621static void vdec_borrow_input_context(struct vdec_s *vdec)
1622{
1623 struct page *swap_page;
1624 unsigned long swap_page_phys;
1625 struct vdec_input_s *me;
1626 struct vdec_input_s *other;
1627
1628 if (!vdec_dual(vdec))
1629 return;
1630
1631 me = &vdec->input;
1632 other = &vdec_get_associate(vdec)->input;
1633
1634 /* swap the swap_context, borrow counter part's
1635 * swap context storage and update all related info.
1636 * After vdec_vframe_dirty, vdec_save_input_context
1637 * will be called to update current vdec's
1638 * swap context
1639 */
1640 swap_page = other->swap_page;
1641 other->swap_page = me->swap_page;
1642 me->swap_page = swap_page;
1643
1644 swap_page_phys = other->swap_page_phys;
1645 other->swap_page_phys = me->swap_page_phys;
1646 me->swap_page_phys = swap_page_phys;
1647
1648 other->swap_rp = me->swap_rp;
1649 other->streaming_rp = me->streaming_rp;
1650 other->stream_cookie = me->stream_cookie;
1651 other->swap_valid = me->swap_valid;
1652}
1653
1654void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
1655{
1656 if (chunk)
1657 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
1658
1659 if (vdec_stream_based(vdec)) {
1660 vdec->input.swap_needed = true;
1661
1662 if (vdec_dual(vdec)) {
1663 vdec_get_associate(vdec)->input.dirty_count = 0;
1664 vdec->input.dirty_count++;
1665 if (vdec->input.dirty_count > 1) {
1666 vdec->input.dirty_count = 1;
1667 vdec_borrow_input_context(vdec);
1668 }
1669 }
1670
1671 /* for stream based mode, we update read and write pointer
1672 * also in case decoder wants to keep working on decoding
1673 * for more frames while input front end has more data
1674 */
1675 vdec_sync_input_read(vdec);
1676 vdec_sync_input_write(vdec);
1677
1678 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1679 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1680 }
1681}
1682EXPORT_SYMBOL(vdec_vframe_dirty);
1683
1684bool vdec_need_more_data(struct vdec_s *vdec)
1685{
1686 if (vdec_stream_based(vdec))
1687 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1688
1689 return false;
1690}
1691EXPORT_SYMBOL(vdec_need_more_data);
1692
1693
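/*
 * Gate off the HEVC (and, from G12A on, HEVCB) DMC request port and
 * wait for DMC_CHAN_STS to indicate the outstanding requests have
 * drained, similar to the HEVC branch of vdec_disable_DMC().
 */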
1694void hevc_wait_ddr(void)
1695{
1696 unsigned long flags;
1697 unsigned int mask = 0;
1698
1699 mask = 1 << 4; /* hevc */
1700 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
1701 mask |= (1 << 8); /* hevcb */
1702
1703 spin_lock_irqsave(&vdec_spin_lock, flags);
1704 codec_dmcbus_write(DMC_REQ_CTRL,
1705 codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
1706 spin_unlock_irqrestore(&vdec_spin_lock, flags);
1707
1708 while (!(codec_dmcbus_read(DMC_CHAN_STS)
1709 & mask))
1710 ;
1711}
1712
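/*
 * Save the hardware stream input context (swap page, read pointer and
 * wrap/byte count) so the core can be handed to another instance and
 * the context restored later by vdec_prepare_input() or
 * vdec_input_read_restore().
 */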
1713void vdec_save_input_context(struct vdec_s *vdec)
1714{
1715 struct vdec_input_s *input = &vdec->input;
1716
1717#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1718 vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
1719#endif
1720
1721 if (input->target == VDEC_INPUT_TARGET_VLD)
1722 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);
1723
1724 if (input_stream_based(input) && (input->swap_needed)) {
1725 if (input->target == VDEC_INPUT_TARGET_VLD) {
1726 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1727 input->swap_page_phys);
1728 WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
1729 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1730 ;
1731 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1732 vdec->input.stream_cookie =
1733 READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1734 vdec->input.swap_rp =
1735 READ_VREG(VLD_MEM_VIFIFO_RP);
1736 vdec->input.total_rd_count =
1737 (u64)vdec->input.stream_cookie *
1738 vdec->input.size + vdec->input.swap_rp -
1739 READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
1740 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1741 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1742 input->swap_page_phys);
1743 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);
1744
1745 while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
1746 ;
1747 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1748
1749 vdec->input.stream_cookie =
1750 READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1751 vdec->input.swap_rp =
1752 READ_VREG(HEVC_STREAM_RD_PTR);
1753 if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
1754 (vdec->input.streaming_rp & 0x80000000))
1755 vdec->input.streaming_rp += 1ULL << 32;
1756 vdec->input.streaming_rp &= 0xffffffffULL << 32;
1757 vdec->input.streaming_rp |= vdec->input.stream_cookie;
1758 vdec->input.total_rd_count = vdec->input.streaming_rp;
1759 }
1760
1761 input->swap_valid = true;
1762 input->swap_needed = false;
1763 /*pr_info("vdec: save context\r\n");*/
1764
1765 vdec_sync_input_read(vdec);
1766
1767 if (vdec_dual(vdec)) {
1768 struct vdec_s *master = (vdec->slave) ?
1769 vdec : vdec->master;
1770 master->input.last_swap_slave = (master->slave == vdec);
1771 /* pr_info("master->input.last_swap_slave = %d\n",
1772 master->input.last_swap_slave); */
1773 }
1774
1775 hevc_wait_ddr();
1776 }
1777}
1778EXPORT_SYMBOL(vdec_save_input_context);
1779
1780void vdec_clean_input(struct vdec_s *vdec)
1781{
1782 struct vdec_input_s *input = &vdec->input;
1783
1784 while (!list_empty(&input->vframe_chunk_list)) {
1785 struct vframe_chunk_s *chunk =
1786 vdec_input_next_chunk(input);
1787 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
1788 vdec_input_release_chunk(input, chunk);
1789 else
1790 break;
1791 }
1792 vdec_save_input_context(vdec);
1793}
1794EXPORT_SYMBOL(vdec_clean_input);
1795
1796
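/*
 * Restore the stream read side from the saved swap page, or
 * re-initialize the FIFO pointers to the start of the stream buffer
 * when no valid swap context exists yet.
 */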
1797static int vdec_input_read_restore(struct vdec_s *vdec)
1798{
1799 struct vdec_input_s *input = &vdec->input;
1800
1801 if (!vdec_stream_based(vdec))
1802 return 0;
1803
1804 if (!input->swap_valid) {
1805 if (input->target == VDEC_INPUT_TARGET_VLD) {
1806 WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
1807 input->start);
1808 WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
1809 input->start + input->size - 8);
1810 WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
1811 input->start);
1812 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
1813 WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);
1814
1815 /* set to manual mode */
1816 WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
1817 WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
1818 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1819 WRITE_VREG(HEVC_STREAM_START_ADDR,
1820 input->start);
1821 WRITE_VREG(HEVC_STREAM_END_ADDR,
1822 input->start + input->size);
1823 WRITE_VREG(HEVC_STREAM_RD_PTR,
1824 input->start);
1825 }
1826 return 0;
1827 }
1828 if (input->target == VDEC_INPUT_TARGET_VLD) {
1829 /* restore read side */
1830 WRITE_VREG(VLD_MEM_SWAP_ADDR,
1831 input->swap_page_phys);
1832
1833 /*swap active*/
1834 WRITE_VREG(VLD_MEM_SWAP_CTL, 1);
1835
1836 /*wait swap busy*/
1837 while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
1838 ;
1839
1840 WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
1841 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1842 /* restore read side */
1843 WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
1844 input->swap_page_phys);
1845 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);
1846
1847 while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
1848 & (1<<7))
1849 ;
1850 WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
1851 }
1852
1853 return 0;
1854}
1855
1856
1857int vdec_sync_input(struct vdec_s *vdec)
1858{
1859 struct vdec_input_s *input = &vdec->input;
1860 u32 rp = 0, wp = 0, fifo_len = 0;
1861 int size;
1862
1863 vdec_input_read_restore(vdec);
1864 vdec_sync_input_read(vdec);
1865 vdec_sync_input_write(vdec);
1866 if (input->target == VDEC_INPUT_TARGET_VLD) {
1867 rp = READ_VREG(VLD_MEM_VIFIFO_RP);
1868 wp = READ_VREG(VLD_MEM_VIFIFO_WP);
1869
1870 } else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1871 rp = READ_VREG(HEVC_STREAM_RD_PTR);
1872 wp = READ_VREG(HEVC_STREAM_WR_PTR);
1873 fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
1874 >> 16) & 0x7f;
1875 }
1876 if (wp >= rp)
1877 size = wp - rp + fifo_len;
1878 else
1879 size = wp + input->size - rp + fifo_len;
1880 if (size < 0) {
1881 pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
1882 __func__, input->size, wp, rp, fifo_len, size);
1883 size = 0;
1884 }
1885 return size;
1886
1887}
1888EXPORT_SYMBOL(vdec_sync_input);
1889
1890const char *vdec_status_str(struct vdec_s *vdec)
1891{
1892 if (vdec->status < 0)
1893 return "INVALID";
1894 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1895 vdec_status_string[vdec->status] : "INVALID";
1896}
1897
1898const char *vdec_type_str(struct vdec_s *vdec)
1899{
1900 switch (vdec->type) {
1901 case VDEC_TYPE_SINGLE:
1902 return "VDEC_TYPE_SINGLE";
1903 case VDEC_TYPE_STREAM_PARSER:
1904 return "VDEC_TYPE_STREAM_PARSER";
1905 case VDEC_TYPE_FRAME_BLOCK:
1906 return "VDEC_TYPE_FRAME_BLOCK";
1907 case VDEC_TYPE_FRAME_CIRCULAR:
1908 return "VDEC_TYPE_FRAME_CIRCULAR";
1909 default:
1910 return "VDEC_TYPE_INVALID";
1911 }
1912}
1913
1914const char *vdec_device_name_str(struct vdec_s *vdec)
1915{
1916 return vdec_device_name[vdec->format * 2 + 1];
1917}
1918EXPORT_SYMBOL(vdec_device_name_str);
1919
1920void walk_vdec_core_list(char *s)
1921{
1922 struct vdec_s *vdec;
1923 struct vdec_core_s *core = vdec_core;
1924 unsigned long flags;
1925
1926 pr_info("%s --->\n", s);
1927
1928 flags = vdec_core_lock(vdec_core);
1929
1930 if (list_empty(&core->connected_vdec_list)) {
1931 pr_info("connected vdec list empty\n");
1932 } else {
1933 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1934 pr_info("\tvdec (%p), status = %s\n", vdec,
1935 vdec_status_str(vdec));
1936 }
1937 }
1938
1939 vdec_core_unlock(vdec_core, flags);
1940}
1941EXPORT_SYMBOL(walk_vdec_core_list);
1942
1943/* insert vdec to vdec_core for scheduling,
1944 * for dual running decoders, connect/disconnect always runs in pairs
1945 */
1946int vdec_connect(struct vdec_s *vdec)
1947{
1948 unsigned long flags;
1949
1950 //trace_vdec_connect(vdec);/*DEBUG_TMP*/
1951
1952 if (vdec->status != VDEC_STATUS_DISCONNECTED)
1953 return 0;
1954
1955 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
1956 vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
1957
1958 init_completion(&vdec->inactive_done);
1959
1960 if (vdec->slave) {
1961 vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
1962 vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);
1963
1964 init_completion(&vdec->slave->inactive_done);
1965 }
1966
1967 flags = vdec_core_lock(vdec_core);
1968
1969 list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);
1970
1971 if (vdec->slave) {
1972 list_add_tail(&vdec->slave->list,
1973 &vdec_core->connected_vdec_list);
1974 }
1975
1976 vdec_core_unlock(vdec_core, flags);
1977
1978 up(&vdec_core->sem);
1979
1980 return 0;
1981}
1982EXPORT_SYMBOL(vdec_connect);
1983
1984/* remove vdec from vdec_core scheduling */
1985int vdec_disconnect(struct vdec_s *vdec)
1986{
1987#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
1988 vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
1989#endif
1990 //trace_vdec_disconnect(vdec);/*DEBUG_TMP*/
1991
1992 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
1993 (vdec->status != VDEC_STATUS_ACTIVE)) {
1994 return 0;
1995 }
1996 mutex_lock(&vdec_mutex);
1997 /*
1998 *when a vdec is under the management of the scheduler,
1999 * the status change will only come from vdec_core_thread
2000 */
2001 vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);
2002
2003 if (vdec->slave)
2004 vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
2005 else if (vdec->master)
2006 vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
2007 mutex_unlock(&vdec_mutex);
2008 up(&vdec_core->sem);
2009
2010 if(!wait_for_completion_timeout(&vdec->inactive_done,
2011 msecs_to_jiffies(2000)))
2012 goto discon_timeout;
2013
2014 if (vdec->slave) {
2015 if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
2016 msecs_to_jiffies(2000)))
2017 goto discon_timeout;
2018 } else if (vdec->master) {
2019 if(!wait_for_completion_timeout(&vdec->master->inactive_done,
2020 msecs_to_jiffies(2000)))
2021 goto discon_timeout;
2022 }
2023
2024 return 0;
2025discon_timeout:
2026 pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
2027 return 0;
2028}
2029EXPORT_SYMBOL(vdec_disconnect);
2030
2031/* release vdec structure */
2032int vdec_destroy(struct vdec_s *vdec)
2033{
2034 //trace_vdec_destroy(vdec);/*DEBUG_TMP*/
2035
2036 vdec_input_release(&vdec->input);
2037
2038#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2039 vdec_profile_flush(vdec);
2040#endif
2041 ida_simple_remove(&vdec_core->ida, vdec->id);
2042 if (vdec->mvfrm)
2043 vfree(vdec->mvfrm);
2044 vfree(vdec);
2045
2046 atomic_dec(&vdec_core->vdec_nr);
2047
2048 return 0;
2049}
2050EXPORT_SYMBOL(vdec_destroy);
2051
2052/*
2053 * register vdec_device
2054 * create the output path: vfm, or an ionvideo output
2055 */
2056s32 vdec_init(struct vdec_s *vdec, int is_4k)
2057{
2058 int r = 0;
2059 struct vdec_s *p = vdec;
2060 const char *dev_name;
2061 int id = PLATFORM_DEVID_AUTO;/* overridden below if the vdec already has its own id */
2062
2063 dev_name = get_dev_name(vdec_single(vdec), vdec->format);
2064
2065 if (dev_name == NULL)
2066 return -ENODEV;
2067
2068 pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
2069 dev_name, vdec_type_str(vdec));
2070
2071 /*
2072 *todo: VFM path control should be configurable,
2073 * for now all stream based input uses default VFM path.
2074 */
2075 if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
2076 if (vdec_core->vfm_vdec == NULL) {
2077 pr_debug("vdec_init set vfm decoder %p\n", vdec);
2078 vdec_core->vfm_vdec = vdec;
2079 } else {
2080 pr_info("vdec_init vfm path busy.\n");
2081 return -EBUSY;
2082 }
2083 }
2084
2085 mutex_lock(&vdec_mutex);
2086 inited_vcodec_num++;
2087 mutex_unlock(&vdec_mutex);
2088
2089 vdec_input_set_type(&vdec->input, vdec->type,
2090 (vdec->format == VFORMAT_HEVC ||
2091 vdec->format == VFORMAT_AVS2 ||
2092 vdec->format == VFORMAT_VP9) ?
2093 VDEC_INPUT_TARGET_HEVC :
2094 VDEC_INPUT_TARGET_VLD);
2095 if (vdec_single(vdec) || (vdec_get_debug_flags() & 0x2))
2096 vdec_enable_DMC(vdec);
2097 p->cma_dev = vdec_core->cma_dev;
2098 p->get_canvas = get_canvas;
2099 p->get_canvas_ex = get_canvas_ex;
2100 p->free_canvas_ex = free_canvas_ex;
2101 p->vdec_fps_detec = vdec_fps_detec;
2102 atomic_set(&p->inrelease, 0);
2103 atomic_set(&p->inirq_flag, 0);
2104 atomic_set(&p->inirq_thread_flag, 0);
2105 /* todo */
2106 if (!vdec_dual(vdec))
2107 p->use_vfm_path = vdec_stream_based(vdec);
2108 if (debugflags & 0x4)
2109 p->use_vfm_path = 1;
2110 /* vdec_dev_reg.flag = 0; */
2111 if (vdec->id >= 0)
2112 id = vdec->id;
2113 p->parallel_dec = parallel_decode;
2114 vdec_core->parallel_dec = parallel_decode;
2115 vdec->canvas_mode = CANVAS_BLKMODE_32X32;
2116#ifdef FRAME_CHECK
2117 vdec_frame_check_init(vdec);
2118#endif
2119 p->dev = platform_device_register_data(
2120 &vdec_core->vdec_core_platform_device->dev,
2121 dev_name,
2122 id,
2123 &p, sizeof(struct vdec_s *));
2124
2125 if (IS_ERR(p->dev)) {
2126 r = PTR_ERR(p->dev);
2127 pr_err("vdec: Decoder device %s register failed (%d)\n",
2128 dev_name, r);
2129
2130 mutex_lock(&vdec_mutex);
2131 inited_vcodec_num--;
2132 mutex_unlock(&vdec_mutex);
2133
2134 goto error;
2135 } else if (!p->dev->dev.driver) {
2136 pr_info("vdec: Decoder device %s driver probe failed.\n",
2137 dev_name);
2138 r = -ENODEV;
2139
2140 goto error;
2141 }
2142
2143 if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
2144 r = -ENODEV;
2145 pr_err("vdec: Decoder device not handled (%s)\n", dev_name);
2146
2147 mutex_lock(&vdec_mutex);
2148 inited_vcodec_num--;
2149 mutex_unlock(&vdec_mutex);
2150
2151 goto error;
2152 }
2153
2154 if (p->use_vfm_path) {
2155 vdec->vf_receiver_inst = -1;
2156 vdec->vfm_map_id[0] = 0;
2157 } else if (!vdec_dual(vdec)) {
2158 /* create IONVIDEO instance and connect decoder's
2159 * vf_provider interface to it
2160 */
2161 if (p->type != VDEC_TYPE_FRAME_BLOCK) {
2162 r = -ENODEV;
2163 pr_err("vdec: Incorrect decoder type\n");
2164
2165 mutex_lock(&vdec_mutex);
2166 inited_vcodec_num--;
2167 mutex_unlock(&vdec_mutex);
2168
2169 goto error;
2170 }
2171 if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
2172#if 1
2173 r = ionvideo_assign_map(&vdec->vf_receiver_name,
2174 &vdec->vf_receiver_inst);
2175#else
2176 /*
2177			 * Temporarily use the decoder instance ID as the iondriver ID
2178			 * to work around the OMX iondriver instance-number check timing.
2179			 * The only limitation is that we can NOT mix different video
2180			 * decoders, since the same ID would be used for different decoder
2181			 * formats.
2182 */
2183 vdec->vf_receiver_inst = p->dev->id;
2184 r = ionvideo_assign_map(&vdec->vf_receiver_name,
2185 &vdec->vf_receiver_inst);
2186#endif
2187 if (r < 0) {
2188 pr_err("IonVideo frame receiver allocation failed.\n");
2189
2190 mutex_lock(&vdec_mutex);
2191 inited_vcodec_num--;
2192 mutex_unlock(&vdec_mutex);
2193
2194 goto error;
2195 }
2196
2197 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2198 "%s %s", vdec->vf_provider_name,
2199 vdec->vf_receiver_name);
2200 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2201 "vdec-map-%d", vdec->id);
2202 } else if (p->frame_base_video_path ==
2203 FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
2204 if (vdec_secure(vdec)) {
2205 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2206 "%s %s", vdec->vf_provider_name,
2207 "amlvideo amvideo");
2208 } else {
2209 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2210 "%s %s", vdec->vf_provider_name,
2211 "amlvideo ppmgr deinterlace amvideo");
2212 }
2213 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2214 "vdec-map-%d", vdec->id);
2215 } else if (p->frame_base_video_path ==
2216 FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
2217 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2218 "%s %s", vdec->vf_provider_name,
2219 "aml_video.1 videosync.0 videopip");
2220 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2221 "vdec-map-%d", vdec->id);
2222 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_OSD) {
2223 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2224 "%s %s", vdec->vf_provider_name,
2225 vdec->vf_receiver_name);
2226 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2227 "vdec-map-%d", vdec->id);
2228 } else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) {
2229 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2230 "%s %s", vdec->vf_provider_name,
2231 "amvideo");
2232 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2233 "vdec-map-%d", vdec->id);
2234 } else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
2235 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2236 "%s %s %s", vdec->vf_provider_name,
2237 vdec->vf_receiver_name, "amvideo");
2238 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2239 "vdec-map-%d", vdec->id);
2240 } else if (p->frame_base_video_path ==
2241 FRAME_BASE_PATH_DI_V4LVIDEO) {
2242#ifdef CONFIG_AMLOGIC_V4L_VIDEO3
2243 r = v4lvideo_assign_map(&vdec->vf_receiver_name,
2244 &vdec->vf_receiver_inst);
2245#else
2246 r = -1;
2247#endif
2248 if (r < 0) {
2249 pr_err("V4lVideo frame receiver allocation failed.\n");
2250 mutex_lock(&vdec_mutex);
2251 inited_vcodec_num--;
2252 mutex_unlock(&vdec_mutex);
2253 goto error;
2254 }
2255 if (!v4lvideo_add_di || vdec_secure(vdec))
2256 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2257 "%s %s", vdec->vf_provider_name,
2258 vdec->vf_receiver_name);
2259 else {
2260 if (vdec->vf_receiver_inst == 0)
2261 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2262 "%s %s %s", vdec->vf_provider_name,
2263 "dimulti.1",
2264 vdec->vf_receiver_name);
2265 else if ((vdec->vf_receiver_inst <
2266 max_di_instance) &&
2267 (vdec->vf_receiver_inst == 1))
2268 snprintf(vdec->vfm_map_chain,
2269 VDEC_MAP_NAME_SIZE,
2270 "%s %s %s",
2271 vdec->vf_provider_name,
2272 "deinterlace",
2273 vdec->vf_receiver_name);
2274 else if (vdec->vf_receiver_inst <
2275 max_di_instance)
2276 snprintf(vdec->vfm_map_chain,
2277 VDEC_MAP_NAME_SIZE,
2278 "%s %s%d %s",
2279 vdec->vf_provider_name,
2280 "dimulti.",
2281 vdec->vf_receiver_inst,
2282 vdec->vf_receiver_name);
2283 else
2284 snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
2285 "%s %s", vdec->vf_provider_name,
2286 vdec->vf_receiver_name);
2287 }
2288 snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
2289 "vdec-map-%d", vdec->id);
2290 }
2291
2292 if (vfm_map_add(vdec->vfm_map_id,
2293 vdec->vfm_map_chain) < 0) {
2294 r = -ENOMEM;
2295 pr_err("Decoder pipeline map creation failed %s.\n",
2296 vdec->vfm_map_id);
2297 vdec->vfm_map_id[0] = 0;
2298
2299 mutex_lock(&vdec_mutex);
2300 inited_vcodec_num--;
2301 mutex_unlock(&vdec_mutex);
2302
2303 goto error;
2304 }
2305
2306 pr_debug("vfm map %s created\n", vdec->vfm_map_id);
2307
2308 /*
2309		 * assume the IONVIDEO driver already has a few vframe_receivers
2310		 * registered.
2311		 * 1. Call the iondriver function to allocate an IONVIDEO path and
2312 * provide receiver's name and receiver op.
2313 * 2. Get decoder driver's provider name from driver instance
2314 * 3. vfm_map_add(name, "<decoder provider name>
2315 * <iondriver receiver name>"), e.g.
2316 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
2317 * 4. vf_reg_provider and vf_reg_receiver
2318 * Note: the decoder provider's op uses vdec as op_arg
2319 * the iondriver receiver's op uses iondev device as
2320 * op_arg
2321 */
2322
2323 }
2324
2325 if (!vdec_single(vdec)) {
2326 vf_reg_provider(&p->vframe_provider);
2327
2328 vf_notify_receiver(p->vf_provider_name,
2329 VFRAME_EVENT_PROVIDER_START,
2330 vdec);
2331
2332 if (vdec_core->hint_fr_vdec == NULL)
2333 vdec_core->hint_fr_vdec = vdec;
2334
2335 if (vdec_core->hint_fr_vdec == vdec) {
2336 if (p->sys_info->rate != 0) {
2337 if (!vdec->is_reset) {
2338 vf_notify_receiver(p->vf_provider_name,
2339 VFRAME_EVENT_PROVIDER_FR_HINT,
2340 (void *)
2341 ((unsigned long)
2342 p->sys_info->rate));
2343 vdec->fr_hint_state = VDEC_HINTED;
2344 }
2345 } else {
2346 vdec->fr_hint_state = VDEC_NEED_HINT;
2347 }
2348 }
2349 }
2350
2351 p->dolby_meta_with_el = 0;
2352 pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
2353	vdec_input_prepare_bufs(/*prepare buffers for fast start of playback*/
2354 &vdec->input,
2355 vdec->sys_info->width,
2356 vdec->sys_info->height);
2357 /* vdec is now ready to be active */
2358 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
2359 return 0;
2360
2361error:
2362 return r;
2363}
2364EXPORT_SYMBOL(vdec_init);
2365
2366/* vdec_create/init/release/destroy are applied to both dual running decoders
2367 */
2368void vdec_release(struct vdec_s *vdec)
2369{
2370 //trace_vdec_release(vdec);/*DEBUG_TMP*/
2371#ifdef VDEC_DEBUG_SUPPORT
2372 if (step_mode) {
2373 pr_info("VDEC_DEBUG: in step_mode, wait release\n");
2374 while (step_mode)
2375 udelay(10);
2376 pr_info("VDEC_DEBUG: step_mode is clear\n");
2377 }
2378#endif
2379 vdec_disconnect(vdec);
2380
2381 if (vdec->vframe_provider.name) {
2382 if (!vdec_single(vdec)) {
2383 if (vdec_core->hint_fr_vdec == vdec
2384 && vdec->fr_hint_state == VDEC_HINTED)
2385 vf_notify_receiver(
2386 vdec->vf_provider_name,
2387 VFRAME_EVENT_PROVIDER_FR_END_HINT,
2388 NULL);
2389 vdec->fr_hint_state = VDEC_NO_NEED_HINT;
2390 }
2391 vf_unreg_provider(&vdec->vframe_provider);
2392 }
2393
2394 if (vdec_core->vfm_vdec == vdec)
2395 vdec_core->vfm_vdec = NULL;
2396
2397 if (vdec_core->hint_fr_vdec == vdec)
2398 vdec_core->hint_fr_vdec = NULL;
2399
2400 if (vdec->vf_receiver_inst >= 0) {
2401 if (vdec->vfm_map_id[0]) {
2402 vfm_map_remove(vdec->vfm_map_id);
2403 vdec->vfm_map_id[0] = 0;
2404 }
2405 }
2406
2407 atomic_set(&vdec->inrelease, 1);
2408 while ((atomic_read(&vdec->inirq_flag) > 0)
2409 || (atomic_read(&vdec->inirq_thread_flag) > 0))
2410 schedule();
2411
2412#ifdef FRAME_CHECK
2413 vdec_frame_check_exit(vdec);
2414#endif
2415 vdec_fps_clear(vdec->id);
2416 if (atomic_read(&vdec_core->vdec_nr) == 1)
2417 vdec_disable_DMC(vdec);
2418 platform_device_unregister(vdec->dev);
2419 pr_debug("vdec_release instance %p, total %d\n", vdec,
2420 atomic_read(&vdec_core->vdec_nr));
2421 vdec_destroy(vdec);
2422
2423 mutex_lock(&vdec_mutex);
2424 inited_vcodec_num--;
2425 mutex_unlock(&vdec_mutex);
2426
2427}
2428EXPORT_SYMBOL(vdec_release);
2429
2430/* For dual running decoders, vdec_reset is only called with master vdec.
2431 */
2432int vdec_reset(struct vdec_s *vdec)
2433{
2434 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
2435
2436 vdec_disconnect(vdec);
2437
2438 if (vdec->vframe_provider.name)
2439 vf_unreg_provider(&vdec->vframe_provider);
2440
2441 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
2442 vf_unreg_provider(&vdec->slave->vframe_provider);
2443
2444 if (vdec->reset) {
2445 vdec->reset(vdec);
2446 if (vdec->slave)
2447 vdec->slave->reset(vdec->slave);
2448 }
2449 vdec->mc_loaded = 0;/*clear for reload firmware*/
2450 vdec_input_release(&vdec->input);
2451
2452 vdec_input_init(&vdec->input, vdec);
2453
2454 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
2455 vdec->sys_info->height);
2456
2457 vf_reg_provider(&vdec->vframe_provider);
2458 vf_notify_receiver(vdec->vf_provider_name,
2459 VFRAME_EVENT_PROVIDER_START, vdec);
2460
2461 if (vdec->slave) {
2462 vf_reg_provider(&vdec->slave->vframe_provider);
2463 vf_notify_receiver(vdec->slave->vf_provider_name,
2464 VFRAME_EVENT_PROVIDER_START, vdec->slave);
2465 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
2466 }
2467
2468 vdec_connect(vdec);
2469
2470 return 0;
2471}
2472EXPORT_SYMBOL(vdec_reset);
2473
2474int vdec_v4l2_reset(struct vdec_s *vdec, int flag)
2475{
2476 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
2477 pr_debug("vdec_v4l2_reset %d\n", flag);
2478 vdec_disconnect(vdec);
2479 if (flag != 2) {
2480 if (vdec->vframe_provider.name)
2481 vf_unreg_provider(&vdec->vframe_provider);
2482
2483 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
2484 vf_unreg_provider(&vdec->slave->vframe_provider);
2485
2486 if (vdec->reset) {
2487 vdec->reset(vdec);
2488 if (vdec->slave)
2489 vdec->slave->reset(vdec->slave);
2490 }
2491 vdec->mc_loaded = 0;/*clear for reload firmware*/
2492
2493 vdec_input_release(&vdec->input);
2494
2495 vdec_input_init(&vdec->input, vdec);
2496
2497 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
2498 vdec->sys_info->height);
2499
2500 vf_reg_provider(&vdec->vframe_provider);
2501 vf_notify_receiver(vdec->vf_provider_name,
2502 VFRAME_EVENT_PROVIDER_START, vdec);
2503
2504 if (vdec->slave) {
2505 vf_reg_provider(&vdec->slave->vframe_provider);
2506 vf_notify_receiver(vdec->slave->vf_provider_name,
2507 VFRAME_EVENT_PROVIDER_START, vdec->slave);
2508 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
2509 }
2510 } else {
2511 if (vdec->reset) {
2512 vdec->reset(vdec);
2513 if (vdec->slave)
2514 vdec->slave->reset(vdec->slave);
2515 }
2516 }
2517
2518 vdec_connect(vdec);
2519
2520 vdec_frame_check_init(vdec);
2521
2522 return 0;
2523}
2524EXPORT_SYMBOL(vdec_v4l2_reset);
2525
2526void vdec_free_cmabuf(void)
2527{
2528 mutex_lock(&vdec_mutex);
2529
2530 /*if (inited_vcodec_num > 0) {
2531 mutex_unlock(&vdec_mutex);
2532 return;
2533 }*/
2534 mutex_unlock(&vdec_mutex);
2535}
2536
2537void vdec_core_request(struct vdec_s *vdec, unsigned long mask)
2538{
2539 vdec->core_mask |= mask;
2540
2541 if (vdec->slave)
2542 vdec->slave->core_mask |= mask;
2543 if (vdec_core->parallel_dec == 1) {
2544 if (mask & CORE_MASK_COMBINE)
2545 vdec_core->vdec_combine_flag++;
2546 }
2547
2548}
2549EXPORT_SYMBOL(vdec_core_request);
2550
2551int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
2552{
2553 vdec->core_mask &= ~mask;
2554
2555 if (vdec->slave)
2556 vdec->slave->core_mask &= ~mask;
2557 if (vdec_core->parallel_dec == 1) {
2558 if (mask & CORE_MASK_COMBINE)
2559 vdec_core->vdec_combine_flag--;
2560 }
2561 return 0;
2562}
2563EXPORT_SYMBOL(vdec_core_release);
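
/*
 * Illustrative pairing (a sketch, not a prescribed flow): a decoder driver
 * requests its cores once and releases them with the same mask when it goes
 * away.  The CORE_MASK_* core names are assumed from vdec.h; a decoder whose
 * cores must always be granted together also ORs in CORE_MASK_COMBINE:
 *
 *	vdec_core_request(vdec, CORE_MASK_VDEC_1 | CORE_MASK_HEVC |
 *			CORE_MASK_COMBINE);
 *	...
 *	vdec_core_release(vdec, vdec->core_mask);
 */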
2564
2565bool vdec_core_with_input(unsigned long mask)
2566{
2567 enum vdec_type_e type;
2568
2569 for (type = VDEC_1; type < VDEC_MAX; type++) {
2570 if ((mask & (1 << type)) && cores_with_input[type])
2571 return true;
2572 }
2573
2574 return false;
2575}
2576
2577void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
2578{
2579 unsigned long i;
2580 unsigned long t = mask;
2581 mutex_lock(&vdec_mutex);
2582 while (t) {
2583 i = __ffs(t);
2584 clear_bit(i, &vdec->active_mask);
2585 t &= ~(1 << i);
2586 }
2587
2588 if (vdec->active_mask == 0)
2589 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
2590
2591 mutex_unlock(&vdec_mutex);
2592}
2593EXPORT_SYMBOL(vdec_core_finish_run);
2594/*
2595 * find what core resources are available for vdec
2596 */
2597static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
2598 unsigned long active_mask)
2599{
2600 unsigned long mask = vdec->core_mask &
2601 ~CORE_MASK_COMBINE;
2602
2603 if (vdec->core_mask & CORE_MASK_COMBINE) {
2604 /* combined cores must be granted together */
2605 if ((mask & ~active_mask) == mask)
2606 return mask;
2607 else
2608 return 0;
2609 } else
2610 return mask & ~vdec->sched_mask & ~active_mask;
2611}
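
/*
 * Worked example for vdec_schedule_mask() (illustrative only; CORE_MASK_*
 * names assumed from vdec.h):
 *
 *	core_mask   = CORE_MASK_VDEC_1 | CORE_MASK_HEVC | CORE_MASK_COMBINE
 *	active_mask = CORE_MASK_HEVC	(HEVC core currently busy)
 *
 * mask becomes CORE_MASK_VDEC_1 | CORE_MASK_HEVC, and (mask & ~active_mask)
 * != mask, so 0 is returned: combined cores are only granted together.
 * Without CORE_MASK_COMBINE the free VDEC_1 bit would be offered on its own.
 */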
2612
2613/*
2614 *Decoder callback
2615 * Each decoder instance uses this callback to notify a status change, e.g.
2616 * when the decoder has finished using a HW resource.
2617 * A sample callback from a decoder driver follows:
2618 *
2619 * if (hw->vdec_cb) {
2620 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
2621 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
2622 * }
2623 */
2624static void vdec_callback(struct vdec_s *vdec, void *data)
2625{
2626 struct vdec_core_s *core = (struct vdec_core_s *)data;
2627
2628#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2629 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
2630#endif
2631
2632 up(&core->sem);
2633}
2634
2635static irqreturn_t vdec_isr(int irq, void *dev_id)
2636{
2637 struct vdec_isr_context_s *c =
2638 (struct vdec_isr_context_s *)dev_id;
2639 struct vdec_s *vdec = vdec_core->last_vdec;
2640 irqreturn_t ret = IRQ_HANDLED;
2641
2642 if (vdec_core->parallel_dec == 1) {
2643 if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
2644 vdec = vdec_core->active_hevc;
2645 else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
2646 vdec = vdec_core->active_vdec;
2647 else
2648 vdec = NULL;
2649 }
2650
2651 if (vdec) {
2652 if (atomic_read(&vdec->inrelease) > 0)
2653 return ret;
2654 atomic_set(&vdec->inirq_flag, 1);
2655 vdec->isr_ns = local_clock();
2656 }
2657 if (c->dev_isr) {
2658 ret = c->dev_isr(irq, c->dev_id);
2659 goto isr_done;
2660 }
2661
2662 if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
2663 (c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
2664 (c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
2665#if 0
2666 pr_warn("vdec interrupt w/o a valid receiver\n");
2667#endif
2668 goto isr_done;
2669 }
2670
2671 if (!vdec) {
2672#if 0
2673 pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
2674 core);
2675#endif
2676 goto isr_done;
2677 }
2678
2679 if (!vdec->irq_handler) {
2680#if 0
2681 pr_warn("vdec instance has no irq handle.\n");
2682#endif
2683 goto isr_done;
2684 }
2685
2686 ret = vdec->irq_handler(vdec, c->index);
2687isr_done:
2688 if (vdec)
2689 atomic_set(&vdec->inirq_flag, 0);
2690 return ret;
2691}
2692
2693static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
2694{
2695 struct vdec_isr_context_s *c =
2696 (struct vdec_isr_context_s *)dev_id;
2697 struct vdec_s *vdec = vdec_core->last_vdec;
2698 irqreturn_t ret = IRQ_HANDLED;
2699
2700 if (vdec_core->parallel_dec == 1) {
2701 if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
2702 vdec = vdec_core->active_hevc;
2703 else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
2704 vdec = vdec_core->active_vdec;
2705 else
2706 vdec = NULL;
2707 }
2708
2709 if (vdec) {
2710 u32 isr2tfn = 0;
2711 if (atomic_read(&vdec->inrelease) > 0)
2712 return ret;
2713 atomic_set(&vdec->inirq_thread_flag, 1);
2714 vdec->tfn_ns = local_clock();
2715 isr2tfn = vdec->tfn_ns - vdec->isr_ns;
2716 if (isr2tfn > 10000000)
2717 pr_err("!!!!!!! %s vdec_isr to %s took %u ns !!!\n",
2718 vdec->vf_provider_name, __func__, isr2tfn);
2719 }
2720 if (c->dev_threaded_isr) {
2721 ret = c->dev_threaded_isr(irq, c->dev_id);
2722 goto thread_isr_done;
2723 }
2724 if (!vdec)
2725 goto thread_isr_done;
2726
2727 if (!vdec->threaded_irq_handler)
2728 goto thread_isr_done;
2729 ret = vdec->threaded_irq_handler(vdec, c->index);
2730thread_isr_done:
2731 if (vdec)
2732 atomic_set(&vdec->inirq_thread_flag, 0);
2733 return ret;
2734}
2735
2736unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
2737{
2738 unsigned long ready_mask;
2739 struct vdec_input_s *input = &vdec->input;
2740 if ((vdec->status != VDEC_STATUS_CONNECTED) &&
2741 (vdec->status != VDEC_STATUS_ACTIVE))
2742 return false;
2743
2744 if (!vdec->run_ready)
2745 return false;
2746
2747 /* when crc32 error, block at error frame */
2748 if (vdec->vfc.err_crc_block)
2749 return false;
2750
2751 if ((vdec->slave || vdec->master) &&
2752 (vdec->sched == 0))
2753 return false;
2754#ifdef VDEC_DEBUG_SUPPORT
2755 inc_profi_count(mask, vdec->check_count);
2756#endif
2757 if (vdec_core_with_input(mask)) {
2758 /* check frame based input underrun */
2759 if (input && !input->eos && input_frame_based(input)
2760 && (!vdec_input_next_chunk(input))) {
2761#ifdef VDEC_DEBUG_SUPPORT
2762 inc_profi_count(mask, vdec->input_underrun_count);
2763#endif
2764 return false;
2765 }
2766 /* check streaming prepare level threshold if not EOS */
2767 if (input && input_stream_based(input) && !input->eos) {
2768 u32 rp, wp, level;
2769
2770 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
2771 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
2772 if (wp < rp)
2773 level = input->size + wp - rp;
2774 else
2775 level = wp - rp;
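			/*
			 * Illustrative wrap-around example: with input->size = 0x100000,
			 * rp = 0xf0000 and wp = 0x10000 the write pointer has wrapped,
			 * so level = 0x100000 + 0x10000 - 0xf0000 = 0x20000 bytes.
			 */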
2776
2777 if ((level < input->prepare_level) &&
2778 (pts_get_rec_num(PTS_TYPE_VIDEO,
2779 vdec->input.total_rd_count) < 2)) {
2780 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
2781#ifdef VDEC_DEBUG_SUPPORT
2782 inc_profi_count(mask, vdec->input_underrun_count);
2783 if (step_mode & 0x200) {
2784 if ((step_mode & 0xff) == vdec->id) {
2785 step_mode |= 0xff;
2786 return mask;
2787 }
2788 }
2789#endif
2790 return false;
2791 } else if (level > input->prepare_level)
2792 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
2793 }
2794 }
2795
2796 if (step_mode) {
2797 if ((step_mode & 0xff) != vdec->id)
2798 return 0;
2799 step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
2800 }
2801
2802	/*step_mode &= ~0xff; does not work when id is 0, removed*/
2803
2804#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2805 vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
2806#endif
2807
2808 ready_mask = vdec->run_ready(vdec, mask) & mask;
2809#ifdef VDEC_DEBUG_SUPPORT
2810 if (ready_mask != mask)
2811 inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
2812#endif
2813#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2814 if (ready_mask)
2815 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
2816#endif
2817
2818 return ready_mask;
2819}
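
/*
 * Illustrative sketch (hypothetical names) of the run_ready() contract used
 * above: a decoder driver returns the subset of the offered core mask it can
 * actually use right now, or 0 to be skipped in this scheduling round.
 *
 *	static unsigned long my_run_ready(struct vdec_s *vdec, unsigned long mask)
 *	{
 *		if (!my_free_output_buffer_available(vdec))
 *			return 0;
 *		return mask & CORE_MASK_VDEC_1;	// CORE_MASK_* assumed from vdec.h
 *	}
 */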
2820
2821/* bridge on/off vdec's interrupt processing to vdec core */
2822static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
2823 bool enable)
2824{
2825 enum vdec_type_e type;
2826
2827 for (type = VDEC_1; type < VDEC_MAX; type++) {
2828 if (mask & (1 << type)) {
2829 struct vdec_isr_context_s *c =
2830 &vdec_core->isr_context[cores_int[type]];
2831 if (enable)
2832 c->vdec = vdec;
2833 else if (c->vdec == vdec)
2834 c->vdec = NULL;
2835 }
2836 }
2837}
2838
2839/*
2840 * Set up secure protection for each decoder instance running.
2841 * Note: The operation from REE side only resets memory access
2842 * to a default policy and even a non_secure type will still be
2843 * changed to secure type automatically when secure source is
2844 * detected inside TEE.
2845 * Perform need_more_data checking and set the flag if the decoder
2846 * is not consuming data.
2847 */
2848void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
2849{
2850 struct vdec_input_s *input = &vdec->input;
2851 int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
2852 DMC_DEV_TYPE_NON_SECURE;
2853
2854 vdec_route_interrupt(vdec, mask, true);
2855
2856 if (!vdec_core_with_input(mask))
2857 return;
2858
2859	if (secure && vdec_stream_based(vdec) && force_nosecure_even_drm) {
2860		/* The Verimatrix Ultra web client (HLS) plays in drmmode and uses the HW
2861		 * demux.  In drmmode the VDEC can only access secure memory, but the HW
2862		 * demux parses ES data into a non-secure buffer, so playback failed.
2863		 * Force non-secure access here; if the HW demux can parse ES data into
2864		 * a secure buffer in the future, make VDEC r/w secure. */
2865 secure = 0;
2866 //pr_debug("allow VDEC can access nosecure even in drmmode\n");
2867 }
2868 if (input->target == VDEC_INPUT_TARGET_VLD)
2869 tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
2870 else if (input->target == VDEC_INPUT_TARGET_HEVC)
2871 tee_config_device_secure(DMC_DEV_ID_HEVC, secure);
2872
2873 if (vdec_stream_based(vdec) &&
2874 ((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
2875 (vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
2876 vdec->need_more_data |= VDEC_NEED_MORE_DATA;
2877 }
2878
2879 vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
2880 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
2881}
2882
2883/* The vdec core thread manages all decoder instances in the active list. When
2884 * a vdec is added into the active list, it can only be in one of two states:
2885 * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run)
2886 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
2887 * Removing a decoder from active list is only performed within core thread.
2888 * Adding a decoder into active list is performed from user thread.
2889 */
2890static int vdec_core_thread(void *data)
2891{
2892 struct vdec_core_s *core = (struct vdec_core_s *)data;
2893 struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
2894 unsigned long flags;
2895 int i;
2896
2897 sched_setscheduler(current, SCHED_FIFO, &param);
2898
2899 allow_signal(SIGTERM);
2900
2901 while (down_interruptible(&core->sem) == 0) {
2902 struct vdec_s *vdec, *tmp, *worker;
2903 unsigned long sched_mask = 0;
2904 LIST_HEAD(disconnecting_list);
2905
2906 if (kthread_should_stop())
2907 break;
2908 mutex_lock(&vdec_mutex);
2909
2910 if (core->parallel_dec == 1) {
2911 for (i = VDEC_1; i < VDEC_MAX; i++) {
2912 core->power_ref_mask =
2913 core->power_ref_count[i] > 0 ?
2914 (core->power_ref_mask | (1 << i)) :
2915 (core->power_ref_mask & ~(1 << i));
2916 }
2917 }
2918 /* clean up previous active vdec's input */
2919 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
2920 unsigned long mask = vdec->sched_mask &
2921 (vdec->active_mask ^ vdec->sched_mask);
2922
2923 vdec_route_interrupt(vdec, mask, false);
2924
2925#ifdef VDEC_DEBUG_SUPPORT
2926 update_profi_clk_stop(vdec, mask, get_current_clk());
2927#endif
2928 /*
2929 * If decoder released some core resources (mask), then
2930 * check if these core resources are associated
2931 * with any input side and do input clean up accordingly
2932 */
2933 if (vdec_core_with_input(mask)) {
2934 struct vdec_input_s *input = &vdec->input;
2935 while (!list_empty(
2936 &input->vframe_chunk_list)) {
2937 struct vframe_chunk_s *chunk =
2938 vdec_input_next_chunk(input);
2939 if (chunk && (chunk->flag &
2940 VFRAME_CHUNK_FLAG_CONSUMED))
2941 vdec_input_release_chunk(input,
2942 chunk);
2943 else
2944 break;
2945 }
2946
2947 vdec_save_input_context(vdec);
2948 }
2949
2950 vdec->sched_mask &= ~mask;
2951 core->sched_mask &= ~mask;
2952 }
2953 vdec_update_buff_status();
2954 /*
2955 *todo:
2956 * this is the case when the decoder is in active mode and
2957 * the system side wants to stop it. Currently we rely on
2958 * the decoder instance to go back to VDEC_STATUS_CONNECTED
2959		 * from VDEC_STATUS_ACTIVE on its own. However, if for some
2960		 * reason the decoder cannot exit by itself (dead decoding
2961 * or whatever), then we may have to add another vdec API
2962 * to kill the vdec and release its HW resource and make it
2963 * become inactive again.
2964 * if ((core->active_vdec) &&
2965 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
2966 * }
2967 */
2968
2969 /* check disconnected decoders */
2970 flags = vdec_core_lock(vdec_core);
2971 list_for_each_entry_safe(vdec, tmp,
2972 &core->connected_vdec_list, list) {
2973 if ((vdec->status == VDEC_STATUS_CONNECTED) &&
2974 (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
2975 if (core->parallel_dec == 1) {
2976 if (vdec_core->active_hevc == vdec)
2977 vdec_core->active_hevc = NULL;
2978 if (vdec_core->active_vdec == vdec)
2979 vdec_core->active_vdec = NULL;
2980 }
2981 if (core->last_vdec == vdec)
2982 core->last_vdec = NULL;
2983 list_move(&vdec->list, &disconnecting_list);
2984 }
2985 }
2986 vdec_core_unlock(vdec_core, flags);
2987 mutex_unlock(&vdec_mutex);
2988 /* elect next vdec to be scheduled */
2989 vdec = core->last_vdec;
2990 if (vdec) {
2991 vdec = list_entry(vdec->list.next, struct vdec_s, list);
2992 list_for_each_entry_from(vdec,
2993 &core->connected_vdec_list, list) {
2994 sched_mask = vdec_schedule_mask(vdec,
2995 core->sched_mask);
2996 if (!sched_mask)
2997 continue;
2998 sched_mask = vdec_ready_to_run(vdec,
2999 sched_mask);
3000 if (sched_mask)
3001 break;
3002 }
3003
3004 if (&vdec->list == &core->connected_vdec_list)
3005 vdec = NULL;
3006 }
3007
3008 if (!vdec) {
3009 /* search from beginning */
3010 list_for_each_entry(vdec,
3011 &core->connected_vdec_list, list) {
3012 sched_mask = vdec_schedule_mask(vdec,
3013 core->sched_mask);
3014 if (vdec == core->last_vdec) {
3015 if (!sched_mask) {
3016 vdec = NULL;
3017 break;
3018 }
3019
3020 sched_mask = vdec_ready_to_run(vdec,
3021 sched_mask);
3022
3023 if (!sched_mask) {
3024 vdec = NULL;
3025 break;
3026 }
3027 break;
3028 }
3029
3030 if (!sched_mask)
3031 continue;
3032
3033 sched_mask = vdec_ready_to_run(vdec,
3034 sched_mask);
3035 if (sched_mask)
3036 break;
3037 }
3038
3039 if (&vdec->list == &core->connected_vdec_list)
3040 vdec = NULL;
3041 }
3042
3043 worker = vdec;
3044
3045 if (vdec) {
3046 unsigned long mask = sched_mask;
3047 unsigned long i;
3048
3049 /* setting active_mask should be atomic.
3050 * it can be modified by decoder driver callbacks.
3051 */
3052 while (sched_mask) {
3053 i = __ffs(sched_mask);
3054 set_bit(i, &vdec->active_mask);
3055 sched_mask &= ~(1 << i);
3056 }
3057
3058 /* vdec's sched_mask is only set from core thread */
3059 vdec->sched_mask |= mask;
3060 if (core->last_vdec) {
3061 if ((core->last_vdec != vdec) &&
3062 (core->last_vdec->mc_type != vdec->mc_type))
3063 vdec->mc_loaded = 0;/*clear for reload firmware*/
3064 } else
3065 vdec->mc_loaded = 0;
3066 core->last_vdec = vdec;
3067 if (debug & 2)
3068				vdec->mc_loaded = 0;/*always reload firmware*/
3069 vdec_set_status(vdec, VDEC_STATUS_ACTIVE);
3070
3071 core->sched_mask |= mask;
3072 if (core->parallel_dec == 1)
3073 vdec_save_active_hw(vdec);
3074#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
3075 vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
3076#endif
3077 vdec_prepare_run(vdec, mask);
3078#ifdef VDEC_DEBUG_SUPPORT
3079 inc_profi_count(mask, vdec->run_count);
3080 update_profi_clk_run(vdec, mask, get_current_clk());
3081#endif
3082 vdec->run(vdec, mask, vdec_callback, core);
3083
3084
3085 /* we have some cores scheduled, keep working until
3086 * all vdecs are checked with no cores to schedule
3087 */
3088 if (core->parallel_dec == 1) {
3089 if (vdec_core->vdec_combine_flag == 0)
3090 up(&core->sem);
3091 } else
3092 up(&core->sem);
3093 }
3094
3095 /* remove disconnected decoder from active list */
3096 list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
3097 list_del(&vdec->list);
3098 vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
3099 /*core->last_vdec = NULL;*/
3100 complete(&vdec->inactive_done);
3101 }
3102
3103 /* if there is no new work scheduled and nothing
3104 * is running, sleep 20ms
3105 */
3106 if (core->parallel_dec == 1) {
3107 if (vdec_core->vdec_combine_flag == 0) {
3108 if ((!worker) &&
3109 ((core->sched_mask != core->power_ref_mask)) &&
3110 (atomic_read(&vdec_core->vdec_nr) > 0) &&
3111 ((core->buff_flag | core->stream_buff_flag) &
3112 (core->sched_mask ^ core->power_ref_mask))) {
3113 usleep_range(1000, 2000);
3114 up(&core->sem);
3115 }
3116 } else {
3117 if ((!worker) && (!core->sched_mask) &&
3118 (atomic_read(&vdec_core->vdec_nr) > 0) &&
3119 (core->buff_flag | core->stream_buff_flag)) {
3120 usleep_range(1000, 2000);
3121 up(&core->sem);
3122 }
3123 }
3124 } else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
3125 usleep_range(1000, 2000);
3126 up(&core->sem);
3127 }
3128
3129 }
3130
3131 return 0;
3132}
3133
3134#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
3135void vdec_power_reset(void)
3136{
3137 /* enable vdec1 isolation */
3138 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3139 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
3140 /* power off vdec1 memories */
3141 WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
3142 /* vdec1 power off */
3143 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3144 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);
3145
3146 if (has_vdec2()) {
3147 /* enable vdec2 isolation */
3148 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3149 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
3150 /* power off vdec2 memories */
3151 WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
3152 /* vdec2 power off */
3153 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3154 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
3155 }
3156
3157 if (has_hdec()) {
3158 /* enable hcodec isolation */
3159 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3160 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
3161 /* power off hcodec memories */
3162 WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
3163 /* hcodec power off */
3164 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3165 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
3166 }
3167
3168 if (has_hevc_vdec()) {
3169 /* enable hevc isolation */
3170 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3171 READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
3172 /* power off hevc memories */
3173 WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
3174 /* hevc power off */
3175 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3176 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
3177 }
3178}
3179EXPORT_SYMBOL(vdec_power_reset);
3180
3181
3182void vdec_poweron(enum vdec_type_e core)
3183{
3184 if (core >= VDEC_MAX)
3185 return;
3186
3187 mutex_lock(&vdec_mutex);
3188
3189 vdec_core->power_ref_count[core]++;
3190 if (vdec_core->power_ref_count[core] > 1) {
3191 mutex_unlock(&vdec_mutex);
3192 return;
3193 }
3194
3195 if (vdec_on(core)) {
3196 mutex_unlock(&vdec_mutex);
3197 return;
3198 }
3199
3200 vdec_core->pm->power_on(vdec_core->cma_dev, core);
3201
3202 mutex_unlock(&vdec_mutex);
3203}
3204EXPORT_SYMBOL(vdec_poweron);
3205
3206void vdec_poweroff(enum vdec_type_e core)
3207{
3208 if (core >= VDEC_MAX)
3209 return;
3210
3211 mutex_lock(&vdec_mutex);
3212
3213 vdec_core->power_ref_count[core]--;
3214 if (vdec_core->power_ref_count[core] > 0) {
3215 mutex_unlock(&vdec_mutex);
3216 return;
3217 }
3218
3219 vdec_core->pm->power_off(vdec_core->cma_dev, core);
3220
3221 mutex_unlock(&vdec_mutex);
3222}
3223EXPORT_SYMBOL(vdec_poweroff);
3224
3225bool vdec_on(enum vdec_type_e core)
3226{
3227 return vdec_core->pm->power_state(vdec_core->cma_dev, core);
3228}
3229EXPORT_SYMBOL(vdec_on);
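
/*
 * Illustrative pairing (sketch only): power handling is reference counted
 * per core, so every vdec_poweron() must be matched by a vdec_poweroff()
 * on the same core once the hardware is no longer needed:
 *
 *	vdec_poweron(VDEC_HEVC);
 *	... program and run the HEVC core ...
 *	vdec_poweroff(VDEC_HEVC);
 */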
3230
3231#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
3232void vdec_poweron(enum vdec_type_e core)
3233{
3234 ulong flags;
3235
3236 spin_lock_irqsave(&lock, flags);
3237
3238 if (core == VDEC_1) {
3239 /* vdec1 soft reset */
3240 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
3241 WRITE_VREG(DOS_SW_RESET0, 0);
3242 /* enable vdec1 clock */
3243 vdec_clock_enable();
3244 /* reset DOS top registers */
3245 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
3246 } else if (core == VDEC_2) {
3247 /* vdec2 soft reset */
3248 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
3249 WRITE_VREG(DOS_SW_RESET2, 0);
3250 /* enable vdec2 clock */
3251 vdec2_clock_enable();
3252 /* reset DOS top registers */
3253 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
3254 } else if (core == VDEC_HCODEC) {
3255 /* hcodec soft reset */
3256 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
3257 WRITE_VREG(DOS_SW_RESET1, 0);
3258 /* enable hcodec clock */
3259 hcodec_clock_enable();
3260 }
3261
3262 spin_unlock_irqrestore(&lock, flags);
3263}
3264
3265void vdec_poweroff(enum vdec_type_e core)
3266{
3267 ulong flags;
3268
3269 spin_lock_irqsave(&lock, flags);
3270
3271 if (core == VDEC_1) {
3272 /* disable vdec1 clock */
3273 vdec_clock_off();
3274 } else if (core == VDEC_2) {
3275 /* disable vdec2 clock */
3276 vdec2_clock_off();
3277 } else if (core == VDEC_HCODEC) {
3278 /* disable hcodec clock */
3279 hcodec_clock_off();
3280 }
3281
3282 spin_unlock_irqrestore(&lock, flags);
3283}
3284
3285bool vdec_on(enum vdec_type_e core)
3286{
3287 bool ret = false;
3288
3289 if (core == VDEC_1) {
3290 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
3291 ret = true;
3292 } else if (core == VDEC_2) {
3293 if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
3294 ret = true;
3295 } else if (core == VDEC_HCODEC) {
3296 if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
3297 ret = true;
3298 }
3299
3300 return ret;
3301}
3302#endif
3303
3304int vdec_source_changed(int format, int width, int height, int fps)
3305{
3306 /* todo: add level routines for clock adjustment per chips */
3307 int ret = -1;
3308 static int on_setting;
3309
3310 if (on_setting > 0)
3311 return ret;/*on changing clk,ignore this change*/
3312
3313 if (vdec_source_get(VDEC_1) == width * height * fps)
3314 return ret;
3315
3316
3317 on_setting = 1;
3318 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3319 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
3320 width, height, fps, vdec_clk_get(VDEC_1));
3321 on_setting = 0;
3322 return ret;
3323
3324}
3325EXPORT_SYMBOL(vdec_source_changed);
3326
3327void vdec_reset_core(struct vdec_s *vdec)
3328{
3329 unsigned long flags;
3330 unsigned int mask = 0;
3331
3332 mask = 1 << 13; /*bit13: DOS VDEC interface*/
3333 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
3334 mask = 1 << 21; /*bit21: DOS VDEC interface*/
3335
3336 spin_lock_irqsave(&vdec_spin_lock, flags);
3337 codec_dmcbus_write(DMC_REQ_CTRL,
3338 codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
3339 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3340
3341 while (!(codec_dmcbus_read(DMC_CHAN_STS)
3342 & mask))
3343 ;
3344 /*
3345 * 2: assist
3346 * 3: vld_reset
3347 * 4: vld_part_reset
3348 * 5: vfifo reset
3349 * 6: iqidct
3350 * 7: mc
3351 * 8: dblk
3352 * 9: pic_dc
3353 * 10: psc
3354 * 11: mcpu
3355 * 12: ccpu
3356 * 13: ddr
3357 * 14: afifo
3358 */
3359 if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3360 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) {
3361 WRITE_VREG(DOS_SW_RESET0, (1<<3)|(1<<4)|(1<<5)|(1<<7)|(1<<8)|(1<<9));
3362 } else {
3363 WRITE_VREG(DOS_SW_RESET0,
3364 (1<<3)|(1<<4)|(1<<5));
3365 }
3366 WRITE_VREG(DOS_SW_RESET0, 0);
3367
3368 spin_lock_irqsave(&vdec_spin_lock, flags);
3369 codec_dmcbus_write(DMC_REQ_CTRL,
3370 codec_dmcbus_read(DMC_REQ_CTRL) | mask);
3371 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3372}
3373EXPORT_SYMBOL(vdec_reset_core);
3374
3375void hevc_mmu_dma_check(struct vdec_s *vdec)
3376{
3377 ulong timeout;
3378 u32 data;
3379 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A)
3380 return;
3381 timeout = jiffies + HZ/100;
3382 while (1) {
3383 data = READ_VREG(HEVC_CM_CORE_STATUS);
3384 if ((data & 0x1) == 0)
3385 break;
3386 if (time_after(jiffies, timeout)) {
3387 if (debug & 0x10)
3388				pr_info("%s: timeout waiting for sao mmu dma idle\n", __func__);
3389 break;
3390 }
3391 }
3392 /*disable sao mmu dma */
3393 CLEAR_VREG_MASK(HEVC_SAO_MMU_DMA_CTRL, 1 << 0);
3394 timeout = jiffies + HZ/100;
3395 while (1) {
3396 data = READ_VREG(HEVC_SAO_MMU_DMA_STATUS);
3397 if ((data & 0x1))
3398 break;
3399 if (time_after(jiffies, timeout)) {
3400 if (debug & 0x10)
3401 pr_err("%s sao mmu dma timeout, num_buf_used = 0x%x\n",
3402 __func__, (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16));
3403 break;
3404 }
3405 }
3406}
3407EXPORT_SYMBOL(hevc_mmu_dma_check);
3408void hevc_reset_core(struct vdec_s *vdec)
3409{
3410 unsigned long flags;
3411 unsigned int mask = 0;
3412 mask = 1 << 4; /*bit4: hevc*/
3413 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
3414 mask |= 1 << 8; /*bit8: hevcb*/
3415 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
3416 spin_lock_irqsave(&vdec_spin_lock, flags);
3417 codec_dmcbus_write(DMC_REQ_CTRL,
3418 codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
3419 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3420
3421 while (!(codec_dmcbus_read(DMC_CHAN_STS)
3422 & mask))
3423 ;
3424
3425 if (vdec == NULL || input_frame_based(vdec))
3426 WRITE_VREG(HEVC_STREAM_CONTROL, 0);
3427
3428 /*
3429 * 2: assist
3430 * 3: parser
3431 * 4: parser_state
3432 * 8: dblk
3433 * 11:mcpu
3434 * 12:ccpu
3435 * 13:ddr
3436 * 14:iqit
3437 * 15:ipp
3438 * 17:qdct
3439 * 18:mpred
3440 * 19:sao
3441 * 24:hevc_afifo
3442 */
3443 WRITE_VREG(DOS_SW_RESET3,
3444 (1<<3)|(1<<4)|(1<<8)|(1<<11)|
3445 (1<<12)|(1<<13)|(1<<14)|(1<<15)|
3446 (1<<17)|(1<<18)|(1<<19)|(1<<24));
3447
3448 WRITE_VREG(DOS_SW_RESET3, 0);
3449
3450
3451 spin_lock_irqsave(&vdec_spin_lock, flags);
3452 codec_dmcbus_write(DMC_REQ_CTRL,
3453 codec_dmcbus_read(DMC_REQ_CTRL) | mask);
3454 spin_unlock_irqrestore(&vdec_spin_lock, flags);
3455
3456}
3457EXPORT_SYMBOL(hevc_reset_core);
3458
3459int vdec2_source_changed(int format, int width, int height, int fps)
3460{
3461 int ret = -1;
3462 static int on_setting;
3463
3464 if (has_vdec2()) {
3465 /* todo: add level routines for clock adjustment per chips */
3466 if (on_setting != 0)
3467 return ret;/*on changing clk,ignore this change*/
3468
3469 if (vdec_source_get(VDEC_2) == width * height * fps)
3470 return ret;
3471
3472 on_setting = 1;
3473 ret = vdec_source_changed_for_clk_set(format,
3474 width, height, fps);
3475 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
3476 width, height, fps, vdec_clk_get(VDEC_2));
3477 on_setting = 0;
3478 return ret;
3479 }
3480 return 0;
3481}
3482EXPORT_SYMBOL(vdec2_source_changed);
3483
3484int hevc_source_changed(int format, int width, int height, int fps)
3485{
3486 /* todo: add level routines for clock adjustment per chips */
3487 int ret = -1;
3488 static int on_setting;
3489
3490 if (on_setting != 0)
3491 return ret;/*on changing clk,ignore this change*/
3492
3493 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
3494 return ret;
3495
3496 on_setting = 1;
3497 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3498 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
3499 width, height, fps, vdec_clk_get(VDEC_HEVC));
3500 on_setting = 0;
3501
3502 return ret;
3503}
3504EXPORT_SYMBOL(hevc_source_changed);
3505
3506static struct am_reg am_risc[] = {
3507 {"MSP", 0x300},
3508 {"MPSR", 0x301},
3509 {"MCPU_INT_BASE", 0x302},
3510 {"MCPU_INTR_GRP", 0x303},
3511 {"MCPU_INTR_MSK", 0x304},
3512 {"MCPU_INTR_REQ", 0x305},
3513 {"MPC-P", 0x306},
3514 {"MPC-D", 0x307},
3515 {"MPC_E", 0x308},
3516 {"MPC_W", 0x309},
3517 {"CSP", 0x320},
3518 {"CPSR", 0x321},
3519 {"CCPU_INT_BASE", 0x322},
3520 {"CCPU_INTR_GRP", 0x323},
3521 {"CCPU_INTR_MSK", 0x324},
3522 {"CCPU_INTR_REQ", 0x325},
3523 {"CPC-P", 0x326},
3524 {"CPC-D", 0x327},
3525 {"CPC_E", 0x328},
3526 {"CPC_W", 0x329},
3527 {"AV_SCRATCH_0", 0x09c0},
3528 {"AV_SCRATCH_1", 0x09c1},
3529 {"AV_SCRATCH_2", 0x09c2},
3530 {"AV_SCRATCH_3", 0x09c3},
3531 {"AV_SCRATCH_4", 0x09c4},
3532 {"AV_SCRATCH_5", 0x09c5},
3533 {"AV_SCRATCH_6", 0x09c6},
3534 {"AV_SCRATCH_7", 0x09c7},
3535 {"AV_SCRATCH_8", 0x09c8},
3536 {"AV_SCRATCH_9", 0x09c9},
3537 {"AV_SCRATCH_A", 0x09ca},
3538 {"AV_SCRATCH_B", 0x09cb},
3539 {"AV_SCRATCH_C", 0x09cc},
3540 {"AV_SCRATCH_D", 0x09cd},
3541 {"AV_SCRATCH_E", 0x09ce},
3542 {"AV_SCRATCH_F", 0x09cf},
3543 {"AV_SCRATCH_G", 0x09d0},
3544 {"AV_SCRATCH_H", 0x09d1},
3545 {"AV_SCRATCH_I", 0x09d2},
3546 {"AV_SCRATCH_J", 0x09d3},
3547 {"AV_SCRATCH_K", 0x09d4},
3548 {"AV_SCRATCH_L", 0x09d5},
3549 {"AV_SCRATCH_M", 0x09d6},
3550 {"AV_SCRATCH_N", 0x09d7},
3551};
3552
3553static ssize_t amrisc_regs_show(struct class *class,
3554 struct class_attribute *attr, char *buf)
3555{
3556 char *pbuf = buf;
3557 struct am_reg *regs = am_risc;
3558 int rsize = sizeof(am_risc) / sizeof(struct am_reg);
3559 int i;
3560 unsigned int val;
3561 ssize_t ret;
3562
3563 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
3564 mutex_lock(&vdec_mutex);
3565 if (!vdec_on(VDEC_1)) {
3566 mutex_unlock(&vdec_mutex);
3567 pbuf += sprintf(pbuf, "amrisc is power off\n");
3568 ret = pbuf - buf;
3569 return ret;
3570 }
3571 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3572 /*TODO:M6 define */
3573 /*
3574 * switch_mod_gate_by_type(MOD_VDEC, 1);
3575 */
3576 amports_switch_gate("vdec", 1);
3577 }
3578 pbuf += sprintf(pbuf, "amrisc registers show:\n");
3579 for (i = 0; i < rsize; i++) {
3580 val = READ_VREG(regs[i].offset);
3581 pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
3582 regs[i].name, regs[i].offset, val, val);
3583 }
3584 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
3585 mutex_unlock(&vdec_mutex);
3586 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3587 /*TODO:M6 define */
3588 /*
3589 * switch_mod_gate_by_type(MOD_VDEC, 0);
3590 */
3591 amports_switch_gate("vdec", 0);
3592 }
3593 ret = pbuf - buf;
3594 return ret;
3595}
3596
3597static ssize_t dump_trace_show(struct class *class,
3598 struct class_attribute *attr, char *buf)
3599{
3600 int i;
3601 char *pbuf = buf;
3602 ssize_t ret;
3603 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
3604
3605 if (!trace_buf) {
3606		pbuf += sprintf(pbuf, "no memory for trace buffer\n");
3607 ret = pbuf - buf;
3608 return ret;
3609 }
3610 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
3611 mutex_lock(&vdec_mutex);
3612 if (!vdec_on(VDEC_1)) {
3613 mutex_unlock(&vdec_mutex);
3614 kfree(trace_buf);
3615 pbuf += sprintf(pbuf, "amrisc is power off\n");
3616 ret = pbuf - buf;
3617 return ret;
3618 }
3619 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3620 /*TODO:M6 define */
3621 /*
3622 * switch_mod_gate_by_type(MOD_VDEC, 1);
3623 */
3624 amports_switch_gate("vdec", 1);
3625 }
3626 pr_info("dump trace steps:%d start\n", debug_trace_num);
3627 i = 0;
3628 while (i <= debug_trace_num - 16) {
3629 trace_buf[i] = READ_VREG(MPC_E);
3630 trace_buf[i + 1] = READ_VREG(MPC_E);
3631 trace_buf[i + 2] = READ_VREG(MPC_E);
3632 trace_buf[i + 3] = READ_VREG(MPC_E);
3633 trace_buf[i + 4] = READ_VREG(MPC_E);
3634 trace_buf[i + 5] = READ_VREG(MPC_E);
3635 trace_buf[i + 6] = READ_VREG(MPC_E);
3636 trace_buf[i + 7] = READ_VREG(MPC_E);
3637 trace_buf[i + 8] = READ_VREG(MPC_E);
3638 trace_buf[i + 9] = READ_VREG(MPC_E);
3639 trace_buf[i + 10] = READ_VREG(MPC_E);
3640 trace_buf[i + 11] = READ_VREG(MPC_E);
3641 trace_buf[i + 12] = READ_VREG(MPC_E);
3642 trace_buf[i + 13] = READ_VREG(MPC_E);
3643 trace_buf[i + 14] = READ_VREG(MPC_E);
3644 trace_buf[i + 15] = READ_VREG(MPC_E);
3645 i += 16;
3646	}
3647 pr_info("dump trace steps:%d finished\n", debug_trace_num);
3648 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
3649 mutex_unlock(&vdec_mutex);
3650 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
3651 /*TODO:M6 define */
3652 /*
3653 * switch_mod_gate_by_type(MOD_VDEC, 0);
3654 */
3655 amports_switch_gate("vdec", 0);
3656 }
3657 for (i = 0; i < debug_trace_num; i++) {
3658 if (i % 4 == 0) {
3659 if (i % 16 == 0)
3660 pbuf += sprintf(pbuf, "\n");
3661 else if (i % 8 == 0)
3662 pbuf += sprintf(pbuf, " ");
3663 else /* 4 */
3664 pbuf += sprintf(pbuf, " ");
3665 }
3666 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
3667 }
3670 kfree(trace_buf);
3671 pbuf += sprintf(pbuf, "\n");
3672 ret = pbuf - buf;
3673 return ret;
3674}
3675
3676static ssize_t clock_level_show(struct class *class,
3677 struct class_attribute *attr, char *buf)
3678{
3679 char *pbuf = buf;
3680	ssize_t ret;
3681
3682 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
3683
3684 if (has_vdec2())
3685 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
3686
3687 if (has_hevc_vdec())
3688 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
3689
3690 ret = pbuf - buf;
3691 return ret;
3692}
3693
3694static ssize_t enable_mvdec_info_show(struct class *cla,
3695 struct class_attribute *attr, char *buf)
3696{
3697 return sprintf(buf, "%d\n", enable_mvdec_info);
3698}
3699
3700static ssize_t enable_mvdec_info_store(struct class *cla,
3701 struct class_attribute *attr,
3702 const char *buf, size_t count)
3703{
3704 int r;
3705 int val;
3706
3707 r = kstrtoint(buf, 0, &val);
3708 if (r < 0)
3709 return -EINVAL;
3710 enable_mvdec_info = val;
3711
3712 return count;
3713}
3714static ssize_t poweron_clock_level_store(struct class *class,
3715 struct class_attribute *attr,
3716 const char *buf, size_t size)
3717{
3718 unsigned int val;
3719 ssize_t ret;
3720
3721 /*ret = sscanf(buf, "%d", &val);*/
3722 ret = kstrtoint(buf, 0, &val);
3723
3724 if (ret != 0)
3725 return -EINVAL;
3726 poweron_clock_level = val;
3727 return size;
3728}
3729
3730static ssize_t poweron_clock_level_show(struct class *class,
3731 struct class_attribute *attr, char *buf)
3732{
3733 return sprintf(buf, "%d\n", poweron_clock_level);
3734}
3735
3736/*
3737 * if keep_vdec_mem == 1,
3738 * never release the vdec
3739 * 64M memory, to allow fast start of playback.
3740 */
3741static ssize_t keep_vdec_mem_store(struct class *class,
3742 struct class_attribute *attr,
3743 const char *buf, size_t size)
3744{
3745 unsigned int val;
3746 ssize_t ret;
3747
3748 /*ret = sscanf(buf, "%d", &val);*/
3749 ret = kstrtoint(buf, 0, &val);
3750 if (ret != 0)
3751 return -EINVAL;
3752 keep_vdec_mem = val;
3753 return size;
3754}
3755
3756static ssize_t keep_vdec_mem_show(struct class *class,
3757 struct class_attribute *attr, char *buf)
3758{
3759 return sprintf(buf, "%d\n", keep_vdec_mem);
3760}
3761
3762#ifdef VDEC_DEBUG_SUPPORT
3763static ssize_t debug_store(struct class *class,
3764 struct class_attribute *attr,
3765 const char *buf, size_t size)
3766{
3767 struct vdec_s *vdec;
3768 struct vdec_core_s *core = vdec_core;
3769 unsigned long flags;
3770
3771 unsigned id;
3772 unsigned val;
3773 ssize_t ret;
3774 char cbuf[32];
3775
3776 cbuf[0] = 0;
3777	ret = sscanf(buf, "%31s %x %x", cbuf, &id, &val);
3778 /*pr_info(
3779 "%s(%s)=>ret %ld: %s, %x, %x\n",
3780 __func__, buf, ret, cbuf, id, val);*/
3781 if (strcmp(cbuf, "schedule") == 0) {
3782 pr_info("VDEC_DEBUG: force schedule\n");
3783 up(&core->sem);
3784 } else if (strcmp(cbuf, "power_off") == 0) {
3785 pr_info("VDEC_DEBUG: power off core %d\n", id);
3786 vdec_poweroff(id);
3787 } else if (strcmp(cbuf, "power_on") == 0) {
3788 pr_info("VDEC_DEBUG: power_on core %d\n", id);
3789 vdec_poweron(id);
3790 } else if (strcmp(cbuf, "wr") == 0) {
3791 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
3792 id, val);
3793 WRITE_VREG(id, val);
3794 } else if (strcmp(cbuf, "rd") == 0) {
3795 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
3796 id, READ_VREG(id));
3797 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
3798 pr_info(
3799 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
3800 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
3801 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
3802 }
3803
3804 flags = vdec_core_lock(vdec_core);
3805
3806 list_for_each_entry(vdec,
3807 &core->connected_vdec_list, list) {
3808 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
3809 if (((vdec->status == VDEC_STATUS_CONNECTED
3810 || vdec->status == VDEC_STATUS_ACTIVE)) &&
3811 (vdec->id == id)) {
3812 /*to add*/
3813 break;
3814 }
3815 }
3816 vdec_core_unlock(vdec_core, flags);
3817 return size;
3818}
3819
3820static ssize_t debug_show(struct class *class,
3821 struct class_attribute *attr, char *buf)
3822{
3823 char *pbuf = buf;
3824 struct vdec_s *vdec;
3825 struct vdec_core_s *core = vdec_core;
3826 unsigned long flags = vdec_core_lock(vdec_core);
3827 u64 tmp;
3828
3829 pbuf += sprintf(pbuf,
3830 "============== help:\n");
3831 pbuf += sprintf(pbuf,
3832		"'echo xxx > debug' usage:\n");
3833 pbuf += sprintf(pbuf,
3834 "schedule - trigger schedule thread to run\n");
3835 pbuf += sprintf(pbuf,
3836 "power_off core_num - call vdec_poweroff(core_num)\n");
3837 pbuf += sprintf(pbuf,
3838 "power_on core_num - call vdec_poweron(core_num)\n");
3839 pbuf += sprintf(pbuf,
3840 "wr adr val - call WRITE_VREG(adr, val)\n");
3841 pbuf += sprintf(pbuf,
3842 "rd adr - call READ_VREG(adr)\n");
3843 pbuf += sprintf(pbuf,
3844 "read_hevc_clk_reg - read HHI register for hevc clk\n");
3845 pbuf += sprintf(pbuf,
3846 "===================\n");
3847
3848 pbuf += sprintf(pbuf,
3849 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
3850 list_for_each_entry(vdec,
3851 &core->connected_vdec_list, list) {
3852 enum vdec_type_e type;
3853 if ((vdec->status == VDEC_STATUS_CONNECTED
3854 || vdec->status == VDEC_STATUS_ACTIVE)) {
3855 for (type = VDEC_1; type < VDEC_MAX; type++) {
3856 if (vdec->core_mask & (1 << type)) {
3857 pbuf += sprintf(pbuf, "%s(%d):",
3858 vdec->vf_provider_name, type);
3859 pbuf += sprintf(pbuf, "\t%d",
3860 vdec->check_count[type]);
3861 pbuf += sprintf(pbuf, "\t%d",
3862 vdec->run_count[type]);
3863 pbuf += sprintf(pbuf, "\t%d",
3864 vdec->input_underrun_count[type]);
3865 pbuf += sprintf(pbuf, "\t%d",
3866 vdec->not_run_ready_count[type]);
3867 tmp = vdec->run_clk[type] * 100;
3868 do_div(tmp, vdec->total_clk[type]);
3869 pbuf += sprintf(pbuf,
3870 "\t%d%%\n",
3871 vdec->total_clk[type] == 0 ? 0 :
3872 (u32)tmp);
3873 }
3874 }
3875 }
3876 }
3877
3878 vdec_core_unlock(vdec_core, flags);
3879 return pbuf - buf;
3880
3881}
3882#endif
3883
3884/* irq numbering is the same as in the .dts */
3885/*
3886 * interrupts = <0 3 1
3887 * 0 23 1
3888 * 0 32 1
3889 * 0 43 1
3890 * 0 44 1
3891 * 0 45 1>;
3892 * interrupt-names = "vsync",
3893 * "demux",
3894 * "parser",
3895 * "mailbox_0",
3896 * "mailbox_1",
3897 * "mailbox_2";
3898 */
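/*
 * Illustrative usage from a decoder driver (handler names and the hw pointer
 * are hypothetical); a registration without a threaded handler goes through
 * vdec_request_irq() further below:
 *
 *	if (vdec_request_threaded_irq(VDEC_IRQ_1, my_isr, my_threaded_isr,
 *			IRQF_ONESHOT, "vdec-mydec", (void *)hw))
 *		pr_err("vdec irq request failed\n");
 */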
3899s32 vdec_request_threaded_irq(enum vdec_irq_num num,
3900 irq_handler_t handler,
3901 irq_handler_t thread_fn,
3902 unsigned long irqflags,
3903 const char *devname, void *dev)
3904{
3905 s32 res_irq;
3906 s32 ret = 0;
3907
3908 if (num >= VDEC_IRQ_MAX) {
3909 pr_err("[%s] request irq error, irq num too big!", __func__);
3910 return -EINVAL;
3911 }
3912
3913 if (vdec_core->isr_context[num].irq < 0) {
3914 res_irq = platform_get_irq(
3915 vdec_core->vdec_core_platform_device, num);
3916 if (res_irq < 0) {
3917 pr_err("[%s] get irq error!", __func__);
3918 return -EINVAL;
3919 }
3920
3921 vdec_core->isr_context[num].irq = res_irq;
3922 vdec_core->isr_context[num].dev_isr = handler;
3923 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3924 vdec_core->isr_context[num].dev_id = dev;
3925
3926 ret = request_threaded_irq(res_irq,
3927 vdec_isr,
3928 vdec_thread_isr,
3929 (thread_fn) ? IRQF_ONESHOT : irqflags,
3930 devname,
3931 &vdec_core->isr_context[num]);
3932
3933 if (ret) {
3934 vdec_core->isr_context[num].irq = -1;
3935 vdec_core->isr_context[num].dev_isr = NULL;
3936 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3937 vdec_core->isr_context[num].dev_id = NULL;
3938
3939 pr_err("vdec irq register error for %s.\n", devname);
3940 return -EIO;
3941 }
3942 } else {
3943 vdec_core->isr_context[num].dev_isr = handler;
3944 vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
3945 vdec_core->isr_context[num].dev_id = dev;
3946 }
3947
3948 return ret;
3949}
3950EXPORT_SYMBOL(vdec_request_threaded_irq);
3951
3952s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
3953 const char *devname, void *dev)
3954{
3955 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
3956
3957 return vdec_request_threaded_irq(num,
3958 handler,
3959 NULL,/*no thread_fn*/
3960 IRQF_SHARED,
3961 devname,
3962 dev);
3963}
3964EXPORT_SYMBOL(vdec_request_irq);
3965
3966void vdec_free_irq(enum vdec_irq_num num, void *dev)
3967{
3968 if (num >= VDEC_IRQ_MAX) {
3969		pr_err("[%s] free irq error, irq num too big!", __func__);
3970 return;
3971 }
3972 /*
3973 *assume amrisc is stopped already and there is no mailbox interrupt
3974 * when we reset pointers here.
3975 */
3976 vdec_core->isr_context[num].dev_isr = NULL;
3977 vdec_core->isr_context[num].dev_threaded_isr = NULL;
3978 vdec_core->isr_context[num].dev_id = NULL;
3979 synchronize_irq(vdec_core->isr_context[num].irq);
3980}
3981EXPORT_SYMBOL(vdec_free_irq);
3982
3983struct vdec_s *vdec_get_default_vdec_for_userdata(void)
3984{
3985 struct vdec_s *vdec;
3986 struct vdec_s *ret_vdec;
3987 struct vdec_core_s *core = vdec_core;
3988 unsigned long flags;
3989 int id;
3990
3991 flags = vdec_core_lock(vdec_core);
3992
3993 id = 0x10000000;
3994 ret_vdec = NULL;
3995 if (!list_empty(&core->connected_vdec_list)) {
3996 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
3997 if (vdec->id < id) {
3998 id = vdec->id;
3999 ret_vdec = vdec;
4000 }
4001 }
4002 }
4003
4004 vdec_core_unlock(vdec_core, flags);
4005
4006 return ret_vdec;
4007}
4008EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
4009
4010struct vdec_s *vdec_get_vdec_by_id(int vdec_id)
4011{
4012 struct vdec_s *vdec;
4013 struct vdec_s *ret_vdec;
4014 struct vdec_core_s *core = vdec_core;
4015 unsigned long flags;
4016
4017 flags = vdec_core_lock(vdec_core);
4018
4019 ret_vdec = NULL;
4020 if (!list_empty(&core->connected_vdec_list)) {
4021 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4022 if (vdec->id == vdec_id) {
4023 ret_vdec = vdec;
4024 break;
4025 }
4026 }
4027 }
4028
4029 vdec_core_unlock(vdec_core, flags);
4030
4031 return ret_vdec;
4032}
4033EXPORT_SYMBOL(vdec_get_vdec_by_id);
4034
4035int vdec_read_user_data(struct vdec_s *vdec,
4036 struct userdata_param_t *p_userdata_param)
4037{
4038 int ret = 0;
4039
4040 if (!vdec)
4041 vdec = vdec_get_default_vdec_for_userdata();
4042
4043 if (vdec) {
4044 if (vdec->user_data_read)
4045 ret = vdec->user_data_read(vdec, p_userdata_param);
4046 }
4047 return ret;
4048}
4049EXPORT_SYMBOL(vdec_read_user_data);
4050
4051int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
4052{
4053 if (vdec) {
4054 if (vdec->wakeup_userdata_poll)
4055 vdec->wakeup_userdata_poll(vdec);
4056 }
4057
4058 return 0;
4059}
4060EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
4061
4062void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
4063{
4064 if (!vdec)
4065 vdec = vdec_get_default_vdec_for_userdata();
4066
4067 if (vdec) {
4068 if (vdec->reset_userdata_fifo)
4069 vdec->reset_userdata_fifo(vdec, bInit);
4070 }
4071}
4072EXPORT_SYMBOL(vdec_reset_userdata_fifo);
4073
4074static int dump_mode;
4075static ssize_t dump_risc_mem_store(struct class *class,
4076 struct class_attribute *attr,
4077 const char *buf, size_t size)/*set*/
4078{
4079 unsigned int val;
4080 ssize_t ret;
4081 char dump_mode_str[4] = "PRL";
4082
4083 /*ret = sscanf(buf, "%d", &val);*/
4084 ret = kstrtoint(buf, 0, &val);
4085
4086 if (ret != 0)
4087 return -EINVAL;
4088 dump_mode = val & 0x3;
4089 pr_info("set dump mode to %d,%c_mem\n",
4090 dump_mode, dump_mode_str[dump_mode]);
4091 return size;
4092}
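/*
 * Indirect amrisc memory access: the word address is written to VREG 0x31b
 * and the data word is read back through VREG 0x31c.  The dump helpers
 * below first program VREG 0x301 and 0x31d to select pmem (0) or lmem (2).
 */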
4093static u32 read_amrisc_reg(int reg)
4094{
4095 WRITE_VREG(0x31b, reg);
4096 return READ_VREG(0x31c);
4097}
4098
4099static void dump_pmem(void)
4100{
4101 int i;
4102
4103 WRITE_VREG(0x301, 0x8000);
4104 WRITE_VREG(0x31d, 0);
4105 pr_info("start dump of amrisc pmem\n");
4106 for (i = 0; i < 0xfff; i++) {
4107 /*same as .o format*/
4108 pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
4109 }
4110}
4111
4112static void dump_lmem(void)
4113{
4114 int i;
4115
4116 WRITE_VREG(0x301, 0x8000);
4117 WRITE_VREG(0x31d, 2);
4118 pr_info("start dump amrisc lmem\n");
4119 for (i = 0; i < 0x3ff; i++) {
4120 /* one lmem word per line: [addr] = value */
4121 pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
4122 }
4123}
4124
4125static ssize_t dump_risc_mem_show(struct class *class,
4126 struct class_attribute *attr, char *buf)
4127{
4128 char *pbuf = buf;
4129 int ret;
4130
4131 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
4132 mutex_lock(&vdec_mutex);
4133 if (!vdec_on(VDEC_1)) {
4134 mutex_unlock(&vdec_mutex);
4135 pbuf += sprintf(pbuf, "amrisc is power off\n");
4136 ret = pbuf - buf;
4137 return ret;
4138 }
4139 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4140 /*TODO:M6 define */
4141 /*
4142 * switch_mod_gate_by_type(MOD_VDEC, 1);
4143 */
4144 amports_switch_gate("vdec", 1);
4145 }
4146 /* start dump */
4147 switch (dump_mode) {
4148 case 0:
4149 dump_pmem();
4150 break;
4151 case 2:
4152 dump_lmem();
4153 break;
4154 default:
4155 break;
4156 }
4157
4158 /*done*/
4159 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
4160 mutex_unlock(&vdec_mutex);
4161 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4162 /*TODO:M6 define */
4163 /*
4164 * switch_mod_gate_by_type(MOD_VDEC, 0);
4165 */
4166 amports_switch_gate("vdec", 0);
4167 }
4168 return sprintf(buf, "done\n");
4169}
4170
4171static ssize_t core_show(struct class *class, struct class_attribute *attr,
4172 char *buf)
4173{
4174 struct vdec_core_s *core = vdec_core;
4175 char *pbuf = buf;
4176
4177 if (list_empty(&core->connected_vdec_list))
4178 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4179 else {
4180 struct vdec_s *vdec;
4181
4182 pbuf += sprintf(pbuf,
4183 " Core: last_sched %p, sched_mask %lx\n",
4184 core->last_vdec,
4185 core->sched_mask);
4186
4187 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4188 pbuf += sprintf(pbuf,
4189 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
4190 vdec->id,
4191 vdec,
4192 vdec_device_name[vdec->format * 2],
4193 vdec_status_str(vdec),
4194 vdec_type_str(vdec),
4195 vdec->active_mask);
4196 }
4197 }
4198
4199 return pbuf - buf;
4200}
4201
4202static ssize_t vdec_status_show(struct class *class,
4203 struct class_attribute *attr, char *buf)
4204{
4205 char *pbuf = buf;
4206 struct vdec_s *vdec;
4207 struct vdec_info vs;
4208 unsigned char vdec_num = 0;
4209 struct vdec_core_s *core = vdec_core;
4210 unsigned long flags = vdec_core_lock(vdec_core);
4211
4212 if (list_empty(&core->connected_vdec_list)) {
4213 pbuf += sprintf(pbuf, "No vdec.\n");
4214 goto out;
4215 }
4216
4217 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4218 if ((vdec->status == VDEC_STATUS_CONNECTED
4219 || vdec->status == VDEC_STATUS_ACTIVE)) {
4220 memset(&vs, 0, sizeof(vs));
4221 if (vdec_status(vdec, &vs)) {
4222 pbuf += sprintf(pbuf, "err.\n");
4223 goto out;
4224 }
4225 pbuf += sprintf(pbuf,
4226 "vdec channel %u statistics:\n",
4227 vdec_num);
4228 pbuf += sprintf(pbuf,
4229 "%13s : %s\n", "device name",
4230 vs.vdec_name);
4231 pbuf += sprintf(pbuf,
4232 "%13s : %u\n", "frame width",
4233 vs.frame_width);
4234 pbuf += sprintf(pbuf,
4235 "%13s : %u\n", "frame height",
4236 vs.frame_height);
4237 pbuf += sprintf(pbuf,
4238 "%13s : %u %s\n", "frame rate",
4239 vs.frame_rate, "fps");
4240 pbuf += sprintf(pbuf,
4241 "%13s : %u %s\n", "bit rate",
4242 vs.bit_rate / 1024 * 8, "kbps");
4243 pbuf += sprintf(pbuf,
4244 "%13s : %u\n", "status",
4245 vs.status);
4246 pbuf += sprintf(pbuf,
4247 "%13s : %u\n", "frame dur",
4248 vs.frame_dur);
4249 pbuf += sprintf(pbuf,
4250 "%13s : %u %s\n", "frame data",
4251 vs.frame_data / 1024, "KB");
4252 pbuf += sprintf(pbuf,
4253 "%13s : %u\n", "frame count",
4254 vs.frame_count);
4255 pbuf += sprintf(pbuf,
4256 "%13s : %u\n", "drop count",
4257 vs.drop_frame_count);
4258 pbuf += sprintf(pbuf,
4259 "%13s : %u\n", "fra err count",
4260 vs.error_frame_count);
4261 pbuf += sprintf(pbuf,
4262 "%13s : %u\n", "hw err count",
4263 vs.error_count);
4264 pbuf += sprintf(pbuf,
4265 "%13s : %llu %s\n", "total data",
4266 vs.total_data / 1024, "KB");
4267 pbuf += sprintf(pbuf,
4268 "%13s : %x\n\n", "ratio_control",
4269 vs.ratio_control);
4270
4271 vdec_num++;
4272 }
4273 }
4274out:
4275 vdec_core_unlock(vdec_core, flags);
4276 return pbuf - buf;
4277}
4278
4279static ssize_t dump_vdec_blocks_show(struct class *class,
4280 struct class_attribute *attr, char *buf)
4281{
4282 struct vdec_core_s *core = vdec_core;
4283 char *pbuf = buf;
4284 unsigned long flags = vdec_core_lock(vdec_core);
4285
4286 if (list_empty(&core->connected_vdec_list))
4287 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4288 else {
4289 struct vdec_s *vdec;
4290 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4291 pbuf += vdec_input_dump_blocks(&vdec->input,
4292 pbuf, PAGE_SIZE - (pbuf - buf));
4293 }
4294 }
4295 vdec_core_unlock(vdec_core, flags);
4296
4297 return pbuf - buf;
4298}
4299static ssize_t dump_vdec_chunks_show(struct class *class,
4300 struct class_attribute *attr, char *buf)
4301{
4302 struct vdec_core_s *core = vdec_core;
4303 char *pbuf = buf;
4304 unsigned long flags = vdec_core_lock(vdec_core);
4305
4306 if (list_empty(&core->connected_vdec_list))
4307 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4308 else {
4309 struct vdec_s *vdec;
4310 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4311 pbuf += vdec_input_dump_chunks(vdec->id, &vdec->input,
4312 pbuf, PAGE_SIZE - (pbuf - buf));
4313 }
4314 }
4315 vdec_core_unlock(vdec_core, flags);
4316
4317 return pbuf - buf;
4318}
4319
4320static ssize_t dump_decoder_state_show(struct class *class,
4321 struct class_attribute *attr, char *buf)
4322{
4323 char *pbuf = buf;
4324 struct vdec_s *vdec;
4325 struct vdec_core_s *core = vdec_core;
4326 unsigned long flags = vdec_core_lock(vdec_core);
4327
4328 if (list_empty(&core->connected_vdec_list)) {
4329 pbuf += sprintf(pbuf, "No vdec.\n");
4330 } else {
4331 list_for_each_entry(vdec,
4332 &core->connected_vdec_list, list) {
4333 if ((vdec->status == VDEC_STATUS_CONNECTED
4334 || vdec->status == VDEC_STATUS_ACTIVE)
4335 && vdec->dump_state)
4336 vdec->dump_state(vdec);
4337 }
4338 }
4339 vdec_core_unlock(vdec_core, flags);
4340
4341 return pbuf - buf;
4342}
4343
4344static ssize_t dump_fps_show(struct class *class,
4345 struct class_attribute *attr, char *buf)
4346{
4347 char *pbuf = buf;
4348 struct vdec_core_s *core = vdec_core;
4349 int i;
4350
4351 unsigned long flags = vdec_fps_lock(vdec_core);
4352 for (i = 0; i < MAX_INSTANCE_MUN; i++)
4353 pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps);
4354
4355 pbuf += sprintf(pbuf, "\n");
4356 vdec_fps_unlock(vdec_core, flags);
4357
4358 return pbuf - buf;
4359}
4360
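/*
 * The attributes below are exposed through the "vdec" class registered in
 * vdec_probe(), i.e. as files under /sys/class/vdec/.
 */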
4361static CLASS_ATTR_RO(amrisc_regs);
4362static CLASS_ATTR_RO(dump_trace);
4363static CLASS_ATTR_RO(clock_level);
4364static CLASS_ATTR_RW(poweron_clock_level);
4365static CLASS_ATTR_RW(dump_risc_mem);
4366static CLASS_ATTR_RW(keep_vdec_mem);
4367static CLASS_ATTR_RW(enable_mvdec_info);
4368static CLASS_ATTR_RO(core);
4369static CLASS_ATTR_RO(vdec_status);
4370static CLASS_ATTR_RO(dump_vdec_blocks);
4371static CLASS_ATTR_RO(dump_vdec_chunks);
4372static CLASS_ATTR_RO(dump_decoder_state);
4373#ifdef VDEC_DEBUG_SUPPORT
4374static CLASS_ATTR_RW(debug);
4375#endif
4376#ifdef FRAME_CHECK
4377static CLASS_ATTR_RW(dump_yuv);
4378static CLASS_ATTR_RW(frame_check);
4379#endif
4380static CLASS_ATTR_RO(dump_fps);
4381
4382static struct attribute *vdec_class_attrs[] = {
4383 &class_attr_amrisc_regs.attr,
4384 &class_attr_dump_trace.attr,
4385 &class_attr_clock_level.attr,
4386 &class_attr_poweron_clock_level.attr,
4387 &class_attr_dump_risc_mem.attr,
4388 &class_attr_keep_vdec_mem.attr,
4389 &class_attr_enable_mvdec_info.attr,
4390 &class_attr_core.attr,
4391 &class_attr_vdec_status.attr,
4392 &class_attr_dump_vdec_blocks.attr,
4393 &class_attr_dump_vdec_chunks.attr,
4394 &class_attr_dump_decoder_state.attr,
4395#ifdef VDEC_DEBUG_SUPPORT
4396 &class_attr_debug.attr,
4397#endif
4398#ifdef FRAME_CHECK
4399 &class_attr_dump_yuv.attr,
4400 &class_attr_frame_check.attr,
4401#endif
4402 &class_attr_dump_fps.attr,
4403 NULL
4404};
4405
4406ATTRIBUTE_GROUPS(vdec_class);
4407
4408static struct class vdec_class = {
4409 .name = "vdec",
4410 .class_groups = vdec_class_groups,
4411};
4412
4413struct device *get_vdec_device(void)
4414{
4415 return &vdec_core->vdec_core_platform_device->dev;
4416}
4417EXPORT_SYMBOL(get_vdec_device);
4418
4419static int vdec_probe(struct platform_device *pdev)
4420{
4421 s32 i, r;
4422
4423 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
4424 sizeof(struct vdec_core_s), GFP_KERNEL);
4425 if (vdec_core == NULL) {
4426 pr_err("vdec core allocation failed.\n");
4427 return -ENOMEM;
4428 }
4429
4430 atomic_set(&vdec_core->vdec_nr, 0);
4431 sema_init(&vdec_core->sem, 1);
4432
4433 r = class_register(&vdec_class);
4434 if (r) {
4435 pr_info("vdec class register failed.\n");
4436 return r;
4437 }
4438
4439 vdec_core->vdec_core_platform_device = pdev;
4440
4441 platform_set_drvdata(pdev, vdec_core);
4442
4443 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4444 vdec_core->isr_context[i].index = i;
4445 vdec_core->isr_context[i].irq = -1;
4446 }
4447
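 /*
  * Pre-request the decoder interrupt lines with no handlers attached;
  * individual decoders later install their own isr/threaded_isr callbacks
  * against these isr_context entries via vdec_request_threaded_irq().
  */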
4448 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
4449 IRQF_ONESHOT, "vdec-0", NULL);
4450 if (r < 0) {
4451 pr_err("vdec interrupt request failed\n");
4452 return r;
4453 }
4454
4455 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
4456 IRQF_ONESHOT, "vdec-1", NULL);
4457 if (r < 0) {
4458 pr_err("vdec interrupt request failed\n");
4459 return r;
4460 }
4461#if 0
4462 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
4463 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
4464 IRQF_ONESHOT, "vdec-hevc_back", NULL);
4465 if (r < 0) {
4466 pr_err("vdec interrupt request failed\n");
4467 return r;
4468 }
4469 }
4470#endif
4471 r = of_reserved_mem_device_init(&pdev->dev);
4472 if (r == 0)
4473 pr_info("vdec_probe done\n");
4474
4475 vdec_core->cma_dev = &pdev->dev;
4476
4477 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
4478 /* default to 250MHz */
4479 vdec_clock_hi_enable();
4480 }
4481
4482 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
4483 /* set vdec dmc request to urgent */
4484 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
4485 }
4486 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
4487 spin_lock_init(&vdec_core->lock);
4488 spin_lock_init(&vdec_core->canvas_lock);
4489 spin_lock_init(&vdec_core->fps_lock);
4490 spin_lock_init(&vdec_core->input_lock);
4491 ida_init(&vdec_core->ida);
4492 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
4493 "vdec-core");
4494
4495 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s", __WQ_LEGACY |
4496 WQ_MEM_RECLAIM | WQ_HIGHPRI /* high priority */, "vdec-work");
4497 /* work queue priority lower than vdec-core. */
4498
4499 /* power manager init. */
4500 vdec_core->pm = (struct power_manager_s *)
4501 of_device_get_match_data(&pdev->dev);
4502 if (vdec_core->pm->init) {
4503 r = vdec_core->pm->init(&pdev->dev);
4504 if (r) {
4505 pr_err("vdec power manager init failed\n");
4506 return r;
4507 }
4508 }
4509
4510 return 0;
4511}
4512
4513static int vdec_remove(struct platform_device *pdev)
4514{
4515 int i;
4516
4517 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4518 if (vdec_core->isr_context[i].irq >= 0) {
4519 free_irq(vdec_core->isr_context[i].irq,
4520 &vdec_core->isr_context[i]);
4521 vdec_core->isr_context[i].irq = -1;
4522 vdec_core->isr_context[i].dev_isr = NULL;
4523 vdec_core->isr_context[i].dev_threaded_isr = NULL;
4524 vdec_core->isr_context[i].dev_id = NULL;
4525 }
4526 }
4527
4528 kthread_stop(vdec_core->thread);
4529
4530 destroy_workqueue(vdec_core->vdec_core_wq);
4531
4532 if (vdec_core->pm->release)
4533 vdec_core->pm->release(&pdev->dev);
4534
4535 class_unregister(&vdec_class);
4536
4537 return 0;
4538}
4539
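/*
 * The entries below are registered as the "media.decoder" / "vdec" config
 * node in vdec_module_init(), overlapping with several of the module
 * parameters declared near the end of this file.
 */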
4540static struct mconfig vdec_configs[] = {
4541 MC_PU32("debug_trace_num", &debug_trace_num),
4542 MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
4543 MC_PU32("clk_config", &clk_config),
4544 MC_PI32("step_mode", &step_mode),
4545 MC_PI32("poweron_clock_level", &poweron_clock_level),
4546};
4547static struct mconfig_node vdec_node;
4548
4549extern const struct of_device_id amlogic_vdec_matches[];
4550
4551static struct platform_driver vdec_driver = {
4552 .probe = vdec_probe,
4553 .remove = vdec_remove,
4554 .driver = {
4555 .name = "vdec",
4556 .of_match_table = amlogic_vdec_matches,
4557 }
4558};
4559
4560static struct codec_profile_t amvdec_input_profile = {
4561 .name = "vdec_input",
4562 .profile = "drm_framemode"
4563};
4564
4565int vdec_module_init(void)
4566{
4567 if (platform_driver_register(&vdec_driver)) {
4568 pr_info("failed to register vdec module\n");
4569 return -ENODEV;
4570 }
4571 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4572 "vdec", vdec_configs, CONFIG_FOR_RW);
4573 vcodec_profile_register(&amvdec_input_profile);
4574 return 0;
4575}
4576EXPORT_SYMBOL(vdec_module_init);
4577
4578void vdec_module_exit(void)
4579{
4580 platform_driver_unregister(&vdec_driver);
4581}
4582EXPORT_SYMBOL(vdec_module_exit);
4583
4584#if 0
4585static int __init vdec_module_init(void)
4586{
4587 if (platform_driver_register(&vdec_driver)) {
4588 pr_info("failed to register vdec module\n");
4589 return -ENODEV;
4590 }
4591 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4592 "vdec", vdec_configs, CONFIG_FOR_RW);
4593 return 0;
4594}
4595
4596static void __exit vdec_module_exit(void)
4597{
4598 platform_driver_unregister(&vdec_driver);
4599}
4600#endif
4601
4602static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
4603{
4604 vdec_core->cma_dev = dev;
4605
4606 return 0;
4607}
4608
4609static const struct reserved_mem_ops rmem_vdec_ops = {
4610 .device_init = vdec_mem_device_init,
4611};
4612
4613static int __init vdec_mem_setup(struct reserved_mem *rmem)
4614{
4615 rmem->ops = &rmem_vdec_ops;
4616 pr_info("vdec: reserved mem setup\n");
4617
4618 return 0;
4619}
4620
4621
4622void vdec_set_vframe_comm(struct vdec_s *vdec, char *n)
4623{
4624 struct vdec_frames_s *mvfrm = vdec->mvfrm;
4625
4626 if (!mvfrm)
4627 return;
4628
4629 mvfrm->comm.vdec_id = vdec->id;
4630
4631 snprintf(mvfrm->comm.vdec_name, sizeof(mvfrm->comm.vdec_name)-1,
4632 "%s", n);
4633 mvfrm->comm.vdec_type = vdec->type;
4634}
4635EXPORT_SYMBOL(vdec_set_vframe_comm);
4636
4637void vdec_fill_vdec_frame(struct vdec_s *vdec, struct vframe_qos_s *vframe_qos,
4638 struct vdec_info *vinfo,struct vframe_s *vf,
4639 u32 hw_dec_time)
4640{
4641 u32 i;
4642 struct vframe_counter_s *fifo_buf;
4643 struct vdec_frames_s *mvfrm = vdec->mvfrm;
4644
4645 if (!mvfrm)
4646 return;
4647 fifo_buf = mvfrm->fifo_buf;
4648
4649 /* Assuming 60 fps, the wr counter wraps only after 2^32 / 60 / 86400,
4650 * i.e. about 828 days of continuous running, which is enough for us. */
4651 i = mvfrm->wr & (NUM_FRAME_VDEC - 1); /* slot index in fifo_buf */
4652 mvfrm->fifo_buf[i].decode_time_cost = hw_dec_time;
4653 if (vframe_qos)
4654 memcpy(&fifo_buf[i].qos, vframe_qos, sizeof(struct vframe_qos_s));
4655 if (vinfo) {
4656 memcpy(&fifo_buf[i].frame_width, &vinfo->frame_width,
4657 ((char*)&vinfo->reserved[0] - (char*)&vinfo->frame_width));
4658 }
4659 if (vf) {
4660 fifo_buf[i].vf_type = vf->type;
4661 fifo_buf[i].signal_type = vf->signal_type;
4662 fifo_buf[i].pts = vf->pts;
4663 fifo_buf[i].pts_us64 = vf->pts_us64;
4664 }
4665 mvfrm->wr++;
4666}
4667EXPORT_SYMBOL(vdec_fill_vdec_frame);
4668
4669/* copy_to_user() may sleep here, which could block vdec_fill_vdec_frame();
4670 * that is not acceptable.  So the caller passes a temporary buffer and we
4671 * copy the frame counters into it instead. */
4672u32 vdec_get_frame_vdec(struct vdec_s *vdec, struct vframe_counter_s *tmpbuf)
4673{
4674 u32 toread = 0;
4675 u32 slot_rd;
4676 struct vframe_counter_s *fifo_buf = NULL;
4677 struct vdec_frames_s *mvfrm = NULL;
4678
4679 /*
4680 switch (version) {
4681 case version_1:
4682 f1();
4683 case version_2:
4684 f2();
4685 default:
4686 break;
4687 }
4688 */
4689
4690 if (!vdec)
4691 return 0;
4692 mvfrm = vdec->mvfrm;
4693 if (!mvfrm)
4694 return 0;
4695
4696 fifo_buf = &mvfrm->fifo_buf[0];
4697
4698 toread = mvfrm->wr - mvfrm->rd;
4699 if (toread) {
4700 if (toread >= NUM_FRAME_VDEC - QOS_FRAME_NUM) {
4701 /* the ring is nearly full: drop the oldest entries, keeping QOS_FRAME_NUM free slots as headroom for the writer */
4702 mvfrm->rd = mvfrm->wr - (NUM_FRAME_VDEC - QOS_FRAME_NUM);
4703 }
4704
4705 if (toread >= QOS_FRAME_NUM) {
4706 toread = QOS_FRAME_NUM; /* read at most QOS_FRAME_NUM entries per call */
4707 }
4708
4709 slot_rd = mvfrm->rd & (NUM_FRAME_VDEC - 1); /* equivalent to rd % NUM_FRAME_VDEC */
4710 if (slot_rd + toread <= NUM_FRAME_VDEC) {
4711 memcpy(tmpbuf, &fifo_buf[slot_rd], toread*sizeof(struct vframe_counter_s));
4712 } else {
4713 u32 exceed;
4714 exceed = slot_rd + toread - NUM_FRAME_VDEC;
4715 memcpy(tmpbuf, &fifo_buf[slot_rd], (NUM_FRAME_VDEC - slot_rd)*sizeof(struct vframe_counter_s));
4716 memcpy(&tmpbuf[NUM_FRAME_VDEC-slot_rd], &fifo_buf[0], exceed*sizeof(struct vframe_counter_s));
4717 }
4718
4719 mvfrm->rd += toread;
4720 }
4721 return toread;
4722}
4723EXPORT_SYMBOL(vdec_get_frame_vdec);
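
/*
 * Illustrative caller-side sketch (not part of the driver): drain the frame
 * counters into a caller-provided temporary buffer, as the comment above
 * vdec_get_frame_vdec() suggests.  The function name and the pr_info()
 * formatting are assumptions made for this example only.
 */
#if 0
static void example_drain_frame_counters(struct vdec_s *vdec)
{
	/* one call returns at most QOS_FRAME_NUM entries */
	static struct vframe_counter_s tmpbuf[QOS_FRAME_NUM];
	u32 i, n;

	n = vdec_get_frame_vdec(vdec, tmpbuf);
	for (i = 0; i < n; i++)
		pr_info("frame[%u]: pts %u, decode_time_cost %u\n",
			i, tmpbuf[i].pts, tmpbuf[i].decode_time_cost);
}
#endif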
4724
4725
4726RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
4727/*
4728uint force_hevc_clock_cntl;
4729EXPORT_SYMBOL(force_hevc_clock_cntl);
4730
4731module_param(force_hevc_clock_cntl, uint, 0664);
4732*/
4733module_param(debug, uint, 0664);
4734module_param(debug_trace_num, uint, 0664);
4735module_param(hevc_max_reset_count, int, 0664);
4736module_param(clk_config, uint, 0664);
4737module_param(step_mode, int, 0664);
4738module_param(debugflags, int, 0664);
4739module_param(parallel_decode, int, 0664);
4740module_param(fps_detection, int, 0664);
4741module_param(fps_clear, int, 0664);
4742module_param(force_nosecure_even_drm, int, 0664);
4743module_param(disable_switch_single_to_mult, int, 0664);
4744
4745module_param(frameinfo_flag, int, 0664);
4746MODULE_PARM_DESC(frameinfo_flag,
4747 "\n frameinfo_flag\n");
4748module_param(v4lvideo_add_di, int, 0664);
4749MODULE_PARM_DESC(v4lvideo_add_di,
4750 "\n v4lvideo_add_di\n");
4751
4752module_param(max_di_instance, int, 0664);
4753MODULE_PARM_DESC(max_di_instance,
4754 "\n max_di_instance\n");
4755
4756/*
4757*module_init(vdec_module_init);
4758*module_exit(vdec_module_exit);
4759*/
4760#define CREATE_TRACE_POINTS
4761#include "vdec_trace.h"
4762MODULE_DESCRIPTION("AMLOGIC vdec driver");
4763MODULE_LICENSE("GPL");
4764MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
4765