summaryrefslogtreecommitdiff
path: root/drivers/frame_provider/decoder/utils/vdec.c (plain)
blob: 3d3ececb86d3b5012b9d808522f1f58014cb5f6f
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/video_sink/v4lvideo_ext.h>
37#include <linux/amlogic/media/vfm/vfm_ext.h>
38/*for VDEC_DEBUG_SUPPORT*/
39#include <linux/time.h>
40
41#include <linux/amlogic/media/utils/vdec_reg.h>
42#include "vdec.h"
43#include "vdec_trace.h"
44#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
45#include "vdec_profile.h"
46#endif
47#include <linux/of.h>
48#include <linux/of_fdt.h>
49#include <linux/libfdt_env.h>
50#include <linux/of_reserved_mem.h>
51#include <linux/dma-contiguous.h>
52#include <linux/cma.h>
53#include <linux/module.h>
54#include <linux/slab.h>
55#include <linux/dma-mapping.h>
56#include <linux/dma-contiguous.h>
57#include "../../../stream_input/amports/amports_priv.h"
58
59#include <linux/amlogic/media/utils/amports_config.h>
60#include "../utils/amvdec.h"
61#include "vdec_input.h"
62
63#include "../../../common/media_clock/clk/clk.h"
64#include <linux/reset.h>
65#include <linux/amlogic/cpu_version.h>
66#include <linux/amlogic/media/codec_mm/codec_mm.h>
67#include <linux/amlogic/media/video_sink/video_keeper.h>
68#include <linux/amlogic/media/codec_mm/configs.h>
69#include <linux/amlogic/media/frame_sync/ptsserv.h>
70#include "secprot.h"
71#include "../../../common/chips/decoder_cpu_ver_info.h"
72#include "frame_check.h"
73
74#ifdef CONFIG_AMLOGIC_POWER
75#include <linux/amlogic/power_ctrl.h>
76#endif
77
/* Serializes global decoder bring-up/teardown (e.g. inited_vcodec_num). */
static DEFINE_MUTEX(vdec_mutex);

#define MC_SIZE (4096 * 4)		/* microcode buffer size, 16KB */
#define CMA_ALLOC_SIZE SZ_64M		/* default CMA pool reservation */
#define MEM_NAME "vdec_prealloc"	/* codec_mm allocation tag */
static int inited_vcodec_num;		/* count of initialized codec instances */
#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
static int poweron_clock_level;		/* clock level applied at power-on */
static int keep_vdec_mem;		/* nonzero: keep decoder memory across stop */
static unsigned int debug_trace_num = 16 * 20;
static int step_mode;			/* single-step debug control */
static unsigned int clk_config;		/* see get_vdec_clk_config_settings() */
/*
 &1: sched_priority to MAX_RT_PRIO -1.
 &2: always reload firmware.
 &4: vdec canvas debug enable
 */
static unsigned int debug;

static int hevc_max_reset_count;	/* retry bound for HEVC core reset */

static int no_powerdown;		/* nonzero: skip power-down on release */
static int parallel_decode = 1;		/* enable parallel core scheduling */
static int fps_detection;		/* nonzero: enable vdec_fps_detec() */
static int fps_clear;			/* set to 1 to clear all fps counters */


static int force_nosecure_even_drm;
static int disable_switch_single_to_mult;	/* see vdec_set_decinfo() */

/* Protects DMC_REQ_CTRL read-modify-write in vdec_{en,dis}able_DMC(). */
static DEFINE_SPINLOCK(vdec_spin_lock);

#define HEVC_TEST_LIMIT 100
#define GXBB_REV_A_MINOR 0xA	/* minor rev needing hevc_workaround_needed() */

#define PRINT_FRAME_INFO 1
#define DISABLE_FRAME_INFO 2

static int frameinfo_flag = 0;
static int v4lvideo_add_di = 1;		/* insert deinterlacer in v4l video path */
static int max_di_instance = 1;

//static int path_debug = 0;

/* Ring of per-frame QoS records exchanged between decoder and reader. */
static struct vframe_qos_s *frame_info_buf_in = NULL;
static struct vframe_qos_s *frame_info_buf_out = NULL;
static int frame_qos_wr = 0;		/* QoS ring write index */
static int frame_qos_rd = 0;		/* QoS ring read index */
int decode_underflow = 0;

/* Total canvas slots: the MAX1 range (from START_INDEX) plus the MAX2 range. */
#define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)
129
/* Named register entry (name + offset) for register dump/debug tables. */
struct am_reg {
	char *name;
	int offset;
};
134
/* Per-IRQ bookkeeping: the registered handlers and which vdec owns the line. */
struct vdec_isr_context_s {
	int index;			/* slot in vdec_core->isr_context[] */
	int irq;			/* Linux IRQ number */
	irq_handler_t dev_isr;		/* hard-IRQ handler */
	irq_handler_t dev_threaded_isr;	/* threaded bottom half */
	void *dev_id;			/* cookie passed to the handlers */
	struct vdec_s *vdec;		/* instance currently bound to this IRQ */
};
143
/* Per-instance decode-rate measurement state (see vdec_fps_detec()). */
struct decode_fps_s {
	u32 frame_count;	/* frames counted since last clear */
	u64 start_timestamp;	/* us timestamp of first counted frame */
	u64 last_timestamp;	/* us timestamp of most recent frame */
	u32 fps;		/* computed rate (scaled; see vdec_fps_detec) */
};
150
/*
 * Singleton state for the decoder core scheduler (see vdec_core global).
 * Tracks all connected instances, which HW cores are in use, and the
 * scheduler thread that multiplexes instances onto cores.
 */
struct vdec_core_s {
	struct list_head connected_vdec_list;	/* all connected instances */
	spinlock_t lock;			/* protects scheduling state */
	spinlock_t canvas_lock;			/* protects canvas_stat[] */
	spinlock_t fps_lock;			/* protects decode_fps[] */
	spinlock_t input_lock;			/* protects buff_flag fields */
	struct ida ida;				/* instance id allocator */
	atomic_t vdec_nr;			/* number of live instances */
	struct vdec_s *vfm_vdec;
	struct vdec_s *active_vdec;		/* instance on VDEC_1 core */
	struct vdec_s *active_hevc;		/* instance on HEVC core */
	struct vdec_s *hint_fr_vdec;
	struct platform_device *vdec_core_platform_device;
	struct device *cma_dev;			/* device used for CMA allocs */
	struct semaphore sem;			/* wakes the scheduler thread */
	struct task_struct *thread;		/* scheduler thread */
	struct workqueue_struct *vdec_core_wq;	/* see vdec_schedule_work() */

	unsigned long sched_mask;		/* cores currently scheduled */
	struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
	int power_ref_count[VDEC_MAX];		/* per-core power refcounts */
	struct vdec_s *last_vdec;		/* round-robin scheduling cursor */
	int parallel_dec;			/* snapshot of parallel_decode */
	unsigned long power_ref_mask;
	int vdec_combine_flag;
	struct decode_fps_s decode_fps[MAX_INSTANCE_MUN];
	unsigned long buff_flag;		/* cores with frame data pending */
	unsigned long stream_buff_flag;		/* cores fed by stream input */
};
180
/* One shared-canvas slot: refcount plus a bitmask of owner instance ids. */
struct canvas_status_s {
	int type;		/* canvas usage type; 0 = free */
	int canvas_used_flag;	/* reference count */
	int id;			/* bitmask: (1 << instance id) per user */
};
186
187
/* The single scheduler/core state object, allocated at probe time. */
static struct vdec_core_s *vdec_core;

/* Printable names indexed by VDEC_STATUS_* values. */
static const char * const vdec_status_string[] = {
	"VDEC_STATUS_UNINITIALIZED",
	"VDEC_STATUS_DISCONNECTED",
	"VDEC_STATUS_CONNECTED",
	"VDEC_STATUS_ACTIVE"
};

static int debugflags;	/* read via vdec_get_debug_flags() */

/* Shared canvas allocation table; guarded by vdec_core->canvas_lock. */
static struct canvas_status_s canvas_stat[AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1];
200
201
/* Return the module-wide debug flag word (see debugflags). */
int vdec_get_debug_flags(void)
{
	return debugflags;
}
EXPORT_SYMBOL(vdec_get_debug_flags);
207
208unsigned char is_mult_inc(unsigned int type)
209{
210 unsigned char ret = 0;
211 if (vdec_get_debug_flags() & 0xf000)
212 ret = (vdec_get_debug_flags() & 0x1000)
213 ? 1 : 0;
214 else if (type & PORT_TYPE_DECODER_SCHED)
215 ret = 1;
216 return ret;
217}
218EXPORT_SYMBOL(is_mult_inc);
219
/* Which HW cores consume an input (bitstream) context, indexed by core. */
static const bool cores_with_input[VDEC_MAX] = {
	true, /* VDEC_1 */
	false, /* VDEC_HCODEC */
	false, /* VDEC_2 */
	true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
	false, /* VDEC_HEVC_BACK */
};

/* IRQ line used by each core; order must match cores_with_input[]. */
static const int cores_int[VDEC_MAX] = {
	VDEC_IRQ_1,
	VDEC_IRQ_2,
	VDEC_IRQ_0,
	VDEC_IRQ_0,
	VDEC_IRQ_HEVC_BACK
};
235
/* Acquire the canvas-table spinlock (IRQ-safe); returns saved IRQ flags. */
unsigned long vdec_canvas_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->canvas_lock, flags);

	return flags;
}

/* Release the canvas-table spinlock with the flags returned by the lock. */
void vdec_canvas_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->canvas_lock, flags);
}
248
/* Acquire the fps-measurement spinlock (IRQ-safe); returns saved IRQ flags. */
unsigned long vdec_fps_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->fps_lock, flags);

	return flags;
}

/* Release the fps-measurement spinlock with the flags returned by the lock. */
void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->fps_lock, flags);
}
261
/* Acquire the core scheduler spinlock (IRQ-safe); returns saved IRQ flags. */
unsigned long vdec_core_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->lock, flags);

	return flags;
}

/* Release the core scheduler spinlock with the flags returned by the lock. */
void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->lock, flags);
}
275
/* Acquire the input-buffer-status spinlock (IRQ-safe); returns IRQ flags. */
unsigned long vdec_inputbuff_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->input_lock, flags);

	return flags;
}

/* Release the input-buffer-status spinlock with flags from the lock call. */
void vdec_inputbuff_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->input_lock, flags);
}
289
290
291static bool vdec_is_input_frame_empty(struct vdec_s *vdec) {
292 struct vdec_core_s *core = vdec_core;
293 bool ret;
294 unsigned long flags;
295
296 flags = vdec_inputbuff_lock(core);
297 ret = !(vdec->core_mask & core->buff_flag);
298 vdec_inputbuff_unlock(core, flags);
299
300 return ret;
301}
302
/* Kick the core scheduler thread (it waits on core->sem). */
static void vdec_up(struct vdec_s *vdec)
{
	struct vdec_core_s *core = vdec_core;

	if (debug & 8)
		pr_info("vdec_up, id:%d\n", vdec->id);
	up(&core->sem);
}
311
312
/* Current wall-clock time in microseconds (gettimeofday-based). */
static u64 vdec_get_us_time_system(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);

	return div64_u64(timeval_to_ns(&tv), 1000);
}
321
322static void vdec_fps_clear(int id)
323{
324 if (id >= MAX_INSTANCE_MUN)
325 return;
326
327 vdec_core->decode_fps[id].frame_count = 0;
328 vdec_core->decode_fps[id].start_timestamp = 0;
329 vdec_core->decode_fps[id].last_timestamp = 0;
330 vdec_core->decode_fps[id].fps = 0;
331}
332
333static void vdec_fps_clearall(void)
334{
335 int i;
336
337 for (i = 0; i < MAX_INSTANCE_MUN; i++) {
338 vdec_core->decode_fps[i].frame_count = 0;
339 vdec_core->decode_fps[i].start_timestamp = 0;
340 vdec_core->decode_fps[i].last_timestamp = 0;
341 vdec_core->decode_fps[i].fps = 0;
342 }
343}
344
/*
 * Account one decoded frame for instance @id and update its rate estimate.
 * No-op unless the fps_detection module parameter is set. Writing 1 to
 * fps_clear (module param) resets all counters on the next frame.
 *
 * fps = frame_count * 10^10 / elapsed_us, i.e. the frames-per-second
 * value scaled by 10^4 (timestamps are in microseconds).
 */
static void vdec_fps_detec(int id)
{
	unsigned long flags;

	if (fps_detection == 0)
		return;

	if (id >= MAX_INSTANCE_MUN)
		return;

	flags = vdec_fps_lock(vdec_core);

	if (fps_clear == 1) {
		vdec_fps_clearall();
		fps_clear = 0;
	}

	vdec_core->decode_fps[id].frame_count++;
	if (vdec_core->decode_fps[id].frame_count == 1) {
		/* first frame: anchor the measurement window */
		vdec_core->decode_fps[id].start_timestamp =
			vdec_get_us_time_system();
		vdec_core->decode_fps[id].last_timestamp =
			vdec_core->decode_fps[id].start_timestamp;
	} else {
		vdec_core->decode_fps[id].last_timestamp =
			vdec_get_us_time_system();
		vdec_core->decode_fps[id].fps =
			(u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) *
				10000000000),
				(vdec_core->decode_fps[id].last_timestamp -
				vdec_core->decode_fps[id].start_timestamp));
	}
	vdec_fps_unlock(vdec_core, flags);
}
379
380
381
382static int get_canvas(unsigned int index, unsigned int base)
383{
384 int start;
385 int canvas_index = index * base;
386 int ret;
387
388 if ((base > 4) || (base == 0))
389 return -1;
390
391 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
392 <= AMVDEC_CANVAS_MAX1) {
393 start = AMVDEC_CANVAS_START_INDEX + base * index;
394 } else {
395 canvas_index -= (AMVDEC_CANVAS_MAX1 -
396 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
397 if (canvas_index <= AMVDEC_CANVAS_MAX2)
398 start = canvas_index / base;
399 else
400 return -1;
401 }
402
403 if (base == 1) {
404 ret = start;
405 } else if (base == 2) {
406 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
407 } else if (base == 3) {
408 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
409 } else if (base == 4) {
410 ret = (((start + 3) << 24) | (start + 2) << 16) |
411 ((start + 1) << 8) | start;
412 }
413
414 return ret;
415}
416
/*
 * Allocate (or share) one canvas slot of the given @type for instance @id.
 *
 * First pass: reuse a slot of the same type not yet referenced by this id
 * (refcount++ and mark the id bit).  Second pass: claim a free slot.
 * Slots 0x10-0x15 are reserved for RDMA and always skipped.
 *
 * Note the table index -> canvas number mapping: indices up to
 * AMVDEC_CANVAS_MAX2 map directly; higher indices map back into the
 * AMVDEC_CANVAS_START_INDEX.. range (inverse of free_canvas_ex()).
 * Returns the canvas number, or -1 when the table is exhausted.
 */
static int get_canvas_ex(int type, int id)
{
	int i;
	unsigned long flags;

	flags = vdec_canvas_lock(vdec_core);

	for (i = 0; i < CANVAS_MAX_SIZE; i++) {
		/*0x10-0x15 has been used by rdma*/
		if ((i >= 0x10) && (i <= 0x15))
			continue;
		if ((canvas_stat[i].type == type) &&
			(canvas_stat[i].id & (1 << id)) == 0) {
			canvas_stat[i].canvas_used_flag++;
			canvas_stat[i].id |= (1 << id);
			if (debug & 4)
				pr_debug("get used canvas %d\n", i);
			vdec_canvas_unlock(vdec_core, flags);
			if (i < AMVDEC_CANVAS_MAX2 + 1)
				return i;
			else
				return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
		}
	}

	for (i = 0; i < CANVAS_MAX_SIZE; i++) {
		/*0x10-0x15 has been used by rdma*/
		if ((i >= 0x10) && (i <= 0x15))
			continue;
		if (canvas_stat[i].type == 0) {
			canvas_stat[i].type = type;
			canvas_stat[i].canvas_used_flag = 1;
			canvas_stat[i].id = (1 << id);
			if (debug & 4) {
				pr_debug("get canvas %d\n", i);
				pr_debug("canvas_used_flag %d\n",
					canvas_stat[i].canvas_used_flag);
				pr_debug("canvas_stat[i].id %d\n",
					canvas_stat[i].id);
			}
			vdec_canvas_unlock(vdec_core, flags);
			if (i < AMVDEC_CANVAS_MAX2 + 1)
				return i;
			else
				return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
		}
	}
	vdec_canvas_unlock(vdec_core, flags);

	pr_info("cannot get canvas\n");

	return -1;
}
470
/*
 * Drop instance @id's reference on canvas number @index.
 * Maps the canvas number back to a canvas_stat[] slot (inverse of the
 * mapping in get_canvas_ex()), clears this id's bit, decrements the
 * refcount, and frees the slot (type = 0) when the count reaches zero.
 * Invalid indices and non-referencing ids are silently ignored.
 */
static void free_canvas_ex(int index, int id)
{
	unsigned long flags;
	int offset;

	flags = vdec_canvas_lock(vdec_core);
	if (index >= 0 &&
		index < AMVDEC_CANVAS_MAX2 + 1)
		offset = index;
	else if ((index >= AMVDEC_CANVAS_START_INDEX) &&
		(index <= AMVDEC_CANVAS_MAX1))
		offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX;
	else {
		vdec_canvas_unlock(vdec_core, flags);
		return;
	}

	if ((canvas_stat[offset].canvas_used_flag > 0) &&
		(canvas_stat[offset].id & (1 << id))) {
		canvas_stat[offset].canvas_used_flag--;
		canvas_stat[offset].id &= ~(1 << id);
		if (canvas_stat[offset].canvas_used_flag == 0) {
			/* last user gone: recycle the slot */
			canvas_stat[offset].type = 0;
			canvas_stat[offset].id = 0;
		}
		if (debug & 4) {
			pr_debug("free index %d used_flag %d, type = %d, id = %d\n",
				offset,
				canvas_stat[offset].canvas_used_flag,
				canvas_stat[offset].type,
				canvas_stat[offset].id);
		}
	}
	vdec_canvas_unlock(vdec_core, flags);

	return;

}
509
/* Pulse the DMC pipeline reset bits for all decoder/encoder masters. */
static void vdec_dmc_pipeline_reset(void)
{
	/*
	 * bit15: vdec_piple
	 * bit14: hevc_dmc_piple
	 * bit13: hevcf_dmc_pipl
	 * bit12: wave420_dmc_pipl
	 * bit11: hcodec_dmc_pipl
	 */

	WRITE_RESET_REG(RESET7_REGISTER,
		(1 << 15) | (1 << 14) | (1 << 13) |
		(1 << 12) | (1 << 11));
}
524
/*
 * Halt the decoder's embedded RISC processor and wait for its IMEM/LMEM
 * DMA engines to drain (bit 0x8000 busy flag), bounded by jiffies
 * timeouts so a wedged engine cannot hang the caller forever.
 *
 * NOTE(review): the LMEM wait uses HZ for VLD but HZ/10 for HEVC —
 * presumably intentional tuning, but worth confirming.
 */
static void vdec_stop_armrisc(int hw)
{
	ulong timeout = jiffies + HZ;

	if (hw == VDEC_INPUT_TARGET_VLD) {
		/* clear both processor status registers to stop execution */
		WRITE_VREG(MPSR, 0);
		WRITE_VREG(CPSR, 0);

		while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		timeout = jiffies + HZ;
		while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	} else if (hw == VDEC_INPUT_TARGET_HEVC) {
		WRITE_VREG(HEVC_MPSR, 0);
		WRITE_VREG(HEVC_CPSR, 0);

		while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		timeout = jiffies + HZ/10;
		while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	}
}
559
/*
 * Gate this decoder's DMC (DDR memory controller) request port:
 * stop the RISC core first, clear the core's request-enable bit in
 * DMC_REQ_CTRL, then spin until DMC_CHAN_STS reports the channel idle.
 *
 * NOTE(review): the final status poll has no timeout; a stuck channel
 * would hang here.  Mask bit positions differ per SoC generation
 * (G12A and later moved/extended them).
 */
static void vdec_disable_DMC(struct vdec_s *vdec)
{
	/*close first,then wait pedding end,timing suggestion from vlsi*/
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/* need to stop armrisc. */
	if (!IS_ERR_OR_NULL(vdec->dev))
		vdec_stop_armrisc(input->target);

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;

	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
592
/*
 * Re-enable this decoder's DMC request port (inverse of
 * vdec_disable_DMC): set the per-core enable bit in DMC_REQ_CTRL,
 * resetting the DMC pipeline first on G12B where it is required.
 *
 * NOTE(review): the G12B check uses get_cpu_type() while every other
 * check in this file uses get_cpu_major_id() — confirm they are
 * interchangeable here.
 */
static void vdec_enable_DMC(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/*must to be reset the dmc pipeline if it's g12b.*/
	if (get_cpu_type() == AM_MESON_CPU_MAJOR_ID_G12B)
		vdec_dmc_pipeline_reset();

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
619
620
621
622static int vdec_get_hw_type(int value)
623{
624 int type;
625 switch (value) {
626 case VFORMAT_HEVC:
627 case VFORMAT_VP9:
628 case VFORMAT_AVS2:
629 type = CORE_MASK_HEVC;
630 break;
631
632 case VFORMAT_MPEG12:
633 case VFORMAT_MPEG4:
634 case VFORMAT_H264:
635 case VFORMAT_MJPEG:
636 case VFORMAT_REAL:
637 case VFORMAT_JPEG:
638 case VFORMAT_VC1:
639 case VFORMAT_AVS:
640 case VFORMAT_YUV:
641 case VFORMAT_H264MVC:
642 case VFORMAT_H264_4K2K:
643 case VFORMAT_H264_ENC:
644 case VFORMAT_JPEG_ENC:
645 type = CORE_MASK_VDEC_1;
646 break;
647
648 default:
649 type = -1;
650 }
651
652 return type;
653}
654
655
656static void vdec_save_active_hw(struct vdec_s *vdec)
657{
658 int type;
659
660 type = vdec_get_hw_type(vdec->port->vformat);
661
662 if (type == CORE_MASK_HEVC) {
663 vdec_core->active_hevc = vdec;
664 } else if (type == CORE_MASK_VDEC_1) {
665 vdec_core->active_vdec = vdec;
666 } else {
667 pr_info("save_active_fw wrong\n");
668 }
669}
670
/*
 * Rebuild the core-wide input availability bitmaps:
 * buff_flag gets the core mask of every frame-based instance that has
 * queued frames (or EOS); stream_buff_flag gets the core mask of every
 * stream-based instance.  Consumed by vdec_is_input_frame_empty() and
 * the scheduler.
 */
static void vdec_update_buff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	unsigned long flags;
	struct vdec_s *vdec;

	flags = vdec_inputbuff_lock(core);
	core->buff_flag = 0;
	core->stream_buff_flag = 0;
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input_frame_based(input)) {
			if (input->have_frame_num || input->eos)
				core->buff_flag |= vdec->core_mask;
		} else if (input_stream_based(input)) {
			core->stream_buff_flag |= vdec->core_mask;
		}
	}
	vdec_inputbuff_unlock(core, flags);
}
691
692#if 0
693void vdec_update_streambuff_status(void)
694{
695 struct vdec_core_s *core = vdec_core;
696 struct vdec_s *vdec;
697
698 /* check streaming prepare level threshold if not EOS */
699 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
700 struct vdec_input_s *input = &vdec->input;
701 if (input && input_stream_based(input) && !input->eos &&
702 (vdec->need_more_data & VDEC_NEED_MORE_DATA)) {
703 u32 rp, wp, level;
704
705 rp = READ_PARSER_REG(PARSER_VIDEO_RP);
706 wp = READ_PARSER_REG(PARSER_VIDEO_WP);
707 if (wp < rp)
708 level = input->size + wp - rp;
709 else
710 level = wp - rp;
711 if ((level < input->prepare_level) &&
712 (pts_get_rec_num(PTS_TYPE_VIDEO,
713 vdec->input.total_rd_count) < 2)) {
714 break;
715 } else if (level > input->prepare_level) {
716 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
717 if (debug & 8)
718 pr_info("vdec_flush_streambuff_status up\n");
719 vdec_up(vdec);
720 }
721 break;
722 }
723 }
724}
725EXPORT_SYMBOL(vdec_update_streambuff_status);
726#endif
727
728int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
729{
730 if (vdec && vdec->dec_status &&
731 ((vdec->status == VDEC_STATUS_CONNECTED ||
732 vdec->status == VDEC_STATUS_ACTIVE)))
733 return vdec->dec_status(vdec, vstatus);
734
735 return 0;
736}
737EXPORT_SYMBOL(vdec_status);
738
739int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
740{
741 int r;
742
743 if (vdec->set_trickmode) {
744 r = vdec->set_trickmode(vdec, trickmode);
745
746 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
747 r = vdec->slave->set_trickmode(vdec->slave,
748 trickmode);
749 return r;
750 }
751
752 return -1;
753}
754EXPORT_SYMBOL(vdec_set_trickmode);
755
756int vdec_set_isreset(struct vdec_s *vdec, int isreset)
757{
758 vdec->is_reset = isreset;
759 pr_info("is_reset=%d\n", isreset);
760 if (vdec->set_isreset)
761 return vdec->set_isreset(vdec, isreset);
762 return 0;
763}
764EXPORT_SYMBOL(vdec_set_isreset);
765
766int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
767{
768 vdec->dolby_meta_with_el = isdvmetawithel;
769 pr_info("isdvmetawithel=%d\n", isdvmetawithel);
770 return 0;
771}
772EXPORT_SYMBOL(vdec_set_dv_metawithel);
773
774void vdec_set_no_powerdown(int flag)
775{
776 no_powerdown = flag;
777 pr_info("no_powerdown=%d\n", no_powerdown);
778 return;
779}
780EXPORT_SYMBOL(vdec_set_no_powerdown);
781
782void vdec_count_info(struct vdec_info *vs, unsigned int err,
783 unsigned int offset)
784{
785 if (err)
786 vs->error_frame_count++;
787 if (offset) {
788 if (0 == vs->frame_count) {
789 vs->offset = 0;
790 vs->samp_cnt = 0;
791 }
792 vs->frame_data = offset > vs->total_data ?
793 offset - vs->total_data : vs->total_data - offset;
794 vs->total_data = offset;
795 if (vs->samp_cnt < 96000 * 2) { /* 2s */
796 if (0 == vs->samp_cnt)
797 vs->offset = offset;
798 vs->samp_cnt += vs->frame_dur;
799 } else {
800 vs->bit_rate = (offset - vs->offset) / 2;
801 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
802 vs->samp_cnt = 0;
803 }
804 vs->frame_count++;
805 }
806 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
807 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
808 return;
809}
810EXPORT_SYMBOL(vdec_count_info);
/* 4K decode is supported on everything except the GXL 805X package. */
int vdec_is_support_4k(void)
{
	return !is_meson_gxl_package_805X();
}
EXPORT_SYMBOL(vdec_is_support_4k);
816
817/*
818 * clk_config:
819 *0:default
820 *1:no gp0_pll;
821 *2:always used gp0_pll;
822 *>=10:fixed n M clk;
823 *== 100 , 100M clks;
824 */
/* Read the decoder clock policy (see the clk_config comment above). */
unsigned int get_vdec_clk_config_settings(void)
{
	return clk_config;
}
/* Overwrite the decoder clock policy.
 * NOTE(review): only the setter is EXPORT_SYMBOL'd — confirm the getter
 * is intentionally module-internal.
 */
void update_vdec_clk_config_settings(unsigned int config)
{
	clk_config = config;
}
EXPORT_SYMBOL(update_vdec_clk_config_settings);
834
/* True on GXBB revision A silicon, which needs the HEVC init workaround. */
static bool hevc_workaround_needed(void)
{
	return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) &&
		(get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
		== GXBB_REV_A_MINOR);
}
841
/* Device to use for codec CMA allocations (set at core probe). */
struct device *get_codec_cma_device(void)
{
	return vdec_core->cma_dev;
}
846
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/*
 * Platform-device names, two entries per format: [format*2] is the
 * legacy single-instance driver, [format*2 + 1] the multi-instance one
 * (see get_dev_name()).  Order must track the vformat enum — presumably;
 * confirm against vformat.h before reordering.
 */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12", "ammvdec_mpeg12",
	"amvdec_mpeg4", "ammvdec_mpeg4",
	"amvdec_h264", "ammvdec_h264",
	"amvdec_mjpeg", "ammvdec_mjpeg",
	"amvdec_real", "ammvdec_real",
	"amjpegdec", "ammjpegdec",
	"amvdec_vc1", "ammvdec_vc1",
	"amvdec_avs", "ammvdec_avs",
	"amvdec_yuv", "ammvdec_yuv",
	"amvdec_h264mvc", "ammvdec_h264mvc",
	"amvdec_h264_4k2k", "ammvdec_h264_4k2k",
	"amvdec_h265", "ammvdec_h265",
	"amvenc_avc", "amvenc_avc",
	"jpegenc", "jpegenc",
	"amvdec_vp9", "ammvdec_vp9",
	"amvdec_avs2", "ammvdec_avs2"
};


#else

/* Single-instance-only build: one driver name per format. */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12",
	"amvdec_mpeg4",
	"amvdec_h264",
	"amvdec_mjpeg",
	"amvdec_real",
	"amjpegdec",
	"amvdec_vc1",
	"amvdec_avs",
	"amvdec_yuv",
	"amvdec_h264mvc",
	"amvdec_h264_4k2k",
	"amvdec_h265",
	"amvenc_avc",
	"jpegenc",
	"amvdec_vp9",
	"amvdec_avs2"
};

#endif
890
891/*
892 * Only support time sliced decoding for frame based input,
893 * so legacy decoder can exist with time sliced decoder.
894 */
895static const char *get_dev_name(bool use_legacy_vdec, int format)
896{
897#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
898 if (use_legacy_vdec)
899 return vdec_device_name[format * 2];
900 else
901 return vdec_device_name[format * 2 + 1];
902#else
903 return vdec_device_name[format];
904#endif
905}
906
907#ifdef VDEC_DEBUG_SUPPORT
/* Monotonic timestamp (ns) for decode-time profiling. */
static u64 get_current_clk(void)
{
	/*struct timespec xtime = current_kernel_time();
	u64 usec = xtime.tv_sec * 1000000;
	usec += xtime.tv_nsec / 1000;
	*/
	u64 usec = sched_clock();
	return usec;
}

/* Increment per-core counters for every core selected in @mask. */
static void inc_profi_count(unsigned long mask, u32 *count)
{
	enum vdec_type_e type;

	for (type = VDEC_1; type < VDEC_MAX; type++) {
		if (mask & (1 << type))
			count[type]++;
	}
}

/*
 * Mark a run start at @clk for every core in @mask: records the run
 * start time, latches the first-ever start, and updates total elapsed
 * time since profiling began.
 */
static void update_profi_clk_run(struct vdec_s *vdec,
	unsigned long mask, u64 clk)
{
	enum vdec_type_e type;

	for (type = VDEC_1; type < VDEC_MAX; type++) {
		if (mask & (1 << type)) {
			vdec->start_run_clk[type] = clk;
			if (vdec->profile_start_clk[type] == 0)
				vdec->profile_start_clk[type] = clk;
			vdec->total_clk[type] = clk
				- vdec->profile_start_clk[type];
			/*pr_info("set start_run_clk %ld\n",
			vdec->start_run_clk);*/

		}
	}
}

/*
 * Mark a run end at @clk for every core in @mask, accumulating the
 * elapsed run time.  Warns if a stop arrives without a matching start.
 */
static void update_profi_clk_stop(struct vdec_s *vdec,
	unsigned long mask, u64 clk)
{
	enum vdec_type_e type;

	for (type = VDEC_1; type < VDEC_MAX; type++) {
		if (mask & (1 << type)) {
			if (vdec->start_run_clk[type] == 0)
				pr_info("error, start_run_clk[%d] not set\n", type);

			/*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
			type,
			clk,
			vdec->start_run_clk[type],
			vdec->run_clk[type]);*/
			vdec->run_clk[type] +=
				(clk - vdec->start_run_clk[type]);
		}
	}
}
967
968#endif
969
970int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
971{
972 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
973 sizeof(struct dec_sysinfo)))
974 return -EFAULT;
975
976 /* force switch to mult instance if supports this profile. */
977 if ((vdec->type == VDEC_TYPE_SINGLE) &&
978 !disable_switch_single_to_mult) {
979 const char *str = NULL;
980 char fmt[16] = {0};
981
982 str = strchr(get_dev_name(false, vdec->format), '_');
983 if (!str)
984 return -1;
985
986 sprintf(fmt, "m%s", ++str);
987 if (is_support_profile(fmt) &&
988 vdec->sys_info->format != VIDEO_DEC_FORMAT_H263)
989 vdec->type = VDEC_TYPE_STREAM_PARSER;
990 }
991
992 return 0;
993}
994EXPORT_SYMBOL(vdec_set_decinfo);
995
996/* construct vdec strcture */
997struct vdec_s *vdec_create(struct stream_port_s *port,
998 struct vdec_s *master)
999{
1000 struct vdec_s *vdec;
1001 int type = VDEC_TYPE_SINGLE;
1002 int id;
1003
1004 if (is_mult_inc(port->type))
1005 type = (port->type & PORT_TYPE_FRAME) ?
1006 VDEC_TYPE_FRAME_BLOCK :
1007 VDEC_TYPE_STREAM_PARSER;
1008
1009 id = ida_simple_get(&vdec_core->ida,
1010 0, MAX_INSTANCE_MUN, GFP_KERNEL);
1011 if (id < 0) {
1012 pr_info("vdec_create request id failed!ret =%d\n", id);
1013 return NULL;
1014 }
1015 vdec = vzalloc(sizeof(struct vdec_s));
1016
1017 /* TBD */
1018 if (vdec) {
1019 vdec->magic = 0x43454456;
1020 vdec->id = -1;
1021 vdec->type = type;
1022 vdec->port = port;
1023 vdec->sys_info = &vdec->sys_info_store;
1024
1025 INIT_LIST_HEAD(&vdec->list);
1026
1027 atomic_inc(&vdec_core->vdec_nr);
1028 vdec->id = id;
1029 vdec_input_init(&vdec->input, vdec);
1030 vdec->input.vdec_is_input_frame_empty = vdec_is_input_frame_empty;
1031 vdec->input.vdec_up = vdec_up;
1032 if (master) {
1033 vdec->master = master;
1034 master->slave = vdec;
1035 master->sched = 1;
1036 }
1037 }
1038
1039 pr_debug("vdec_create instance %p, total %d\n", vdec,
1040 atomic_read(&vdec_core->vdec_nr));
1041
1042 //trace_vdec_create(vdec); /*DEBUG_TMP*/
1043
1044 return vdec;
1045}
1046EXPORT_SYMBOL(vdec_create);
1047
1048int vdec_set_format(struct vdec_s *vdec, int format)
1049{
1050 vdec->format = format;
1051 vdec->port_flag |= PORT_FLAG_VFORMAT;
1052
1053 if (vdec->slave) {
1054 vdec->slave->format = format;
1055 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
1056 }
1057 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
1058
1059 return 0;
1060}
1061EXPORT_SYMBOL(vdec_set_format);
1062
1063int vdec_set_pts(struct vdec_s *vdec, u32 pts)
1064{
1065 vdec->pts = pts;
1066 vdec->pts64 = div64_u64((u64)pts * 100, 9);
1067 vdec->pts_valid = true;
1068 //trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
1069 return 0;
1070}
1071EXPORT_SYMBOL(vdec_set_pts);
1072
/* Latch an externally supplied timestamp and mark it valid. */
void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
{
	vdec->timestamp = timestamp;
	vdec->timestamp_valid = true;
}
EXPORT_SYMBOL(vdec_set_timestamp);
1079
1080int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
1081{
1082 vdec->pts64 = pts64;
1083 vdec->pts = (u32)div64_u64(pts64 * 9, 100);
1084 vdec->pts_valid = true;
1085
1086 //trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
1087 return 0;
1088}
1089EXPORT_SYMBOL(vdec_set_pts64);
1090
/* Current VDEC_STATUS_* value of the instance. */
int vdec_get_status(struct vdec_s *vdec)
{
	return vdec->status;
}
EXPORT_SYMBOL(vdec_get_status);

/* Number of frames currently queued on the instance's input chain. */
int vdec_get_frame_num(struct vdec_s *vdec)
{
	return vdec->input.have_frame_num;
}
EXPORT_SYMBOL(vdec_get_frame_num);

/* Set the instance's current VDEC_STATUS_* value. */
void vdec_set_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
	vdec->status = status;
}
EXPORT_SYMBOL(vdec_set_status);

/* Set the status the instance should transition to next. */
void vdec_set_next_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
	vdec->next_status = status;
}
EXPORT_SYMBOL(vdec_set_next_status);
1116
/* Select the frame-based video output path for this instance. */
int vdec_set_video_path(struct vdec_s *vdec, int video_path)
{
	vdec->frame_base_video_path = video_path;
	return 0;
}
EXPORT_SYMBOL(vdec_set_video_path);

/* Bind the instance to a vframe receiver instance id. */
int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
{
	vdec->vf_receiver_inst = receive_id;
	return 0;
}
EXPORT_SYMBOL(vdec_set_receive_id);

/* add frame data to input chain */
int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
{
	return vdec_input_add_frame(&vdec->input, buf, count);
}
EXPORT_SYMBOL(vdec_write_vframe);
1137
1138/* add a work queue thread for vdec*/
1139void vdec_schedule_work(struct work_struct *work)
1140{
1141 if (vdec_core->vdec_core_wq)
1142 queue_work(vdec_core->vdec_core_wq, work);
1143 else
1144 schedule_work(work);
1145}
1146EXPORT_SYMBOL(vdec_schedule_work);
1147
1148static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
1149{
1150 if (vdec->master)
1151 return vdec->master;
1152 else if (vdec->slave)
1153 return vdec->slave;
1154 return NULL;
1155}
1156
1157static void vdec_sync_input_read(struct vdec_s *vdec)
1158{
1159 if (!vdec_stream_based(vdec))
1160 return;
1161
1162 if (vdec_dual(vdec)) {
1163 u32 me, other;
1164 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1165 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1166 other =
1167 vdec_get_associate(vdec)->input.stream_cookie;
1168 if (me > other)
1169 return;
1170 else if (me == other) {
1171 me = READ_VREG(VLD_MEM_VIFIFO_RP);
1172 other =
1173 vdec_get_associate(vdec)->input.swap_rp;
1174 if (me > other) {
1175 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1176 vdec_get_associate(vdec)->
1177 input.swap_rp);
1178 return;
1179 }
1180 }
1181 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1182 READ_VREG(VLD_MEM_VIFIFO_RP));
1183 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1184 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1185 if (((me & 0x80000000) == 0) &&
1186 (vdec->input.streaming_rp & 0x80000000))
1187 me += 1ULL << 32;
1188 other = vdec_get_associate(vdec)->input.streaming_rp;
1189 if (me > other) {
1190 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1191 vdec_get_associate(vdec)->
1192 input.swap_rp);
1193 return;
1194 }
1195
1196 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1197 READ_VREG(HEVC_STREAM_RD_PTR));
1198 }
1199 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1200 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1201 READ_VREG(VLD_MEM_VIFIFO_RP));
1202 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1203 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1204 READ_VREG(HEVC_STREAM_RD_PTR));
1205 }
1206}
1207
/*
 * Propagate the parser's write pointer (PARSER_VIDEO_WP) into the
 * decoder's stream FIFO write pointer, for stream-based input only.
 */
static void vdec_sync_input_write(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_WP,
			READ_PARSER_REG(PARSER_VIDEO_WP));
	} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		WRITE_VREG(HEVC_STREAM_WR_PTR,
			READ_PARSER_REG(PARSER_VIDEO_WP));
	}
}
1221
/*
 * Get the next frame from the input chain.
 */
/*
 * The VLD FIFO is 512 bytes; the video-buffer-level empty interrupt
 * threshold is set to 0x80 bytes.
 */
1229#define VLD_PADDING_SIZE 1024
1230#define HEVC_PADDING_SIZE (1024*16)
/*
 * Prepare the HW input (VLD or HEVC stream FIFO) for the next decode.
 *
 * Frame-based input: program the FIFO window around the next chunk and
 * return the chunk via *p together with its size; returns -1 and sets
 * *p = NULL when no chunk is queued.
 *
 * Stream-based input: restore a previously saved HW swap context (or
 * initialize the FIFO from input->start on first use), sync pointers
 * with the parser front end, set *p = NULL and return the number of
 * bytes currently available to the decoder.
 */
int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
{
	struct vdec_input_s *input = &vdec->input;
	struct vframe_chunk_s *chunk = NULL;
	struct vframe_block_list_s *block = NULL;
	int dummy;

	/* full reset to HW input */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

		/* reset VLD fifo for all vdec */
		WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
		WRITE_VREG(DOS_SW_RESET0, 0);

		/* dummy read-back acts as a barrier before re-enabling */
		dummy = READ_RESET_REG(RESET0_REGISTER);
		WRITE_VREG(POWER_CTL_VLD, 1 << 4);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
#if 0
		/*move to driver*/
		if (input_frame_based(input))
			WRITE_VREG(HEVC_STREAM_CONTROL, 0);

		/*
		 * 2: assist
		 * 3: parser
		 * 4: parser_state
		 * 8: dblk
		 * 11:mcpu
		 * 12:ccpu
		 * 13:ddr
		 * 14:iqit
		 * 15:ipp
		 * 17:qdct
		 * 18:mpred
		 * 19:sao
		 * 24:hevc_afifo
		 */
		WRITE_VREG(DOS_SW_RESET3,
			(1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
			(1<<17)|(1<<18)|(1<<19));
		WRITE_VREG(DOS_SW_RESET3, 0);
#endif
	}

	/*
	 *setup HW decoder input buffer (VLD context)
	 * based on input->type and input->target
	 */
	if (input_frame_based(input)) {
		chunk = vdec_input_next_chunk(&vdec->input);

		if (chunk == NULL) {
			*p = NULL;
			return -1;
		}

		block = chunk->block;

		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
				block->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
				round_down(block->start + chunk->offset,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP,
				round_down(block->start + chunk->offset,
					VDEC_FIFO_ALIGN));
			/* WP is placed past the chunk with padding so the
			 * level-empty interrupt does not fire mid-frame;
			 * wraps around the block if it overflows
			 */
			dummy = chunk->offset + chunk->size +
				VLD_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
				(0x11 << 16) | (1<<10) | (7<<3));

		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
				block->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
				chunk->offset);
			/* same padding trick as the VLD branch above */
			dummy = chunk->offset + chunk->size +
				HEVC_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			/* set endian */
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		}

		*p = chunk;
		return chunk->size;

	} else {
		/* stream based */
		u32 rp = 0, wp = 0, fifo_len = 0;
		int size;
		bool swap_valid = input->swap_valid;
		unsigned long swap_page_phys = input->swap_page_phys;

		if (vdec_dual(vdec) &&
			((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
			/* keep using previous input context */
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			/* pick whichever layer (master/slave) swapped out
			 * last, so the dual pair shares one read position
			 */
			if (master->input.last_swap_slave) {
				swap_valid = master->slave->input.swap_valid;
				swap_page_phys =
					master->slave->input.swap_page_phys;
			} else {
				swap_valid = master->input.swap_valid;
				swap_page_phys = master->input.swap_page_phys;
			}
		}

		if (swap_valid) {
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				if (vdec->format == VFORMAT_H264)
					SET_VREG_MASK(POWER_CTL_VLD,
						(1 << 9));

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* restore read side */
				WRITE_VREG(VLD_MEM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

				/* busy-wait for HW swap-in to finish */
				while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
					;
				WRITE_VREG(VLD_MEM_SWAP_CTL, 0);

				/* restore wrap count */
				WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
					input->stream_cookie);

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);
				fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);
			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);

				/* restore read side */
				WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

				/* busy-wait for HW swap-in to finish */
				while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
					& (1<<7))
					;
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

				/* restore stream offset */
				WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
					input->stream_cookie);

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;


				/* enable */

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(HEVC_STREAM_WR_PTR);

				/*pr_info("vdec: restore context\r\n");*/
			}

		} else {
			/* no saved context yet: program the FIFO window
			 * from scratch over the whole input buffer
			 */
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
					input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
					input->start + input->size - 8);
				WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					input->start);

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* set to manual mode */
				WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
				WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					READ_PARSER_REG(PARSER_VIDEO_WP));

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);

			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				WRITE_VREG(HEVC_STREAM_START_ADDR,
					input->start);
				WRITE_VREG(HEVC_STREAM_END_ADDR,
					input->start + input->size);
				WRITE_VREG(HEVC_STREAM_RD_PTR,
					input->start);
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					READ_PARSER_REG(PARSER_VIDEO_WP));

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				wp = READ_VREG(HEVC_STREAM_WR_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;

				/* enable */
			}
		}
		*p = NULL;
		/* available bytes = WP-RP distance (with ring wrap) plus
		 * whatever already sits in the HW prefetch FIFO
		 */
		if (wp >= rp)
			size = wp - rp + fifo_len;
		else
			size = wp + input->size - rp + fifo_len;
		if (size < 0) {
			pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
				__func__, input->size, wp, rp, fifo_len, size);
			size = 0;
		}
		return size;
	}
}
EXPORT_SYMBOL(vdec_prepare_input);
1487
/*
 * Start the HW stream-FIFO input of an active decoder.
 * No-op unless vdec is in VDEC_STATUS_ACTIVE.
 */
void vdec_enable_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (vdec->status != VDEC_STATUS_ACTIVE)
		return;

	if (input->target == VDEC_INPUT_TARGET_VLD)
		SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
	else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
		/* endian bits (7 << 4) differ between stream-based and
		 * frame-based input — see the frame path in
		 * vdec_prepare_input which sets the same mask
		 */
		if (vdec_stream_based(vdec))
			CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		else
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
	}
}
EXPORT_SYMBOL(vdec_enable_input);
1507
1508int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
1509{
1510 int r = vdec_input_set_buffer(&vdec->input, start, size);
1511
1512 if (r)
1513 return r;
1514
1515 if (vdec->slave)
1516 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
1517
1518 return r;
1519}
1520EXPORT_SYMBOL(vdec_set_input_buffer);
1521
1522/*
1523 * vdec_eos returns the possibility that there are
1524 * more input can be used by decoder through vdec_prepare_input
1525 * Note: this function should be called prior to vdec_vframe_dirty
1526 * by decoder driver to determine if EOS happens for stream based
1527 * decoding when there is no sufficient data for a frame
1528 */
1529bool vdec_has_more_input(struct vdec_s *vdec)
1530{
1531 struct vdec_input_s *input = &vdec->input;
1532
1533 if (!input->eos)
1534 return true;
1535
1536 if (input_frame_based(input))
1537 return vdec_input_next_input_chunk(input) != NULL;
1538 else {
1539 if (input->target == VDEC_INPUT_TARGET_VLD)
1540 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
1541 READ_PARSER_REG(PARSER_VIDEO_WP);
1542 else {
1543 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
1544 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
1545 }
1546 }
1547}
1548EXPORT_SYMBOL(vdec_has_more_input);
1549
/* Set the input prepare level (buffering threshold) for this vdec. */
void vdec_set_prepare_level(struct vdec_s *vdec, int level)
{
	vdec->input.prepare_level = level;
}
EXPORT_SYMBOL(vdec_set_prepare_level);
1555
/* Replace (not OR) the vdec flag word, e.g. VDEC_FLAG_SELF_INPUT_CONTEXT. */
void vdec_set_flag(struct vdec_s *vdec, u32 flag)
{
	vdec->flag = flag;
}
EXPORT_SYMBOL(vdec_set_flag);
1561
/*
 * Mark end-of-stream on this vdec (and its slave in a dual pair),
 * then signal the core semaphore so the scheduler re-evaluates —
 * presumably to let the decoder drain remaining data; confirm against
 * the core thread.
 */
void vdec_set_eos(struct vdec_s *vdec, bool eos)
{
	struct vdec_core_s *core = vdec_core;
	vdec->input.eos = eos;

	if (vdec->slave)
		vdec->slave->input.eos = eos;
	up(&core->sem);
}
EXPORT_SYMBOL(vdec_set_eos);
1572
#ifdef VDEC_DEBUG_SUPPORT
/* Debug aid: enable step mode (all step bits set) for single-stepping
 * the scheduler; vdec_release() spins until this is cleared again.
 */
void vdec_set_step_mode(void)
{
	step_mode = 0x1ff;
}
EXPORT_SYMBOL(vdec_set_step_mode);
#endif
1580
1581void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1582{
1583 if (vdec && next_vdec) {
1584 vdec->sched = 0;
1585 next_vdec->sched = 1;
1586 }
1587}
1588EXPORT_SYMBOL(vdec_set_next_sched);
1589
1590/*
1591 * Swap Context: S0 S1 S2 S3 S4
1592 * Sample sequence: M S M M S
1593 * Master Context: S0 S0 S2 S3 S3
1594 * Slave context: NA S1 S1 S2 S4
1595 * ^
1596 * ^
1597 * ^
1598 * the tricky part
1599 * If there are back to back decoding of master or slave
1600 * then the context of the counter part should be updated
1601 * with current decoder. In this example, S1 should be
1602 * updated to S2.
1603 * This is done by swap the swap_page and related info
1604 * between two layers.
1605 */
1606static void vdec_borrow_input_context(struct vdec_s *vdec)
1607{
1608 struct page *swap_page;
1609 unsigned long swap_page_phys;
1610 struct vdec_input_s *me;
1611 struct vdec_input_s *other;
1612
1613 if (!vdec_dual(vdec))
1614 return;
1615
1616 me = &vdec->input;
1617 other = &vdec_get_associate(vdec)->input;
1618
1619 /* swap the swap_context, borrow counter part's
1620 * swap context storage and update all related info.
1621 * After vdec_vframe_dirty, vdec_save_input_context
1622 * will be called to update current vdec's
1623 * swap context
1624 */
1625 swap_page = other->swap_page;
1626 other->swap_page = me->swap_page;
1627 me->swap_page = swap_page;
1628
1629 swap_page_phys = other->swap_page_phys;
1630 other->swap_page_phys = me->swap_page_phys;
1631 me->swap_page_phys = swap_page_phys;
1632
1633 other->swap_rp = me->swap_rp;
1634 other->streaming_rp = me->streaming_rp;
1635 other->stream_cookie = me->stream_cookie;
1636 other->swap_valid = me->swap_valid;
1637}
1638
/*
 * Mark a chunk (frame-based) or the current stream position as
 * consumed by the decoder. For stream input this also schedules a
 * context swap-out and pushes read/write pointers to/from the parser.
 */
void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
{
	if (chunk)
		chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;

	if (vdec_stream_based(vdec)) {
		vdec->input.swap_needed = true;

		if (vdec_dual(vdec)) {
			/* track back-to-back dirties on the same layer;
			 * on the second consecutive one, borrow the
			 * counterpart's swap context (see diagram above
			 * vdec_borrow_input_context)
			 */
			vdec_get_associate(vdec)->input.dirty_count = 0;
			vdec->input.dirty_count++;
			if (vdec->input.dirty_count > 1) {
				vdec->input.dirty_count = 1;
				vdec_borrow_input_context(vdec);
			}
		}

		/* for stream based mode, we update read and write pointer
		 * also in case decoder wants to keep working on decoding
		 * for more frames while input front end has more data
		 */
		vdec_sync_input_read(vdec);
		vdec_sync_input_write(vdec);

		vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
		vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
	}
}
EXPORT_SYMBOL(vdec_vframe_dirty);
1668
1669bool vdec_need_more_data(struct vdec_s *vdec)
1670{
1671 if (vdec_stream_based(vdec))
1672 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1673
1674 return false;
1675}
1676EXPORT_SYMBOL(vdec_need_more_data);
1677
1678
/*
 * Gate the HEVC (and, on G12A and later, HEVCB) masters off the DMC
 * bus and wait until the DMC reports the channel(s) idle, quiescing
 * decoder DDR traffic before the input context is swapped out.
 */
void hevc_wait_ddr(void)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /* hevc */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= (1 << 8); /* hevcb */

	/* clear the request-enable bits for the masked channels */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* NOTE(review): unbounded busy-wait — hangs here if the DMC
	 * never reports the channels idle; no timeout path exists
	 */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;
}
1697
/*
 * Swap the HW stream-input context out to this vdec's swap page and
 * record the read-side state (wrap count / byte count, read pointer,
 * total consumed bytes) so vdec_prepare_input() can restore it later.
 * Only acts for stream-based input that has been marked swap_needed by
 * vdec_vframe_dirty().
 */
void vdec_save_input_context(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
#endif

	if (input->target == VDEC_INPUT_TARGET_VLD)
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);

	if (input_stream_based(input) && (input->swap_needed)) {
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			/* trigger HW swap-out and busy-wait for done */
			WRITE_VREG(VLD_MEM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
			while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
				;
			WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
			vdec->input.stream_cookie =
				READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
			vdec->input.swap_rp =
				READ_VREG(VLD_MEM_VIFIFO_RP);
			/* total consumed = wraps * buffer size + RP,
			 * minus what is still buffered in the FIFO
			 */
			vdec->input.total_rd_count =
				(u64)vdec->input.stream_cookie *
				vdec->input.size + vdec->input.swap_rp -
				READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			/* trigger HW swap-out and busy-wait for done */
			WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);

			while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
				;
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

			vdec->input.stream_cookie =
				READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			vdec->input.swap_rp =
				READ_VREG(HEVC_STREAM_RD_PTR);
			/* maintain streaming_rp as a 64-bit byte count:
			 * bump the high word when the 32-bit HW counter
			 * wrapped, then splice in the new low word
			 */
			if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
				(vdec->input.streaming_rp & 0x80000000))
				vdec->input.streaming_rp += 1ULL << 32;
			vdec->input.streaming_rp &= 0xffffffffULL << 32;
			vdec->input.streaming_rp |= vdec->input.stream_cookie;
			vdec->input.total_rd_count = vdec->input.streaming_rp;
		}

		input->swap_valid = true;
		input->swap_needed = false;
		/*pr_info("vdec: save context\r\n");*/

		vdec_sync_input_read(vdec);

		if (vdec_dual(vdec)) {
			/* remember which layer swapped out last so
			 * vdec_prepare_input() restores the right context
			 */
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			master->input.last_swap_slave = (master->slave == vdec);
			/* pr_info("master->input.last_swap_slave = %d\n",
				master->input.last_swap_slave); */
		}

		hevc_wait_ddr();
	}
}
EXPORT_SYMBOL(vdec_save_input_context);
1764
1765void vdec_clean_input(struct vdec_s *vdec)
1766{
1767 struct vdec_input_s *input = &vdec->input;
1768
1769 while (!list_empty(&input->vframe_chunk_list)) {
1770 struct vframe_chunk_s *chunk =
1771 vdec_input_next_chunk(input);
1772 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
1773 vdec_input_release_chunk(input, chunk);
1774 else
1775 break;
1776 }
1777 vdec_save_input_context(vdec);
1778}
1779EXPORT_SYMBOL(vdec_clean_input);
1780
1781
/*
 * Restore the HW stream-input read side: if a swap context exists,
 * swap it back in; otherwise (re)initialize the FIFO window over the
 * whole input buffer. Stream-based input only; always returns 0.
 */
static int vdec_input_read_restore(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (!vdec_stream_based(vdec))
		return 0;

	if (!input->swap_valid) {
		/* no saved context: program the FIFO from scratch */
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
				input->start + input->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR,
				input->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR,
				input->start + input->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR,
				input->start);
		}
		return 0;
	}
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		/* restore read side */
		WRITE_VREG(VLD_MEM_SWAP_ADDR,
			input->swap_page_phys);

		/*swap active*/
		WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

		/*wait swap busy*/
		while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
			;

		WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		/* restore read side */
		WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
			input->swap_page_phys);
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

		/* wait for HW swap-in to finish */
		while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
			& (1<<7))
			;
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
	}

	return 0;
}
1840
1841
/*
 * Restore the input read side, sync read/write pointers with the
 * parser, and return the number of bytes currently available to the
 * decoder (WP-RP distance with ring wrap, plus HW FIFO content).
 */
int vdec_sync_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	u32 rp = 0, wp = 0, fifo_len = 0;
	int size;

	vdec_input_read_restore(vdec);
	vdec_sync_input_read(vdec);
	vdec_sync_input_write(vdec);
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		wp = READ_VREG(VLD_MEM_VIFIFO_WP);

	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		rp = READ_VREG(HEVC_STREAM_RD_PTR);
		wp = READ_VREG(HEVC_STREAM_WR_PTR);
		fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
				>> 16) & 0x7f;
	}
	if (wp >= rp)
		size = wp - rp + fifo_len;
	else
		size = wp + input->size - rp + fifo_len;
	if (size < 0) {
		pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
			__func__, input->size, wp, rp, fifo_len, size);
		size = 0;
	}
	return size;

}
EXPORT_SYMBOL(vdec_sync_input);
1874
1875const char *vdec_status_str(struct vdec_s *vdec)
1876{
1877 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1878 vdec_status_string[vdec->status] : "INVALID";
1879}
1880
1881const char *vdec_type_str(struct vdec_s *vdec)
1882{
1883 switch (vdec->type) {
1884 case VDEC_TYPE_SINGLE:
1885 return "VDEC_TYPE_SINGLE";
1886 case VDEC_TYPE_STREAM_PARSER:
1887 return "VDEC_TYPE_STREAM_PARSER";
1888 case VDEC_TYPE_FRAME_BLOCK:
1889 return "VDEC_TYPE_FRAME_BLOCK";
1890 case VDEC_TYPE_FRAME_CIRCULAR:
1891 return "VDEC_TYPE_FRAME_CIRCULAR";
1892 default:
1893 return "VDEC_TYPE_INVALID";
1894 }
1895}
1896
/*
 * Return the device-name string for this vdec's format.
 * vdec_device_name[] appears to hold two entries per format; index
 * format * 2 + 1 selects the second of the pair — presumably the
 * multi-instance variant; confirm against the table definition.
 */
const char *vdec_device_name_str(struct vdec_s *vdec)
{
	return vdec_device_name[vdec->format * 2 + 1];
}
EXPORT_SYMBOL(vdec_device_name_str);
1902
1903void walk_vdec_core_list(char *s)
1904{
1905 struct vdec_s *vdec;
1906 struct vdec_core_s *core = vdec_core;
1907 unsigned long flags;
1908
1909 pr_info("%s --->\n", s);
1910
1911 flags = vdec_core_lock(vdec_core);
1912
1913 if (list_empty(&core->connected_vdec_list)) {
1914 pr_info("connected vdec list empty\n");
1915 } else {
1916 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1917 pr_info("\tvdec (%p), status = %s\n", vdec,
1918 vdec_status_str(vdec));
1919 }
1920 }
1921
1922 vdec_core_unlock(vdec_core, flags);
1923}
1924EXPORT_SYMBOL(walk_vdec_core_list);
1925
1926/* insert vdec to vdec_core for scheduling,
1927 * for dual running decoders, connect/disconnect always runs in pairs
1928 */
/*
 * Insert a vdec (and its slave, if dual) into the core's connected
 * list for scheduling, moving it DISCONNECTED -> CONNECTED and
 * signaling the core thread. Idempotent: returns 0 without action if
 * the vdec is not currently DISCONNECTED.
 */
int vdec_connect(struct vdec_s *vdec)
{
	unsigned long flags;

	//trace_vdec_connect(vdec);/*DEBUG_TMP*/

	if (vdec->status != VDEC_STATUS_DISCONNECTED)
		return 0;

	vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
	vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);

	/* re-armed here so vdec_disconnect() can wait on it later */
	init_completion(&vdec->inactive_done);

	if (vdec->slave) {
		vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
		vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);

		init_completion(&vdec->slave->inactive_done);
	}

	flags = vdec_core_lock(vdec_core);

	list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);

	if (vdec->slave) {
		list_add_tail(&vdec->slave->list,
			&vdec_core->connected_vdec_list);
	}

	vdec_core_unlock(vdec_core, flags);

	/* wake the core scheduler to pick up the new decoder */
	up(&vdec_core->sem);

	return 0;
}
EXPORT_SYMBOL(vdec_connect);
1966
1967/* remove vdec from vdec_core scheduling */
/*
 * Remove a vdec (and its dual counterpart) from core scheduling:
 * request DISCONNECTED as the next status and wait (up to 2 s each)
 * for the core thread to complete inactive_done. Logs and returns 0
 * on timeout rather than failing.
 */
int vdec_disconnect(struct vdec_s *vdec)
{
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
#endif
	//trace_vdec_disconnect(vdec);/*DEBUG_TMP*/

	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
		(vdec->status != VDEC_STATUS_ACTIVE)) {
		return 0;
	}
	mutex_lock(&vdec_mutex);
	/*
	 *when a vdec is under the management of scheduler
	 * the status change will only be from vdec_core_thread
	 */
	vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);

	if (vdec->slave)
		vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
	else if (vdec->master)
		vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
	mutex_unlock(&vdec_mutex);
	/* wake the core thread so it processes the status change */
	up(&vdec_core->sem);

	if(!wait_for_completion_timeout(&vdec->inactive_done,
		msecs_to_jiffies(2000)))
		goto discon_timeout;

	if (vdec->slave) {
		if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	} else if (vdec->master) {
		if(!wait_for_completion_timeout(&vdec->master->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	}

	return 0;
discon_timeout:
	pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
	return 0;
}
EXPORT_SYMBOL(vdec_disconnect);
2013
2014/* release vdec structure */
/*
 * Free a vdec structure: release its input resources, flush profiling
 * data, return its ID to the IDA pool, free the allocation and drop
 * the global instance count. Caller must have disconnected it first.
 */
int vdec_destroy(struct vdec_s *vdec)
{
	//trace_vdec_destroy(vdec);/*DEBUG_TMP*/

	vdec_input_release(&vdec->input);

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile_flush(vdec);
#endif
	ida_simple_remove(&vdec_core->ida, vdec->id);
	vfree(vdec);

	atomic_dec(&vdec_core->vdec_nr);

	return 0;
}
EXPORT_SYMBOL(vdec_destroy);
2032
2033/*
2034 *register vdec_device
2035 * create output, vfm or create ionvideo output
2036 */
/*
 * Register and initialize a decoder instance:
 *  - resolve the platform-device name for the format and register the
 *    decoder device (probes the actual codec driver);
 *  - set up the input (VLD vs HEVC target) and instance callbacks;
 *  - build the VFM output pipeline: either the default VFM path
 *    (stream-based single decoders) or a per-instance map chain to
 *    ionvideo / amlvideo / v4lvideo receivers selected by
 *    frame_base_video_path;
 *  - register as a vframe provider and send the frame-rate hint.
 *
 * 'is_4k' is currently unused in this body. Returns 0 on success or a
 * negative errno; on failure inited_vcodec_num is rolled back where it
 * was incremented.
 */
s32 vdec_init(struct vdec_s *vdec, int is_4k)
{
	int r = 0;
	struct vdec_s *p = vdec;
	const char *dev_name;
	int id = PLATFORM_DEVID_AUTO;/*if have used my self*/

	dev_name = get_dev_name(vdec_single(vdec), vdec->format);

	if (dev_name == NULL)
		return -ENODEV;

	pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
		dev_name, vdec_type_str(vdec));

	/*
	 *todo: VFM patch control should be configurable,
	 * for now all stream based input uses default VFM path.
	 */
	if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
		/* only one decoder may own the default VFM path */
		if (vdec_core->vfm_vdec == NULL) {
			pr_debug("vdec_init set vfm decoder %p\n", vdec);
			vdec_core->vfm_vdec = vdec;
		} else {
			pr_info("vdec_init vfm path busy.\n");
			return -EBUSY;
		}
	}

	mutex_lock(&vdec_mutex);
	inited_vcodec_num++;
	mutex_unlock(&vdec_mutex);

	/* HEVC/AVS2/VP9 use the HEVC stream engine; all else the VLD */
	vdec_input_set_type(&vdec->input, vdec->type,
		(vdec->format == VFORMAT_HEVC ||
		vdec->format == VFORMAT_AVS2 ||
		vdec->format == VFORMAT_VP9) ?
			VDEC_INPUT_TARGET_HEVC :
			VDEC_INPUT_TARGET_VLD);
	if (vdec_single(vdec))
		vdec_enable_DMC(vdec);
	p->cma_dev = vdec_core->cma_dev;
	p->get_canvas = get_canvas;
	p->get_canvas_ex = get_canvas_ex;
	p->free_canvas_ex = free_canvas_ex;
	p->vdec_fps_detec = vdec_fps_detec;
	atomic_set(&p->inirq_flag, 0);
	atomic_set(&p->inirq_thread_flag, 0);
	/* todo */
	if (!vdec_dual(vdec))
		p->use_vfm_path = vdec_stream_based(vdec);
	/* vdec_dev_reg.flag = 0; */
	if (vdec->id >= 0)
		id = vdec->id;
	p->parallel_dec = parallel_decode;
	vdec_core->parallel_dec = parallel_decode;
	vdec->canvas_mode = CANVAS_BLKMODE_32X32;
#ifdef FRAME_CHECK
	vdec_frame_check_init(vdec);
#endif
	/* platform data is a copy of the pointer 'p' itself; the codec
	 * driver's probe retrieves the vdec instance through it
	 */
	p->dev = platform_device_register_data(
		&vdec_core->vdec_core_platform_device->dev,
		dev_name,
		id,
		&p, sizeof(struct vdec_s *));

	if (IS_ERR(p->dev)) {
		r = PTR_ERR(p->dev);
		pr_err("vdec: Decoder device %s register failed (%d)\n",
			dev_name, r);

		mutex_lock(&vdec_mutex);
		inited_vcodec_num--;
		mutex_unlock(&vdec_mutex);

		goto error;
	} else if (!p->dev->dev.driver) {
		pr_info("vdec: Decoder device %s driver probe failed.\n",
			dev_name);
		r = -ENODEV;

		goto error;
	}

	if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
		r = -ENODEV;
		pr_err("vdec: Decoder device not handled (%s)\n", dev_name);

		mutex_lock(&vdec_mutex);
		inited_vcodec_num--;
		mutex_unlock(&vdec_mutex);

		goto error;
	}

	if (p->use_vfm_path) {
		vdec->vf_receiver_inst = -1;
		vdec->vfm_map_id[0] = 0;
	} else if (!vdec_dual(vdec)) {
		/* create IONVIDEO instance and connect decoder's
		 * vf_provider interface to it
		 */
		if (p->type != VDEC_TYPE_FRAME_BLOCK) {
			r = -ENODEV;
			pr_err("vdec: Incorrect decoder type\n");

			mutex_lock(&vdec_mutex);
			inited_vcodec_num--;
			mutex_unlock(&vdec_mutex);

			goto error;
		}
		if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
#if 1
			r = ionvideo_assign_map(&vdec->vf_receiver_name,
				&vdec->vf_receiver_inst);
#else
			/*
			 * temporarily just use decoder instance ID as iondriver ID
			 * to solve OMX iondriver instance number check time sequence
			 * only the limitation is we can NOT mix different video
			 * decoders since same ID will be used for different decoder
			 * formats.
			 */
			vdec->vf_receiver_inst = p->dev->id;
			r = ionvideo_assign_map(&vdec->vf_receiver_name,
				&vdec->vf_receiver_inst);
#endif
			if (r < 0) {
				pr_err("IonVideo frame receiver allocation failed.\n");

				mutex_lock(&vdec_mutex);
				inited_vcodec_num--;
				mutex_unlock(&vdec_mutex);

				goto error;
			}

			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name);
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path ==
			FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
			/* secure streams skip ppmgr/deinterlace */
			if (vdec_secure(vdec)) {
				snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
					"%s %s", vdec->vf_provider_name,
					"amlvideo amvideo");
			} else {
				snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
					"%s %s", vdec->vf_provider_name,
					"amlvideo ppmgr deinterlace amvideo");
			}
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path ==
			FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				"aml_video.1 videosync.0 videopip");
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_OSD) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name);
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				"amvideo");
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name, "amvideo");
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path ==
			FRAME_BASE_PATH_DI_V4LVIDEO) {
			r = v4lvideo_assign_map(&vdec->vf_receiver_name,
				&vdec->vf_receiver_inst);
			if (r < 0) {
				pr_err("V4lVideo frame receiver allocation failed.\n");
				mutex_lock(&vdec_mutex);
				inited_vcodec_num--;
				mutex_unlock(&vdec_mutex);
				goto error;
			}
			/* choose deinterlace / dimulti.N / none depending
			 * on receiver instance and di availability
			 */
			if (!v4lvideo_add_di || vdec_secure(vdec))
				snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
					"%s %s", vdec->vf_provider_name,
					vdec->vf_receiver_name);
			else {
				if (vdec->vf_receiver_inst == 0)
					snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
						"%s %s %s", vdec->vf_provider_name,
						"deinterlace",
						vdec->vf_receiver_name);
				else if (vdec->vf_receiver_inst < max_di_instance)
					snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
						"%s %s%d %s", vdec->vf_provider_name,
						"dimulti.",
						vdec->vf_receiver_inst,
						vdec->vf_receiver_name);
				else
					snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
						"%s %s", vdec->vf_provider_name,
						vdec->vf_receiver_name);
			}
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		}

		if (vfm_map_add(vdec->vfm_map_id,
			vdec->vfm_map_chain) < 0) {
			r = -ENOMEM;
			pr_err("Decoder pipeline map creation failed %s.\n",
				vdec->vfm_map_id);
			vdec->vfm_map_id[0] = 0;

			mutex_lock(&vdec_mutex);
			inited_vcodec_num--;
			mutex_unlock(&vdec_mutex);

			goto error;
		}

		pr_debug("vfm map %s created\n", vdec->vfm_map_id);

		/*
		 *assume IONVIDEO driver already have a few vframe_receiver
		 * registered.
		 * 1. Call iondriver function to allocate a IONVIDEO path and
		 * provide receiver's name and receiver op.
		 * 2. Get decoder driver's provider name from driver instance
		 * 3. vfm_map_add(name, "<decoder provider name>
		 * <iondriver receiver name>"), e.g.
		 * vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
		 * 4. vf_reg_provider and vf_reg_receiver
		 * Note: the decoder provider's op uses vdec as op_arg
		 * the iondriver receiver's op uses iondev device as
		 * op_arg
		 */

	}

	if (!vdec_single(vdec)) {
		vf_reg_provider(&p->vframe_provider);

		vf_notify_receiver(p->vf_provider_name,
			VFRAME_EVENT_PROVIDER_START,
			vdec);

		/* first decoder to get here owns the frame-rate hint */
		if (vdec_core->hint_fr_vdec == NULL)
			vdec_core->hint_fr_vdec = vdec;

		if (vdec_core->hint_fr_vdec == vdec) {
			if (p->sys_info->rate != 0) {
				if (!vdec->is_reset) {
					vf_notify_receiver(p->vf_provider_name,
						VFRAME_EVENT_PROVIDER_FR_HINT,
						(void *)
						((unsigned long)
						p->sys_info->rate));
					vdec->fr_hint_state = VDEC_HINTED;
				}
			} else {
				vdec->fr_hint_state = VDEC_NEED_HINT;
			}
		}
	}

	p->dolby_meta_with_el = 0;
	pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
	vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
		&vdec->input,
		vdec->sys_info->width,
		vdec->sys_info->height);
	/* vdec is now ready to be active */
	vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
	if (p->use_vfm_path) {
		/* QoS ring buffers for frame-info reporting; allocation
		 * failure is only logged, not fatal
		 */
		frame_info_buf_in = (struct vframe_qos_s *)
			kmalloc(QOS_FRAME_NUM*sizeof(struct vframe_qos_s), GFP_KERNEL);
		if (!frame_info_buf_in)
			pr_err("kmalloc: frame_info_buf_in failed\n");
		else
			memset(frame_info_buf_in, 0,
				QOS_FRAME_NUM*sizeof(struct vframe_qos_s));

		frame_info_buf_out = (struct vframe_qos_s *)
			kmalloc(QOS_FRAME_NUM*sizeof(struct vframe_qos_s), GFP_KERNEL);
		if (!frame_info_buf_out)
			pr_err("kmalloc: frame_info_buf_out failed\n");
		else
			memset(frame_info_buf_out, 0,
				QOS_FRAME_NUM*sizeof(struct vframe_qos_s));
		frame_qos_wr = 0;
		frame_qos_rd = 0;
	}
	return 0;

error:
	return r;
}
EXPORT_SYMBOL(vdec_init);
2346
2347/* vdec_create/init/release/destroy are applied to both dual running decoders
2348 */
/*
 * Tear down a decoder instance: stop it being scheduled, unregister its
 * vframe provider, remove its vfm path, wait out any in-flight ISRs and
 * free per-instance resources.  Applied to both decoders of a dual
 * (master/slave) pair.
 */
void vdec_release(struct vdec_s *vdec)
{
	//trace_vdec_release(vdec);/*DEBUG_TMP*/
#ifdef VDEC_DEBUG_SUPPORT
	/* In single-step debug mode, block until the debugger clears
	 * step_mode so the instance can still be inspected.
	 */
	if (step_mode) {
		pr_info("VDEC_DEBUG: in step_mode, wait release\n");
		while (step_mode)
			udelay(10);
		pr_info("VDEC_DEBUG: step_mode is clear\n");
	}
#endif
	/* Remove the instance from the core scheduler first so it can
	 * no longer be elected to run.
	 */
	vdec_disconnect(vdec);

	if (vdec->vframe_provider.name) {
		if (!vdec_single(vdec)) {
			/* Only the instance that issued the frame-rate
			 * hint (hint_fr_vdec) sends the matching END hint.
			 */
			if (vdec_core->hint_fr_vdec == vdec
			&& vdec->fr_hint_state == VDEC_HINTED)
				vf_notify_receiver(
					vdec->vf_provider_name,
					VFRAME_EVENT_PROVIDER_FR_END_HINT,
					NULL);
			vdec->fr_hint_state = VDEC_NO_NEED_HINT;
		}
		vf_unreg_provider(&vdec->vframe_provider);
	}

	/* Drop core-level back references to this instance. */
	if (vdec_core->vfm_vdec == vdec)
		vdec_core->vfm_vdec = NULL;

	if (vdec_core->hint_fr_vdec == vdec)
		vdec_core->hint_fr_vdec = NULL;

	/* Remove the vfm path created in vdec_init, if any. */
	if (vdec->vf_receiver_inst >= 0) {
		if (vdec->vfm_map_id[0]) {
			vfm_map_remove(vdec->vfm_map_id);
			vdec->vfm_map_id[0] = 0;
		}
	}

	/* Yield until no ISR or threaded ISR is still executing against
	 * this instance (flags are set/cleared in vdec_isr and
	 * vdec_thread_isr).
	 */
	while ((atomic_read(&vdec->inirq_flag) > 0)
		|| (atomic_read(&vdec->inirq_thread_flag) > 0))
		schedule();

#ifdef FRAME_CHECK
	vdec_frame_check_exit(vdec);
#endif
	vdec_fps_clear(vdec->id);
	/* Last remaining instance: shut DMC access off for the decoder. */
	if (atomic_read(&vdec_core->vdec_nr) == 1)
		vdec_disable_DMC(vdec);
	platform_device_unregister(vdec->dev);
	pr_debug("vdec_release instance %p, total %d\n", vdec,
		atomic_read(&vdec_core->vdec_nr));
	if (vdec->use_vfm_path) {
		/* Free the QoS ring buffers allocated in vdec_init and
		 * reset the ring indices.
		 */
		kfree(frame_info_buf_in);
		frame_info_buf_in = NULL;
		kfree(frame_info_buf_out);
		frame_info_buf_out = NULL;
		frame_qos_wr = 0;
		frame_qos_rd = 0;
	}
	vdec_destroy(vdec);

	mutex_lock(&vdec_mutex);
	inited_vcodec_num--;
	mutex_unlock(&vdec_mutex);

}
EXPORT_SYMBOL(vdec_release);
2417
2418/* For dual running decoders, vdec_reset is only called with master vdec.
2419 */
2420int vdec_reset(struct vdec_s *vdec)
2421{
2422 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
2423
2424 vdec_disconnect(vdec);
2425
2426 if (vdec->vframe_provider.name)
2427 vf_unreg_provider(&vdec->vframe_provider);
2428
2429 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
2430 vf_unreg_provider(&vdec->slave->vframe_provider);
2431
2432 if (vdec->reset) {
2433 vdec->reset(vdec);
2434 if (vdec->slave)
2435 vdec->slave->reset(vdec->slave);
2436 }
2437 vdec->mc_loaded = 0;/*clear for reload firmware*/
2438 vdec_input_release(&vdec->input);
2439
2440 vdec_input_init(&vdec->input, vdec);
2441
2442 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
2443 vdec->sys_info->height);
2444
2445 vf_reg_provider(&vdec->vframe_provider);
2446 vf_notify_receiver(vdec->vf_provider_name,
2447 VFRAME_EVENT_PROVIDER_START, vdec);
2448
2449 if (vdec->slave) {
2450 vf_reg_provider(&vdec->slave->vframe_provider);
2451 vf_notify_receiver(vdec->slave->vf_provider_name,
2452 VFRAME_EVENT_PROVIDER_START, vdec->slave);
2453 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
2454 }
2455
2456 vdec_connect(vdec);
2457
2458 return 0;
2459}
2460EXPORT_SYMBOL(vdec_reset);
2461
/*
 * Historically freed CMA-backed decoder buffers; currently a no-op
 * apart from serializing against vdec_mutex.  The early-out for
 * still-initialized codec instances is kept below, disabled.
 */
void vdec_free_cmabuf(void)
{
	mutex_lock(&vdec_mutex);

	/*if (inited_vcodec_num > 0) {
		mutex_unlock(&vdec_mutex);
		return;
	}*/
	mutex_unlock(&vdec_mutex);
}
2472
2473void vdec_core_request(struct vdec_s *vdec, unsigned long mask)
2474{
2475 vdec->core_mask |= mask;
2476
2477 if (vdec->slave)
2478 vdec->slave->core_mask |= mask;
2479 if (vdec_core->parallel_dec == 1) {
2480 if (mask & CORE_MASK_COMBINE)
2481 vdec_core->vdec_combine_flag++;
2482 }
2483
2484}
2485EXPORT_SYMBOL(vdec_core_request);
2486
2487int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
2488{
2489 vdec->core_mask &= ~mask;
2490
2491 if (vdec->slave)
2492 vdec->slave->core_mask &= ~mask;
2493 if (vdec_core->parallel_dec == 1) {
2494 if (mask & CORE_MASK_COMBINE)
2495 vdec_core->vdec_combine_flag--;
2496 }
2497 return 0;
2498}
2499EXPORT_SYMBOL(vdec_core_release);
2500
2501bool vdec_core_with_input(unsigned long mask)
2502{
2503 enum vdec_type_e type;
2504
2505 for (type = VDEC_1; type < VDEC_MAX; type++) {
2506 if ((mask & (1 << type)) && cores_with_input[type])
2507 return true;
2508 }
2509
2510 return false;
2511}
2512
2513void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
2514{
2515 unsigned long i;
2516 unsigned long t = mask;
2517 mutex_lock(&vdec_mutex);
2518 while (t) {
2519 i = __ffs(t);
2520 clear_bit(i, &vdec->active_mask);
2521 t &= ~(1 << i);
2522 }
2523
2524 if (vdec->active_mask == 0)
2525 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
2526
2527 mutex_unlock(&vdec_mutex);
2528}
2529EXPORT_SYMBOL(vdec_core_finish_run);
2530/*
2531 * find what core resources are available for vdec
2532 */
2533static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
2534 unsigned long active_mask)
2535{
2536 unsigned long mask = vdec->core_mask &
2537 ~CORE_MASK_COMBINE;
2538
2539 if (vdec->core_mask & CORE_MASK_COMBINE) {
2540 /* combined cores must be granted together */
2541 if ((mask & ~active_mask) == mask)
2542 return mask;
2543 else
2544 return 0;
2545 } else
2546 return mask & ~vdec->sched_mask & ~active_mask;
2547}
2548
2549/*
2550 *Decoder callback
2551 * Each decoder instance uses this callback to notify status change, e.g. when
2552 * decoder finished using HW resource.
2553 * a sample callback from decoder's driver is following:
2554 *
2555 * if (hw->vdec_cb) {
2556 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
2557 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
2558 * }
2559 */
2560static void vdec_callback(struct vdec_s *vdec, void *data)
2561{
2562 struct vdec_core_s *core = (struct vdec_core_s *)data;
2563
2564#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2565 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
2566#endif
2567
2568 up(&core->sem);
2569}
2570
/*
 * Top-half IRQ handler shared by the decoder cores.  Resolves which
 * vdec instance the interrupt belongs to, then dispatches either to a
 * device-level override (c->dev_isr) or the instance's irq_handler.
 * inirq_flag brackets the handler so vdec_release() can wait for any
 * in-flight ISR before tearing the instance down.
 */
static irqreturn_t vdec_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	/* Parallel mode: pick the instance by IRQ line — IRQ_0 is the
	 * HEVC core, IRQ_1 the legacy VDEC core.
	 */
	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		/* Mark "inside ISR" and timestamp for the latency check
		 * done in vdec_thread_isr.
		 */
		atomic_set(&vdec->inirq_flag, 1);
		vdec->isr_ns = local_clock();
	}
	/* A registered device-level ISR takes precedence over the
	 * per-instance handler.
	 */
	if (c->dev_isr) {
		ret = c->dev_isr(irq, c->dev_id);
		goto isr_done;
	}

	/* Only the three known ISR contexts are serviced here. */
	if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
		(c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
		(c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
#if 0
		pr_warn("vdec interrupt w/o a valid receiver\n");
#endif
		goto isr_done;
	}

	if (!vdec) {
#if 0
		pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
			core);
#endif
		goto isr_done;
	}

	if (!vdec->irq_handler) {
#if 0
		pr_warn("vdec instance has no irq handle.\n");
#endif
		goto isr_done;
	}

	ret = vdec->irq_handler(vdec, c->index);
isr_done:
	if (vdec)
		atomic_set(&vdec->inirq_flag, 0);
	return ret;
}
2626
/*
 * Threaded (bottom-half) IRQ handler.  Same instance-resolution logic
 * as vdec_isr; additionally measures the hard-IRQ-to-thread latency
 * and complains when it exceeds 10ms.  inirq_thread_flag brackets the
 * handler so vdec_release() can wait for it.
 */
static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		u32 isr2tfn = 0;

		atomic_set(&vdec->inirq_thread_flag, 1);
		vdec->tfn_ns = local_clock();
		/* Latency from hard ISR (isr_ns, stamped in vdec_isr) to
		 * this threaded handler; warn above 10ms.
		 */
		isr2tfn = vdec->tfn_ns - vdec->isr_ns;
		if (isr2tfn > 10000000)
			pr_err("!!!!!!! %s vdec_isr to %s took %uns !!!\n",
				vdec->vf_provider_name, __func__, isr2tfn);
	}
	/* Device-level threaded ISR override wins, as in vdec_isr. */
	if (c->dev_threaded_isr) {
		ret = c->dev_threaded_isr(irq, c->dev_id);
		goto thread_isr_done;
	}
	if (!vdec)
		goto thread_isr_done;

	if (!vdec->threaded_irq_handler)
		goto thread_isr_done;
	ret = vdec->threaded_irq_handler(vdec, c->index);
thread_isr_done:
	if (vdec)
		atomic_set(&vdec->inirq_thread_flag, 0);
	return ret;
}
2667
/*
 * Decide whether vdec can run on the cores offered in mask.
 * Returns the subset of mask the instance is actually ready to use,
 * or 0/false when it must not be scheduled yet (wrong state, crc
 * error block, input underrun, stream buffer below prepare level,
 * or step-mode debugging gates it out).
 */
unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
{
	unsigned long ready_mask;
	struct vdec_input_s *input = &vdec->input;

	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
		(vdec->status != VDEC_STATUS_ACTIVE))
		return false;

	if (!vdec->run_ready)
		return false;

	/* when crc32 error, block at error frame */
	if (vdec->vfc.err_crc_block)
		return false;

	/* Dual decoders only run once explicitly marked schedulable. */
	if ((vdec->slave || vdec->master) &&
		(vdec->sched == 0))
		return false;
#ifdef VDEC_DEBUG_SUPPORT
	inc_profi_count(mask, vdec->check_count);
#endif
	if (vdec_core_with_input(mask)) {
		/* check frame based input underrun */
		if (input && !input->eos && input_frame_based(input)
			&& (!vdec_input_next_chunk(input))) {
#ifdef VDEC_DEBUG_SUPPORT
			inc_profi_count(mask, vdec->input_underrun_count);
#endif
			return false;
		}
		/* check streaming prepare level threshold if not EOS */
		if (input && input_stream_based(input) && !input->eos) {
			u32 rp, wp, level;

			/* Compute the stream buffer fill level from the
			 * parser read/write pointers (ring buffer).
			 */
			rp = READ_PARSER_REG(PARSER_VIDEO_RP);
			wp = READ_PARSER_REG(PARSER_VIDEO_WP);
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;

			/* Below the prepare threshold and fewer than two
			 * buffered PTS records: ask for more data instead
			 * of running.
			 */
			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				vdec->need_more_data |= VDEC_NEED_MORE_DATA;
#ifdef VDEC_DEBUG_SUPPORT
				inc_profi_count(mask, vdec->input_underrun_count);
				/* step_mode bit 9: allow the stepped
				 * instance to run anyway.
				 */
				if (step_mode & 0x200) {
					if ((step_mode & 0xff) == vdec->id) {
						step_mode |= 0xff;
						return mask;
					}
				}
#endif
				return false;
			} else if (level > input->prepare_level)
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
		}
	}

	/* Step-mode debugging: only the instance whose id matches the
	 * low byte of step_mode may proceed; setting the low byte to
	 * 0xff then blocks further runs until the debugger re-arms it.
	 */
	if (step_mode) {
		if ((step_mode & 0xff) != vdec->id)
			return 0;
		step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
	}

	/*step_mode &= ~0xff; not work for id of 0, removed*/

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
#endif

	/* Let the decoder driver veto cores it is not ready for. */
	ready_mask = vdec->run_ready(vdec, mask) & mask;
#ifdef VDEC_DEBUG_SUPPORT
	if (ready_mask != mask)
		inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
#endif
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (ready_mask)
		vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
#endif

	return ready_mask;
}
2752
2753/* bridge on/off vdec's interrupt processing to vdec core */
2754static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
2755 bool enable)
2756{
2757 enum vdec_type_e type;
2758
2759 for (type = VDEC_1; type < VDEC_MAX; type++) {
2760 if (mask & (1 << type)) {
2761 struct vdec_isr_context_s *c =
2762 &vdec_core->isr_context[cores_int[type]];
2763 if (enable)
2764 c->vdec = vdec;
2765 else if (c->vdec == vdec)
2766 c->vdec = NULL;
2767 }
2768 }
2769}
2770
2771/*
2772 * Set up secure protection for each decoder instance running.
2773 * Note: The operation from REE side only resets memory access
2774 * to a default policy and even a non_secure type will still be
2775 * changed to secure type automatically when secure source is
2776 * detected inside TEE.
2777 * Perform need_more_data checking and set flag is decoder
2778 * is not consuming data.
2779 */
/*
 * Pre-run setup for the cores in mask: route their interrupts to this
 * instance, program TEE secure/non-secure access for the input target,
 * and update the need-more-data flag state machine for stream input.
 * (See the block comment above for the TEE security semantics.)
 */
void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
{
	struct vdec_input_s *input = &vdec->input;
	int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
			DMC_DEV_TYPE_NON_SECURE;

	vdec_route_interrupt(vdec, mask, true);

	/* Cores without an input side need none of the setup below. */
	if (!vdec_core_with_input(mask))
		return;

	if (secure && vdec_stream_based(vdec) && force_nosecure_even_drm)
	{
		/* Verimatrix ultra webclient (HLS) was played in drmmode and used hw demux. In drmmode VDEC only can access secure.
		Now HW demux parsed es data to no-secure buffer. So the VDEC input was no-secure, VDEC playback failed. Forcing
		use nosecure for verimatrix webclient HLS. If in the future HW demux can parse es data to secure buffer, make
		VDEC r/w secure.*/
		secure = 0;
		//pr_debug("allow VDEC can access nosecure even in drmmode\n");
	}
	if (input->target == VDEC_INPUT_TARGET_VLD)
		tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
	else if (input->target == VDEC_INPUT_TARGET_HEVC)
		tee_config_device_secure(DMC_DEV_ID_HEVC, secure);

	/* If the previous run consumed no new data (RUN set, DIRTY not
	 * set since), flag that the decoder needs more data.
	 */
	if (vdec_stream_based(vdec) &&
		((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
		(vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
		vdec->need_more_data |= VDEC_NEED_MORE_DATA;
	}

	vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
	vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
}
2814
2815
2816/* struct vdec_core_shread manages all decoder instance in active list. When
2817 * a vdec is added into the active list, it can onlt be in two status:
2818 * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run)
2819 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
2820 * Removing a decoder from active list is only performed within core thread.
2821 * Adding a decoder into active list is performed from user thread.
2822 */
/*
 * Core scheduler thread (SCHED_FIFO).  Each wakeup (core->sem):
 *  1) reclaim cores that decoders released since last pass,
 *  2) move decoders pending disconnect off the connected list,
 *  3) elect the next runnable decoder (round-robin from last_vdec),
 *  4) grant it cores and start it via vdec->run,
 *  5) complete disconnects and, if idle but data is pending,
 *     self-wake after a short sleep.
 */
static int vdec_core_thread(void *data)
{
	struct vdec_core_s *core = (struct vdec_core_s *)data;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
	unsigned long flags;
	int i;

	sched_setscheduler(current, SCHED_FIFO, &param);

	allow_signal(SIGTERM);

	while (down_interruptible(&core->sem) == 0) {
		struct vdec_s *vdec, *tmp, *worker;
		unsigned long sched_mask = 0;
		LIST_HEAD(disconnecting_list);

		if (kthread_should_stop())
			break;
		mutex_lock(&vdec_mutex);

		/* Refresh the powered-cores bitmap from the per-core
		 * power reference counts (parallel mode only).
		 */
		if (core->parallel_dec == 1) {
			for (i = VDEC_1; i < VDEC_MAX; i++) {
				core->power_ref_mask =
					core->power_ref_count[i] > 0 ?
					(core->power_ref_mask | (1 << i)) :
					(core->power_ref_mask & ~(1 << i));
			}
		}
		/* clean up previous active vdec's input */
		list_for_each_entry(vdec, &core->connected_vdec_list, list) {
			/* Cores that were granted (sched_mask) but are no
			 * longer active: the decoder finished with them.
			 */
			unsigned long mask = vdec->sched_mask &
				(vdec->active_mask ^ vdec->sched_mask);

			vdec_route_interrupt(vdec, mask, false);

#ifdef VDEC_DEBUG_SUPPORT
			update_profi_clk_stop(vdec, mask, get_current_clk());
#endif
			/*
			 * If decoder released some core resources (mask), then
			 * check if these core resources are associated
			 * with any input side and do input clean up accordingly
			 */
			if (vdec_core_with_input(mask)) {
				struct vdec_input_s *input = &vdec->input;

				/* Drop fully-consumed chunks from the head
				 * of the input queue.
				 */
				while (!list_empty(
					&input->vframe_chunk_list)) {
					struct vframe_chunk_s *chunk =
						vdec_input_next_chunk(input);
					if (chunk && (chunk->flag &
						VFRAME_CHUNK_FLAG_CONSUMED))
						vdec_input_release_chunk(input,
							chunk);
					else
						break;
				}

				vdec_save_input_context(vdec);
			}

			vdec->sched_mask &= ~mask;
			core->sched_mask &= ~mask;
		}
		vdec_update_buff_status();
		/*
		 *todo:
		 * this is the case when the decoder is in active mode and
		 * the system side wants to stop it. Currently we rely on
		 * the decoder instance to go back to VDEC_STATUS_CONNECTED
		 * from VDEC_STATUS_ACTIVE by its own. However, if for some
		 * reason the decoder can not exist by itself (dead decoding
		 * or whatever), then we may have to add another vdec API
		 * to kill the vdec and release its HW resource and make it
		 * become inactive again.
		 * if ((core->active_vdec) &&
		 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
		 * }
		 */

		/* check disconnected decoders */
		flags = vdec_core_lock(vdec_core);
		list_for_each_entry_safe(vdec, tmp,
			&core->connected_vdec_list, list) {
			if ((vdec->status == VDEC_STATUS_CONNECTED) &&
			    (vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
				/* Clear all core references before moving
				 * the instance to the disconnecting list.
				 */
				if (core->parallel_dec == 1) {
					if (vdec_core->active_hevc == vdec)
						vdec_core->active_hevc = NULL;
					if (vdec_core->active_vdec == vdec)
						vdec_core->active_vdec = NULL;
				}
				if (core->last_vdec == vdec)
					core->last_vdec = NULL;
				list_move(&vdec->list, &disconnecting_list);
			}
		}
		vdec_core_unlock(vdec_core, flags);
		mutex_unlock(&vdec_mutex);
		/* elect next vdec to be scheduled */
		vdec = core->last_vdec;
		if (vdec) {
			/* Round-robin: start from the entry after the one
			 * that ran last.
			 */
			vdec = list_entry(vdec->list.next, struct vdec_s, list);
			list_for_each_entry_from(vdec,
				&core->connected_vdec_list, list) {
				sched_mask = vdec_schedule_mask(vdec,
					core->sched_mask);
				if (!sched_mask)
					continue;
				sched_mask = vdec_ready_to_run(vdec,
					sched_mask);
				if (sched_mask)
					break;
			}

			/* Reached the list head: nothing after last_vdec
			 * was runnable.
			 */
			if (&vdec->list == &core->connected_vdec_list)
				vdec = NULL;
		}

		if (!vdec) {
			/* search from beginning */
			list_for_each_entry(vdec,
				&core->connected_vdec_list, list) {
				sched_mask = vdec_schedule_mask(vdec,
					core->sched_mask);
				/* Wrapped all the way back to last_vdec:
				 * give it one final chance, then stop the
				 * search either way.
				 */
				if (vdec == core->last_vdec) {
					if (!sched_mask) {
						vdec = NULL;
						break;
					}

					sched_mask = vdec_ready_to_run(vdec,
						sched_mask);

					if (!sched_mask) {
						vdec = NULL;
						break;
					}
					break;
				}

				if (!sched_mask)
					continue;

				sched_mask = vdec_ready_to_run(vdec,
					sched_mask);
				if (sched_mask)
					break;
			}

			if (&vdec->list == &core->connected_vdec_list)
				vdec = NULL;
		}

		worker = vdec;

		if (vdec) {
			unsigned long mask = sched_mask;
			unsigned long i;

			/* setting active_mask should be atomic.
			 * it can be modified by decoder driver callbacks.
			 */
			while (sched_mask) {
				i = __ffs(sched_mask);
				set_bit(i, &vdec->active_mask);
				/* NOTE(review): `1 << i` is int-width; for
				 * bit indices >= 31 this mis-clears — should
				 * likely be 1UL << i.  Confirm mask range.
				 */
				sched_mask &= ~(1 << i);
			}

			/* vdec's sched_mask is only set from core thread */
			vdec->sched_mask |= mask;
			/* Force firmware reload when the core last ran a
			 * different microcode type.
			 */
			if (core->last_vdec) {
				if ((core->last_vdec != vdec) &&
					(core->last_vdec->mc_type != vdec->mc_type))
					vdec->mc_loaded = 0;/*clear for reload firmware*/
			} else
				vdec->mc_loaded = 0;
			core->last_vdec = vdec;
			if (debug & 2)
				vdec->mc_loaded = 0;/*alway reload firmware*/
			vdec_set_status(vdec, VDEC_STATUS_ACTIVE);

			core->sched_mask |= mask;
			if (core->parallel_dec == 1)
				vdec_save_active_hw(vdec);
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
			vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
#endif
			vdec_prepare_run(vdec, mask);
#ifdef VDEC_DEBUG_SUPPORT
			inc_profi_count(mask, vdec->run_count);
			update_profi_clk_run(vdec, mask, get_current_clk());
#endif
			vdec->run(vdec, mask, vdec_callback, core);


			/* we have some cores scheduled, keep working until
			 * all vdecs are checked with no cores to schedule
			 */
			if (core->parallel_dec == 1) {
				if (vdec_core->vdec_combine_flag == 0)
					up(&core->sem);
			} else
				up(&core->sem);
		}

		/* remove disconnected decoder from active list */
		list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
			list_del(&vdec->list);
			vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
			/*core->last_vdec = NULL;*/
			complete(&vdec->inactive_done);
		}

		/* if there is no new work scheduled and nothing
		 * is running, sleep 20ms
		 */
		if (core->parallel_dec == 1) {
			if (vdec_core->vdec_combine_flag == 0) {
				if ((!worker) &&
					((core->sched_mask != core->power_ref_mask)) &&
					(atomic_read(&vdec_core->vdec_nr) > 0) &&
					((core->buff_flag | core->stream_buff_flag) &
					(core->sched_mask ^ core->power_ref_mask))) {
					usleep_range(1000, 2000);
					up(&core->sem);
				}
			} else {
				if ((!worker) && (!core->sched_mask) &&
					(atomic_read(&vdec_core->vdec_nr) > 0) &&
					(core->buff_flag | core->stream_buff_flag)) {
					usleep_range(1000, 2000);
					up(&core->sem);
				}
			}
		} else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
			usleep_range(1000, 2000);
			up(&core->sem);
		}

	}

	return 0;
}
3066
3067#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
/*
 * Sanity-exercise the HEVC IPP pipeline against a scratch decompression
 * buffer (decomp_addr), used by the GXBB power-on workaround in
 * vdec_poweron to detect whether the block came up healthy.  Waits
 * us_delay microseconds, then reads a debug status word; returns true
 * when the low two bits equal 1 (the expected "good" status).
 */
static bool test_hevc(u32 decomp_addr, u32 us_delay)
{
	int i;

	/* SW_RESET IPP */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);

	/* initialize all canvas table */
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
			0x1 | (i << 8) | decomp_addr);
	WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
	WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
	for (i = 0; i < 32; i++)
		WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);

	/* Initialize mcrcc */
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
	WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
	WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
	WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);

	/* Decomp initialize */
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
	WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);

	/* Frame level initialization */
	WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
	WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
	WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
	WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));

	WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
	WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);

	/* Enable SWIMP mode */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);

	/* Enable frame */
	WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
	WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);

	/* Send SW-command CTB info */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);

	/* Send PU_command */
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
	WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);

	/* Give the pipeline time to process the commands above. */
	udelay(us_delay);

	WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);

	return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
}
3127
/*
 * Force every decoder core (vdec1, and vdec2/hcodec/hevc where
 * present) into the powered-off state: assert isolation, power down
 * the core memories, then put the core domain to sleep.  Used to reach
 * a known-off baseline regardless of previous state.
 */
void vdec_power_reset(void)
{
	/* enable vdec1 isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
	/* power off vdec1 memories */
	WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
	/* vdec1 power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);

	if (has_vdec2()) {
		/* enable vdec2 isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
		/* power off vdec2 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
		/* vdec2 power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
	}

	if (has_hdec()) {
		/* enable hcodec isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
		/* power off hcodec memories */
		WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
		/* hcodec power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
	}

	if (has_hevc_vdec()) {
		/* enable hevc isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
		/* power off hevc memories */
		WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
		/* hevc power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
	}
}
EXPORT_SYMBOL(vdec_power_reset);
3173
3174void vdec_poweron(enum vdec_type_e core)
3175{
3176 void *decomp_addr = NULL;
3177 dma_addr_t decomp_dma_addr;
3178 u32 decomp_addr_aligned = 0;
3179 int hevc_loop = 0;
3180 int sleep_val, iso_val;
3181 bool is_power_ctrl_ver2 = false;
3182
3183 if (core >= VDEC_MAX)
3184 return;
3185
3186 mutex_lock(&vdec_mutex);
3187
3188 vdec_core->power_ref_count[core]++;
3189 if (vdec_core->power_ref_count[core] > 1) {
3190 mutex_unlock(&vdec_mutex);
3191 return;
3192 }
3193
3194 if (vdec_on(core)) {
3195 mutex_unlock(&vdec_mutex);
3196 return;
3197 }
3198
3199 is_power_ctrl_ver2 =
3200 ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3201 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;
3202
3203 if (hevc_workaround_needed() &&
3204 (core == VDEC_HEVC)) {
3205 decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
3206 SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);
3207
3208 if (decomp_addr) {
3209 decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
3210 memset((u8 *)decomp_addr +
3211 (decomp_addr_aligned - decomp_dma_addr),
3212 0xff, SZ_4K);
3213 } else
3214 pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
3215 }
3216
3217 if (core == VDEC_1) {
3218 sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
3219 iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;
3220
3221 /* vdec1 power on */
3222#ifdef CONFIG_AMLOGIC_POWER
3223 if (is_support_power_ctrl()) {
3224 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3225 mutex_unlock(&vdec_mutex);
3226 pr_err("vdec-1 power on ctrl sleep fail.\n");
3227 return;
3228 }
3229 } else {
3230 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3231 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3232 }
3233#else
3234 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3235 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3236#endif
3237 /* wait 10uS */
3238 udelay(10);
3239 /* vdec1 soft reset */
3240 WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
3241 WRITE_VREG(DOS_SW_RESET0, 0);
3242 /* enable vdec1 clock */
3243 /*
3244 *add power on vdec clock level setting,only for m8 chip,
3245 * m8baby and m8m2 can dynamic adjust vdec clock,
3246 * power on with default clock level
3247 */
3248 amports_switch_gate("clk_vdec_mux", 1);
3249 vdec_clock_hi_enable();
3250 /* power up vdec memories */
3251 WRITE_VREG(DOS_MEM_PD_VDEC, 0);
3252
3253 /* remove vdec1 isolation */
3254#ifdef CONFIG_AMLOGIC_POWER
3255 if (is_support_power_ctrl()) {
3256 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3257 mutex_unlock(&vdec_mutex);
3258 pr_err("vdec-1 power on ctrl iso fail.\n");
3259 return;
3260 }
3261 } else {
3262 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3263 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3264 }
3265#else
3266 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3267 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3268#endif
3269 /* reset DOS top registers */
3270 WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
3271 } else if (core == VDEC_2) {
3272 if (has_vdec2()) {
3273 /* vdec2 power on */
3274 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3275 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3276 ~0x30);
3277 /* wait 10uS */
3278 udelay(10);
3279 /* vdec2 soft reset */
3280 WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
3281 WRITE_VREG(DOS_SW_RESET2, 0);
3282 /* enable vdec1 clock */
3283 vdec2_clock_hi_enable();
3284 /* power up vdec memories */
3285 WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
3286 /* remove vdec2 isolation */
3287 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3288 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
3289 ~0x300);
3290 /* reset DOS top registers */
3291 WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
3292 }
3293 } else if (core == VDEC_HCODEC) {
3294 if (has_hdec()) {
3295 sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
3296 iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;
3297
3298 /* hcodec power on */
3299#ifdef CONFIG_AMLOGIC_POWER
3300 if (is_support_power_ctrl()) {
3301 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3302 mutex_unlock(&vdec_mutex);
3303 pr_err("hcodec power on ctrl sleep fail.\n");
3304 return;
3305 }
3306 } else {
3307 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3308 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3309 }
3310#else
3311 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3312 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3313#endif
3314 /* wait 10uS */
3315 udelay(10);
3316 /* hcodec soft reset */
3317 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
3318 WRITE_VREG(DOS_SW_RESET1, 0);
3319 /* enable hcodec clock */
3320 hcodec_clock_enable();
3321 /* power up hcodec memories */
3322 WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
3323 /* remove hcodec isolation */
3324#ifdef CONFIG_AMLOGIC_POWER
3325 if (is_support_power_ctrl()) {
3326 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3327 mutex_unlock(&vdec_mutex);
3328 pr_err("hcodec power on ctrl iso fail.\n");
3329 return;
3330 }
3331 } else {
3332 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3333 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3334 }
3335#else
3336 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3337 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3338#endif
3339 }
3340 } else if (core == VDEC_HEVC) {
3341 if (has_hevc_vdec()) {
3342 bool hevc_fixed = false;
3343
3344 sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
3345 iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;
3346
3347 while (!hevc_fixed) {
3348 /* hevc power on */
3349#ifdef CONFIG_AMLOGIC_POWER
3350 if (is_support_power_ctrl()) {
3351 if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
3352 mutex_unlock(&vdec_mutex);
3353 pr_err("hevc power on ctrl sleep fail.\n");
3354 return;
3355 }
3356 } else {
3357 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3358 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3359 }
3360#else
3361 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
3362 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
3363#endif
3364 /* wait 10uS */
3365 udelay(10);
3366 /* hevc soft reset */
3367 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
3368 WRITE_VREG(DOS_SW_RESET3, 0);
3369 /* enable hevc clock */
3370 amports_switch_gate("clk_hevc_mux", 1);
3371 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
3372 amports_switch_gate("clk_hevcb_mux", 1);
3373 hevc_clock_hi_enable();
3374 hevc_back_clock_hi_enable();
3375 /* power up hevc memories */
3376 WRITE_VREG(DOS_MEM_PD_HEVC, 0);
3377 /* remove hevc isolation */
3378#ifdef CONFIG_AMLOGIC_POWER
3379 if (is_support_power_ctrl()) {
3380 if (power_ctrl_iso_mask(true, iso_val, 0)) {
3381 mutex_unlock(&vdec_mutex);
3382 pr_err("hevc power on ctrl iso fail.\n");
3383 return;
3384 }
3385 } else {
3386 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3387 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3388 }
3389#else
3390 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
3391 READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
3392#endif
3393 if (!hevc_workaround_needed())
3394 break;
3395
3396 if (decomp_addr)
3397 hevc_fixed = test_hevc(
3398 decomp_addr_aligned, 20);
3399
3400 if (!hevc_fixed) {
3401 hevc_loop++;
3402
3403 mutex_unlock(&vdec_mutex);
3404
3405 if (hevc_loop >= HEVC_TEST_LIMIT) {
3406 pr_warn("hevc power sequence over limit\n");
3407 pr_warn("=====================================================\n");
3408 pr_warn(" This chip is identified to have HW failure.\n");
3409 pr_warn(" Please contact sqa-platform to replace the platform.\n");
3410 pr_warn("=====================================================\n");
3411
3412 panic("Force panic for chip detection !!!\n");
3413
3414 break;
3415 }
3416
3417 vdec_poweroff(VDEC_HEVC);
3418
3419 mdelay(10);
3420
3421 mutex_lock(&vdec_mutex);
3422 }
3423 }
3424
3425 if (hevc_loop > hevc_max_reset_count)
3426 hevc_max_reset_count = hevc_loop;
3427
3428 WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
3429 udelay(10);
3430 WRITE_VREG(DOS_SW_RESET3, 0);
3431 }
3432 }
3433
3434 if (decomp_addr)
3435 codec_mm_dma_free_coherent(MEM_NAME,
3436 SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);
3437
3438 mutex_unlock(&vdec_mutex);
3439}
3440EXPORT_SYMBOL(vdec_poweron);
3441
/*
 * vdec_poweroff() - power down one decoder core (VDEC_1/2/HCODEC/HEVC).
 *
 * Reference-counted: each call decrements power_ref_count[core]; the
 * hardware is only touched when the count reaches zero.  The power-down
 * sequence mirrors vdec_poweron() in reverse: raise isolation, power
 * down memories, gate the clock, then assert the sleep bit.
 *
 * NOTE(review): the early-return error paths (power_ctrl_*_mask failure)
 * drop vdec_mutex but leave the core in a half-powered state — presumably
 * acceptable because the controller itself failed; confirm with HW team.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	int sleep_val, iso_val;
	bool is_power_ctrl_ver2 = false;

	if (core >= VDEC_MAX)
		return;

	mutex_lock(&vdec_mutex);

	/* only the last user actually powers the core down */
	vdec_core->power_ref_count[core]--;
	if (vdec_core->power_ref_count[core] > 0) {
		mutex_unlock(&vdec_mutex);
		return;
	}

	/* SM1+ (except TL1) uses the narrower "ver2" power-bit layout */
	is_power_ctrl_ver2 =
		((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;

	if (core == VDEC_1) {
		sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
		iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;

		/* enable vdec1 isolation */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_iso_mask(false, iso_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl iso fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
		/* power off vdec1 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
		/* disable vdec1 clock */
		vdec_clock_off();
		/* vdec1 power off */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl sleep fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
	} else if (core == VDEC_2) {
		if (has_vdec2()) {
			/* vdec2 always uses fixed legacy masks (0x300/0x30) */
			/* enable vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
				0x300);
			/* power off vdec2 memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
			/* disable vdec2 clock */
			vdec2_clock_off();
			/* vdec2 power off */
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
				0x30);
		}
	} else if (core == VDEC_HCODEC) {
		if (has_hdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* enable hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(false, iso_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
			/* power off hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
			/* disable hcodec clock */
			hcodec_clock_off();
			/* hcodec power off */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
		}
	} else if (core == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;

			/*
			 * no_powerdown is a one-shot escape hatch: when set,
			 * only soft-reset the core instead of cutting power.
			 */
			if (no_powerdown == 0) {
				/* enable hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(false, iso_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
				/* power off hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);

				/* disable hevc clock */
				hevc_clock_off();
				/* G12A+ also has a separate back-end clock */
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					hevc_back_clock_off();

				/* hevc power off */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
			} else {
				pr_info("!!!!!!!!not power down\n");
				hevc_reset_core(NULL);
				no_powerdown = 0;
			}
		}
	}
	mutex_unlock(&vdec_mutex);
}
EXPORT_SYMBOL(vdec_poweroff);
3615
/*
 * vdec_on() - report whether a decoder core is powered up and clocked.
 *
 * A core counts as "on" when its sleep bits in AO_RTI_GEN_PWR_SLEEP0 are
 * clear (mask depends on the SM1+/non-TL1 "ver2" layout) AND its clock
 * enable bit in the relevant HHI clock control register is set.
 *
 * Returns true only if both conditions hold; cores absent on this SoC
 * (has_vdec2()/has_hdec()/has_hevc_vdec() false) always report false.
 */
bool vdec_on(enum vdec_type_e core)
{
	bool ret = false;

	if (core == VDEC_1) {
		/* sleep bits clear (0x2 ver2 / 0xc legacy) + clk bit 8 */
		if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
			(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
			(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
			? 0x2 : 0xc)) == 0) &&
			(READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
			ret = true;
	} else if (core == VDEC_2) {
		if (has_vdec2()) {
			/* vdec2: fixed legacy sleep mask 0x30 + clk bit 8 */
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
				(READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
				ret = true;
		}
	} else if (core == VDEC_HCODEC) {
		if (has_hdec()) {
			/* hcodec: 0x1 ver2 / 0x3 legacy + clk bit 24 */
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
				(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
				(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
				? 0x1 : 0x3)) == 0) &&
				(READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
				ret = true;
		}
	} else if (core == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			/* hevc: 0x4 ver2 / 0xc0 legacy + clk bit 24 */
			if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
				(((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
				(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
				? 0x4 : 0xc0)) == 0) &&
				(READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
				ret = true;
		}
	}

	return ret;
}
EXPORT_SYMBOL(vdec_on);
3656
3657#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
/*
 * Legacy (MESON6TVD-era) vdec_poweron().  NOTE: this whole branch is
 * compiled out — it sits under "#elif 0" above and is kept for
 * reference only.  It soft-resets the selected core, enables its
 * clock and clears the DOS stall control, all under the irq-safe
 * spinlock; there is no AO power-domain handling on these chips.
 */
void vdec_poweron(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* vdec1 soft reset */
		WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
		WRITE_VREG(DOS_SW_RESET0, 0);
		/* enable vdec1 clock */
		vdec_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_2) {
		/* vdec2 soft reset */
		WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET2, 0);
		/* enable vdec2 clock */
		vdec2_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_HCODEC) {
		/* hcodec soft reset */
		WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET1, 0);
		/* enable hcodec clock */
		hcodec_clock_enable();
	}

	spin_unlock_irqrestore(&lock, flags);
}
3690
/*
 * Legacy (MESON6TVD-era) vdec_poweroff().  Compiled out under the
 * "#elif 0" branch above; kept for reference.  Power-down on these
 * chips is just gating the per-core clock.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* disable vdec1 clock */
		vdec_clock_off();
	} else if (core == VDEC_2) {
		/* disable vdec2 clock */
		vdec2_clock_off();
	} else if (core == VDEC_HCODEC) {
		/* disable hcodec clock */
		hcodec_clock_off();
	}

	spin_unlock_irqrestore(&lock, flags);
}
3710
/*
 * Legacy (MESON6TVD-era) vdec_on().  Compiled out under the "#elif 0"
 * branch above; kept for reference.  "On" here means only that the
 * core's clock-enable bit is set in the HHI register.
 */
bool vdec_on(enum vdec_type_e core)
{
	bool ret = false;

	if (core == VDEC_1) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_2) {
		if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_HCODEC) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
			ret = true;
	}

	return ret;
}
3728#endif
3729
3730int vdec_source_changed(int format, int width, int height, int fps)
3731{
3732 /* todo: add level routines for clock adjustment per chips */
3733 int ret = -1;
3734 static int on_setting;
3735
3736 if (on_setting > 0)
3737 return ret;/*on changing clk,ignore this change*/
3738
3739 if (vdec_source_get(VDEC_1) == width * height * fps)
3740 return ret;
3741
3742
3743 on_setting = 1;
3744 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3745 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
3746 width, height, fps, vdec_clk_get(VDEC_1));
3747 on_setting = 0;
3748 return ret;
3749
3750}
3751EXPORT_SYMBOL(vdec_source_changed);
3752
/*
 * vdec_reset_core() - soft-reset the VDEC_1 hardware pipeline.
 *
 * Sequence: stop the DMC from accepting new VDEC requests, busy-wait
 * until the DMC channel drains, pulse the DOS_SW_RESET0 sub-block
 * reset bits, then re-enable the DMC path.  The @vdec argument is
 * unused here (kept for signature symmetry with hevc_reset_core()).
 *
 * NOTE(review): the DMC drain loop has no timeout — it spins forever
 * if the status bit never sets; presumably guaranteed by HW.
 */
void vdec_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 13; /*bit13: DOS VDEC interface*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask = 1 << 21; /*bit21: DOS VDEC interface*/

	/* block new VDEC requests at the DMC */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait for in-flight DMC traffic on this channel to drain */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;
	/*
	 * DOS_SW_RESET0 bit map:
	 * 2: assist
	 * 3: vld_reset
	 * 4: vld_part_reset
	 * 5: vfifo reset
	 * 6: iqidct
	 * 7: mc
	 * 8: dblk
	 * 9: pic_dc
	 * 10: psc
	 * 11: mcpu
	 * 12: ccpu
	 * 13: ddr
	 * 14: afifo
	 */
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) {
		/* SM1+: also reset iqidct/mc/dblk/pic_dc */
		WRITE_VREG(DOS_SW_RESET0, (1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9));
	} else {
		WRITE_VREG(DOS_SW_RESET0,
			(1<<3)|(1<<4)|(1<<5));
	}
	WRITE_VREG(DOS_SW_RESET0, 0);

	/* re-open the DMC path for VDEC */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
}
EXPORT_SYMBOL(vdec_reset_core);
3800
/*
 * hevc_mmu_dma_check() - quiesce the HEVC SAO MMU DMA engine (G12A+).
 *
 * Waits up to ~10ms (HZ/100 jiffies) for the CM core to go idle,
 * disables the SAO MMU DMA, then waits up to another ~10ms for the
 * DMA status to report done.  No-op on pre-G12A chips.
 */
void hevc_mmu_dma_check(struct vdec_s *vdec)
{
	ulong timeout;
	u32 data;
	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A)
		return;
	timeout = jiffies + HZ/100;
	/* poll CM core busy bit until clear or timeout */
	while (1) {
		data = READ_VREG(HEVC_CM_CORE_STATUS);
		if ((data & 0x1) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			/* NOTE(review): message text says "idle" but this
			 * path is the timeout (still busy) case — confirm */
			if (debug & 0x10)
				pr_info(" %s sao mmu dma idle\n", __func__);
			break;
		}
	}
	/*disable sao mmu dma */
	CLEAR_VREG_MASK(HEVC_SAO_MMU_DMA_CTRL, 1 << 0);
	timeout = jiffies + HZ/100;
	/* poll DMA done bit (bit0 set == done) until set or timeout */
	while (1) {
		data = READ_VREG(HEVC_SAO_MMU_DMA_STATUS);
		if ((data & 0x1))
			break;
		if (time_after(jiffies, timeout)) {
			if (debug & 0x10)
				pr_err("%s sao mmu dma timeout, num_buf_used = 0x%x\n",
					__func__, (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16));
			break;
		}
	}
}
EXPORT_SYMBOL(hevc_mmu_dma_check);
3834
/*
 * hevc_reset_core() - soft-reset the HEVC hardware pipeline.
 *
 * Same structure as vdec_reset_core(): disable HEVC's DMC request
 * channel(s), busy-wait for the channel to drain, pulse the
 * DOS_SW_RESET3 sub-block bits, then restore the DMC path.
 * @vdec may be NULL (e.g. from vdec_poweroff()'s no_powerdown path);
 * NULL or frame-based input also forces HEVC_STREAM_CONTROL to 0
 * again after the drain.
 *
 * NOTE(review): the drain loop has no timeout — spins forever if the
 * DMC status bit never sets.
 */
void hevc_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /*bit4: hevc*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= 1 << 8; /*bit8: hevcb*/

	WRITE_VREG(HEVC_STREAM_CONTROL, 0);
	/* block new HEVC requests at the DMC */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait for in-flight DMC traffic to drain */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;

	if (vdec == NULL || input_frame_based(vdec))
		WRITE_VREG(HEVC_STREAM_CONTROL, 0);

	/*
	 * DOS_SW_RESET3 bit map:
	 * 2: assist
	 * 3: parser
	 * 4: parser_state
	 * 8: dblk
	 * 11:mcpu
	 * 12:ccpu
	 * 13:ddr
	 * 14:iqit
	 * 15:ipp
	 * 17:qdct
	 * 18:mpred
	 * 19:sao
	 * 24:hevc_afifo
	 */
	WRITE_VREG(DOS_SW_RESET3,
		(1<<3)|(1<<4)|(1<<8)|(1<<11)|
		(1<<12)|(1<<13)|(1<<14)|(1<<15)|
		(1<<17)|(1<<18)|(1<<19)|(1<<24));

	WRITE_VREG(DOS_SW_RESET3, 0);

	/* re-open the DMC path for HEVC */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

}
EXPORT_SYMBOL(hevc_reset_core);
3887
3888int vdec2_source_changed(int format, int width, int height, int fps)
3889{
3890 int ret = -1;
3891 static int on_setting;
3892
3893 if (has_vdec2()) {
3894 /* todo: add level routines for clock adjustment per chips */
3895 if (on_setting != 0)
3896 return ret;/*on changing clk,ignore this change*/
3897
3898 if (vdec_source_get(VDEC_2) == width * height * fps)
3899 return ret;
3900
3901 on_setting = 1;
3902 ret = vdec_source_changed_for_clk_set(format,
3903 width, height, fps);
3904 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
3905 width, height, fps, vdec_clk_get(VDEC_2));
3906 on_setting = 0;
3907 return ret;
3908 }
3909 return 0;
3910}
3911EXPORT_SYMBOL(vdec2_source_changed);
3912
3913int hevc_source_changed(int format, int width, int height, int fps)
3914{
3915 /* todo: add level routines for clock adjustment per chips */
3916 int ret = -1;
3917 static int on_setting;
3918
3919 if (on_setting != 0)
3920 return ret;/*on changing clk,ignore this change*/
3921
3922 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
3923 return ret;
3924
3925 on_setting = 1;
3926 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3927 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
3928 width, height, fps, vdec_clk_get(VDEC_HEVC));
3929 on_setting = 0;
3930
3931 return ret;
3932}
3933EXPORT_SYMBOL(hevc_source_changed);
3934
/*
 * Name/offset table of AMRISC registers dumped by amrisc_regs_show():
 * master CPU (M*) and coprocessor (C*) state at 0x300/0x320, plus the
 * AV_SCRATCH_* mailbox registers at 0x09c0..0x09d7.
 */
static struct am_reg am_risc[] = {
	{"MSP", 0x300},
	{"MPSR", 0x301},
	{"MCPU_INT_BASE", 0x302},
	{"MCPU_INTR_GRP", 0x303},
	{"MCPU_INTR_MSK", 0x304},
	{"MCPU_INTR_REQ", 0x305},
	{"MPC-P", 0x306},
	{"MPC-D", 0x307},
	{"MPC_E", 0x308},
	{"MPC_W", 0x309},
	{"CSP", 0x320},
	{"CPSR", 0x321},
	{"CCPU_INT_BASE", 0x322},
	{"CCPU_INTR_GRP", 0x323},
	{"CCPU_INTR_MSK", 0x324},
	{"CCPU_INTR_REQ", 0x325},
	{"CPC-P", 0x326},
	{"CPC-D", 0x327},
	{"CPC_E", 0x328},
	{"CPC_W", 0x329},
	{"AV_SCRATCH_0", 0x09c0},
	{"AV_SCRATCH_1", 0x09c1},
	{"AV_SCRATCH_2", 0x09c2},
	{"AV_SCRATCH_3", 0x09c3},
	{"AV_SCRATCH_4", 0x09c4},
	{"AV_SCRATCH_5", 0x09c5},
	{"AV_SCRATCH_6", 0x09c6},
	{"AV_SCRATCH_7", 0x09c7},
	{"AV_SCRATCH_8", 0x09c8},
	{"AV_SCRATCH_9", 0x09c9},
	{"AV_SCRATCH_A", 0x09ca},
	{"AV_SCRATCH_B", 0x09cb},
	{"AV_SCRATCH_C", 0x09cc},
	{"AV_SCRATCH_D", 0x09cd},
	{"AV_SCRATCH_E", 0x09ce},
	{"AV_SCRATCH_F", 0x09cf},
	{"AV_SCRATCH_G", 0x09d0},
	{"AV_SCRATCH_H", 0x09d1},
	{"AV_SCRATCH_I", 0x09d2},
	{"AV_SCRATCH_J", 0x09d3},
	{"AV_SCRATCH_K", 0x09d4},
	{"AV_SCRATCH_L", 0x09d5},
	{"AV_SCRATCH_M", 0x09d6},
	{"AV_SCRATCH_N", 0x09d7},
};
3981
/*
 * amrisc_regs_show() - sysfs read: dump every register in am_risc[].
 *
 * On M8+ the VDEC_1 core must be powered (checked under vdec_mutex,
 * which stays held across the register reads); on M6-class chips the
 * vdec gate is toggled around the dump instead.  Returns the number
 * of bytes written to @buf.
 */
static ssize_t amrisc_regs_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct am_reg *regs = am_risc;
	int rsize = sizeof(am_risc) / sizeof(struct am_reg);
	int i;
	unsigned int val;
	ssize_t ret;

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
		mutex_lock(&vdec_mutex);
		if (!vdec_on(VDEC_1)) {
			/* core is off: reading its regs would be invalid */
			mutex_unlock(&vdec_mutex);
			pbuf += sprintf(pbuf, "amrisc is power off\n");
			ret = pbuf - buf;
			return ret;
		}
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 1);
		 */
		amports_switch_gate("vdec", 1);
	}
	pbuf += sprintf(pbuf, "amrisc registers show:\n");
	for (i = 0; i < rsize; i++) {
		val = READ_VREG(regs[i].offset);
		pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
				regs[i].name, regs[i].offset, val, val);
	}
	/* release whichever power/gate path was taken above */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
		mutex_unlock(&vdec_mutex);
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 0);
		 */
		amports_switch_gate("vdec", 0);
	}
	ret = pbuf - buf;
	return ret;
}
4025
4026static ssize_t dump_trace_show(struct class *class,
4027 struct class_attribute *attr, char *buf)
4028{
4029 int i;
4030 char *pbuf = buf;
4031 ssize_t ret;
4032 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
4033
4034 if (!trace_buf) {
4035 pbuf += sprintf(pbuf, "No Memory bug\n");
4036 ret = pbuf - buf;
4037 return ret;
4038 }
4039 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
4040 mutex_lock(&vdec_mutex);
4041 if (!vdec_on(VDEC_1)) {
4042 mutex_unlock(&vdec_mutex);
4043 kfree(trace_buf);
4044 pbuf += sprintf(pbuf, "amrisc is power off\n");
4045 ret = pbuf - buf;
4046 return ret;
4047 }
4048 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4049 /*TODO:M6 define */
4050 /*
4051 * switch_mod_gate_by_type(MOD_VDEC, 1);
4052 */
4053 amports_switch_gate("vdec", 1);
4054 }
4055 pr_info("dump trace steps:%d start\n", debug_trace_num);
4056 i = 0;
4057 while (i <= debug_trace_num - 16) {
4058 trace_buf[i] = READ_VREG(MPC_E);
4059 trace_buf[i + 1] = READ_VREG(MPC_E);
4060 trace_buf[i + 2] = READ_VREG(MPC_E);
4061 trace_buf[i + 3] = READ_VREG(MPC_E);
4062 trace_buf[i + 4] = READ_VREG(MPC_E);
4063 trace_buf[i + 5] = READ_VREG(MPC_E);
4064 trace_buf[i + 6] = READ_VREG(MPC_E);
4065 trace_buf[i + 7] = READ_VREG(MPC_E);
4066 trace_buf[i + 8] = READ_VREG(MPC_E);
4067 trace_buf[i + 9] = READ_VREG(MPC_E);
4068 trace_buf[i + 10] = READ_VREG(MPC_E);
4069 trace_buf[i + 11] = READ_VREG(MPC_E);
4070 trace_buf[i + 12] = READ_VREG(MPC_E);
4071 trace_buf[i + 13] = READ_VREG(MPC_E);
4072 trace_buf[i + 14] = READ_VREG(MPC_E);
4073 trace_buf[i + 15] = READ_VREG(MPC_E);
4074 i += 16;
4075 };
4076 pr_info("dump trace steps:%d finished\n", debug_trace_num);
4077 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
4078 mutex_unlock(&vdec_mutex);
4079 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4080 /*TODO:M6 define */
4081 /*
4082 * switch_mod_gate_by_type(MOD_VDEC, 0);
4083 */
4084 amports_switch_gate("vdec", 0);
4085 }
4086 for (i = 0; i < debug_trace_num; i++) {
4087 if (i % 4 == 0) {
4088 if (i % 16 == 0)
4089 pbuf += sprintf(pbuf, "\n");
4090 else if (i % 8 == 0)
4091 pbuf += sprintf(pbuf, " ");
4092 else /* 4 */
4093 pbuf += sprintf(pbuf, " ");
4094 }
4095 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
4096 }
4097 while (i < debug_trace_num)
4098 ;
4099 kfree(trace_buf);
4100 pbuf += sprintf(pbuf, "\n");
4101 ret = pbuf - buf;
4102 return ret;
4103}
4104
4105static ssize_t clock_level_show(struct class *class,
4106 struct class_attribute *attr, char *buf)
4107{
4108 char *pbuf = buf;
4109 size_t ret;
4110
4111 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
4112
4113 if (has_vdec2())
4114 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
4115
4116 if (has_hevc_vdec())
4117 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
4118
4119 ret = pbuf - buf;
4120 return ret;
4121}
4122
4123static ssize_t store_poweron_clock_level(struct class *class,
4124 struct class_attribute *attr,
4125 const char *buf, size_t size)
4126{
4127 unsigned int val;
4128 ssize_t ret;
4129
4130 /*ret = sscanf(buf, "%d", &val);*/
4131 ret = kstrtoint(buf, 0, &val);
4132
4133 if (ret != 0)
4134 return -EINVAL;
4135 poweron_clock_level = val;
4136 return size;
4137}
4138
4139static ssize_t show_poweron_clock_level(struct class *class,
4140 struct class_attribute *attr, char *buf)
4141{
4142 return sprintf(buf, "%d\n", poweron_clock_level);
4143}
4144
4145/*
4146 *if keep_vdec_mem == 1
4147 *always don't release
4148 *vdec 64 memory for fast play.
4149 */
4150static ssize_t store_keep_vdec_mem(struct class *class,
4151 struct class_attribute *attr,
4152 const char *buf, size_t size)
4153{
4154 unsigned int val;
4155 ssize_t ret;
4156
4157 /*ret = sscanf(buf, "%d", &val);*/
4158 ret = kstrtoint(buf, 0, &val);
4159 if (ret != 0)
4160 return -EINVAL;
4161 keep_vdec_mem = val;
4162 return size;
4163}
4164
4165static ssize_t show_keep_vdec_mem(struct class *class,
4166 struct class_attribute *attr, char *buf)
4167{
4168 return sprintf(buf, "%d\n", keep_vdec_mem);
4169}
4170
4171#ifdef VDEC_DEBUG_SUPPORT
4172static ssize_t store_debug(struct class *class,
4173 struct class_attribute *attr,
4174 const char *buf, size_t size)
4175{
4176 struct vdec_s *vdec;
4177 struct vdec_core_s *core = vdec_core;
4178 unsigned long flags;
4179
4180 unsigned id;
4181 unsigned val;
4182 ssize_t ret;
4183 char cbuf[32];
4184
4185 cbuf[0] = 0;
4186 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
4187 /*pr_info(
4188 "%s(%s)=>ret %ld: %s, %x, %x\n",
4189 __func__, buf, ret, cbuf, id, val);*/
4190 if (strcmp(cbuf, "schedule") == 0) {
4191 pr_info("VDEC_DEBUG: force schedule\n");
4192 up(&core->sem);
4193 } else if (strcmp(cbuf, "power_off") == 0) {
4194 pr_info("VDEC_DEBUG: power off core %d\n", id);
4195 vdec_poweroff(id);
4196 } else if (strcmp(cbuf, "power_on") == 0) {
4197 pr_info("VDEC_DEBUG: power_on core %d\n", id);
4198 vdec_poweron(id);
4199 } else if (strcmp(cbuf, "wr") == 0) {
4200 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
4201 id, val);
4202 WRITE_VREG(id, val);
4203 } else if (strcmp(cbuf, "rd") == 0) {
4204 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
4205 id, READ_VREG(id));
4206 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
4207 pr_info(
4208 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
4209 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
4210 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
4211 }
4212
4213 flags = vdec_core_lock(vdec_core);
4214
4215 list_for_each_entry(vdec,
4216 &core->connected_vdec_list, list) {
4217 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
4218 if (((vdec->status == VDEC_STATUS_CONNECTED
4219 || vdec->status == VDEC_STATUS_ACTIVE)) &&
4220 (vdec->id == id)) {
4221 /*to add*/
4222 break;
4223 }
4224 }
4225 vdec_core_unlock(vdec_core, flags);
4226 return size;
4227}
4228
4229static ssize_t show_debug(struct class *class,
4230 struct class_attribute *attr, char *buf)
4231{
4232 char *pbuf = buf;
4233 struct vdec_s *vdec;
4234 struct vdec_core_s *core = vdec_core;
4235 unsigned long flags = vdec_core_lock(vdec_core);
4236 u64 tmp;
4237
4238 pbuf += sprintf(pbuf,
4239 "============== help:\n");
4240 pbuf += sprintf(pbuf,
4241 "'echo xxx > debug' usuage:\n");
4242 pbuf += sprintf(pbuf,
4243 "schedule - trigger schedule thread to run\n");
4244 pbuf += sprintf(pbuf,
4245 "power_off core_num - call vdec_poweroff(core_num)\n");
4246 pbuf += sprintf(pbuf,
4247 "power_on core_num - call vdec_poweron(core_num)\n");
4248 pbuf += sprintf(pbuf,
4249 "wr adr val - call WRITE_VREG(adr, val)\n");
4250 pbuf += sprintf(pbuf,
4251 "rd adr - call READ_VREG(adr)\n");
4252 pbuf += sprintf(pbuf,
4253 "read_hevc_clk_reg - read HHI register for hevc clk\n");
4254 pbuf += sprintf(pbuf,
4255 "===================\n");
4256
4257 pbuf += sprintf(pbuf,
4258 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
4259 list_for_each_entry(vdec,
4260 &core->connected_vdec_list, list) {
4261 enum vdec_type_e type;
4262 if ((vdec->status == VDEC_STATUS_CONNECTED
4263 || vdec->status == VDEC_STATUS_ACTIVE)) {
4264 for (type = VDEC_1; type < VDEC_MAX; type++) {
4265 if (vdec->core_mask & (1 << type)) {
4266 pbuf += sprintf(pbuf, "%s(%d):",
4267 vdec->vf_provider_name, type);
4268 pbuf += sprintf(pbuf, "\t%d",
4269 vdec->check_count[type]);
4270 pbuf += sprintf(pbuf, "\t%d",
4271 vdec->run_count[type]);
4272 pbuf += sprintf(pbuf, "\t%d",
4273 vdec->input_underrun_count[type]);
4274 pbuf += sprintf(pbuf, "\t%d",
4275 vdec->not_run_ready_count[type]);
4276 tmp = vdec->run_clk[type] * 100;
4277 do_div(tmp, vdec->total_clk[type]);
4278 pbuf += sprintf(pbuf,
4279 "\t%d%%\n",
4280 vdec->total_clk[type] == 0 ? 0 :
4281 (u32)tmp);
4282 }
4283 }
4284 }
4285 }
4286
4287 vdec_core_unlock(vdec_core, flags);
4288 return pbuf - buf;
4289
4290}
4291#endif
4292
4293/*irq num as same as .dts*/
4294/*
4295 * interrupts = <0 3 1
4296 * 0 23 1
4297 * 0 32 1
4298 * 0 43 1
4299 * 0 44 1
4300 * 0 45 1>;
4301 * interrupt-names = "vsync",
4302 * "demux",
4303 * "parser",
4304 * "mailbox_0",
4305 * "mailbox_1",
4306 * "mailbox_2";
4307 */
/*
 * vdec_request_threaded_irq() - register (or re-bind) handlers for one
 * of the decoder interrupt lines enumerated in vdec_irq_num.
 *
 * The real kernel IRQ is requested only once per line: the common
 * vdec_isr/vdec_thread_isr trampolines are installed, and the caller's
 * @handler/@thread_fn/@dev are stored in isr_context[num] for the
 * trampolines to dispatch to.  Subsequent calls for the same line just
 * swap the stored handlers (no re-request).
 *
 * Returns 0 on success, -EINVAL for a bad @num or missing platform
 * IRQ resource, -EIO if request_threaded_irq() fails.
 */
s32 vdec_request_threaded_irq(enum vdec_irq_num num,
			irq_handler_t handler,
			irq_handler_t thread_fn,
			unsigned long irqflags,
			const char *devname, void *dev)
{
	s32 res_irq;
	s32 ret = 0;

	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return -EINVAL;
	}

	/* irq < 0 means this line has not been requested yet */
	if (vdec_core->isr_context[num].irq < 0) {
		res_irq = platform_get_irq(
			vdec_core->vdec_core_platform_device, num);
		if (res_irq < 0) {
			pr_err("[%s] get irq error!", __func__);
			return -EINVAL;
		}

		vdec_core->isr_context[num].irq = res_irq;
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;

		/* with a threaded handler, force ONESHOT instead of
		 * the caller's flags */
		ret = request_threaded_irq(res_irq,
			vdec_isr,
			vdec_thread_isr,
			(thread_fn) ? IRQF_ONESHOT : irqflags,
			devname,
			&vdec_core->isr_context[num]);

		if (ret) {
			/* roll back the context so a later call retries */
			vdec_core->isr_context[num].irq = -1;
			vdec_core->isr_context[num].dev_isr = NULL;
			vdec_core->isr_context[num].dev_threaded_isr = NULL;
			vdec_core->isr_context[num].dev_id = NULL;

			pr_err("vdec irq register error for %s.\n", devname);
			return -EIO;
		}
	} else {
		/* line already requested: just re-bind the handlers */
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;
	}

	return ret;
}
EXPORT_SYMBOL(vdec_request_threaded_irq);
4360
4361s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
4362 const char *devname, void *dev)
4363{
4364 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
4365
4366 return vdec_request_threaded_irq(num,
4367 handler,
4368 NULL,/*no thread_fn*/
4369 IRQF_SHARED,
4370 devname,
4371 dev);
4372}
4373EXPORT_SYMBOL(vdec_request_irq);
4374
/*
 * vdec_free_irq() - detach the caller's handlers from a decoder
 * interrupt line.
 *
 * The kernel IRQ itself stays requested (see
 * vdec_request_threaded_irq()); only the dispatch pointers are
 * cleared, then synchronize_irq() waits for any in-flight handler
 * to finish.  @dev is currently unused.
 */
void vdec_free_irq(enum vdec_irq_num num, void *dev)
{
	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return;
	}
	/*
	 *assume amrisc is stopped already and there is no mailbox interrupt
	 * when we reset pointers here.
	 */
	vdec_core->isr_context[num].dev_isr = NULL;
	vdec_core->isr_context[num].dev_threaded_isr = NULL;
	vdec_core->isr_context[num].dev_id = NULL;
	synchronize_irq(vdec_core->isr_context[num].irq);
}
EXPORT_SYMBOL(vdec_free_irq);
4391
4392struct vdec_s *vdec_get_default_vdec_for_userdata(void)
4393{
4394 struct vdec_s *vdec;
4395 struct vdec_s *ret_vdec;
4396 struct vdec_core_s *core = vdec_core;
4397 unsigned long flags;
4398 int id;
4399
4400 flags = vdec_core_lock(vdec_core);
4401
4402 id = 0x10000000;
4403 ret_vdec = NULL;
4404 if (!list_empty(&core->connected_vdec_list)) {
4405 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4406 if (vdec->id < id) {
4407 id = vdec->id;
4408 ret_vdec = vdec;
4409 }
4410 }
4411 }
4412
4413 vdec_core_unlock(vdec_core, flags);
4414
4415 return ret_vdec;
4416}
4417EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
4418
4419struct vdec_s *vdec_get_vdec_by_id(int vdec_id)
4420{
4421 struct vdec_s *vdec;
4422 struct vdec_s *ret_vdec;
4423 struct vdec_core_s *core = vdec_core;
4424 unsigned long flags;
4425
4426 flags = vdec_core_lock(vdec_core);
4427
4428 ret_vdec = NULL;
4429 if (!list_empty(&core->connected_vdec_list)) {
4430 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4431 if (vdec->id == vdec_id) {
4432 ret_vdec = vdec;
4433 break;
4434 }
4435 }
4436 }
4437
4438 vdec_core_unlock(vdec_core, flags);
4439
4440 return ret_vdec;
4441}
4442EXPORT_SYMBOL(vdec_get_vdec_by_id);
4443
4444int vdec_read_user_data(struct vdec_s *vdec,
4445 struct userdata_param_t *p_userdata_param)
4446{
4447 int ret = 0;
4448
4449 if (!vdec)
4450 vdec = vdec_get_default_vdec_for_userdata();
4451
4452 if (vdec) {
4453 if (vdec->user_data_read)
4454 ret = vdec->user_data_read(vdec, p_userdata_param);
4455 }
4456 return ret;
4457}
4458EXPORT_SYMBOL(vdec_read_user_data);
4459
4460int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
4461{
4462 if (vdec) {
4463 if (vdec->wakeup_userdata_poll)
4464 vdec->wakeup_userdata_poll(vdec);
4465 }
4466
4467 return 0;
4468}
4469EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
4470
4471void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
4472{
4473 if (!vdec)
4474 vdec = vdec_get_default_vdec_for_userdata();
4475
4476 if (vdec) {
4477 if (vdec->reset_userdata_fifo)
4478 vdec->reset_userdata_fifo(vdec, bInit);
4479 }
4480}
4481EXPORT_SYMBOL(vdec_reset_userdata_fifo);
4482
4483static int dump_mode;
4484static ssize_t dump_risc_mem_store(struct class *class,
4485 struct class_attribute *attr,
4486 const char *buf, size_t size)/*set*/
4487{
4488 unsigned int val;
4489 ssize_t ret;
4490 char dump_mode_str[4] = "PRL";
4491
4492 /*ret = sscanf(buf, "%d", &val);*/
4493 ret = kstrtoint(buf, 0, &val);
4494
4495 if (ret != 0)
4496 return -EINVAL;
4497 dump_mode = val & 0x3;
4498 pr_info("set dump mode to %d,%c_mem\n",
4499 dump_mode, dump_mode_str[dump_mode]);
4500 return size;
4501}
/*
 * read_amrisc_reg() - indirect AMRISC memory read: write the address
 * to register 0x31b, read the data back from 0x31c.  Used by
 * dump_pmem()/dump_lmem() after they select the target memory.
 */
static u32 read_amrisc_reg(int reg)
{
	WRITE_VREG(0x31b, reg);
	return READ_VREG(0x31c);
}
4507
/*
 * dump_pmem() - dump AMRISC program memory to the kernel log.
 * Halts the MCPU (0x8000 -> reg 0x301), selects pmem (0 -> reg
 * 0x31d), then reads words 0x000..0xffe via read_amrisc_reg().
 * Output format matches the firmware .o listing.
 */
static void dump_pmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 0);
	pr_info("start dump amrisc pmem of risc\n");
	for (i = 0; i < 0xfff; i++) {
		/*same as .o format*/
		pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
	}
}
4520
/*
 * dump_lmem() - dump AMRISC local memory to the kernel log.
 * Halts the MCPU (0x8000 -> reg 0x301), selects lmem (2 -> reg
 * 0x31d), then reads words 0x000..0x3fe via read_amrisc_reg().
 */
static void dump_lmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 2);
	pr_info("start dump amrisc lmem\n");
	for (i = 0; i < 0x3ff; i++) {
		/*same as */
		pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
	}
}
4533
/*
 * dump_risc_mem_show() - sysfs read: dump the AMRISC memory selected
 * by dump_mode (0 = pmem, 2 = lmem; other values do nothing) to the
 * kernel log, then write "done" to @buf.
 *
 * On M8+ the core must be powered (checked under vdec_mutex, held
 * across the dump); on M6-class chips the vdec gate is toggled
 * around the dump instead.
 */
static ssize_t dump_risc_mem_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	int ret;

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
		mutex_lock(&vdec_mutex);
		if (!vdec_on(VDEC_1)) {
			/* core is off: its memories are not readable */
			mutex_unlock(&vdec_mutex);
			pbuf += sprintf(pbuf, "amrisc is power off\n");
			ret = pbuf - buf;
			return ret;
		}
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 1);
		 */
		amports_switch_gate("vdec", 1);
	}
	/*start do**/
	switch (dump_mode) {
	case 0:
		dump_pmem();
		break;
	case 2:
		dump_lmem();
		break;
	default:
		break;
	}

	/*done*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
		mutex_unlock(&vdec_mutex);
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 0);
		 */
		amports_switch_gate("vdec", 0);
	}
	return sprintf(buf, "done\n");
}
4579
4580static ssize_t core_show(struct class *class, struct class_attribute *attr,
4581 char *buf)
4582{
4583 struct vdec_core_s *core = vdec_core;
4584 char *pbuf = buf;
4585
4586 if (list_empty(&core->connected_vdec_list))
4587 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4588 else {
4589 struct vdec_s *vdec;
4590
4591 pbuf += sprintf(pbuf,
4592 " Core: last_sched %p, sched_mask %lx\n",
4593 core->last_vdec,
4594 core->sched_mask);
4595
4596 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4597 pbuf += sprintf(pbuf,
4598 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
4599 vdec->id,
4600 vdec,
4601 vdec_device_name[vdec->format * 2],
4602 vdec_status_str(vdec),
4603 vdec_type_str(vdec),
4604 vdec->active_mask);
4605 }
4606 }
4607
4608 return pbuf - buf;
4609}
4610
/*
 * Sysfs show for "vdec_status": for every connected/active vdec,
 * prints the per-channel statistics reported by vdec_status() (device
 * name, dimensions, rates, frame/error counters, ...).  The whole walk
 * runs under vdec_core_lock().
 *
 * NOTE(review): the sprintf chain is not bounded against PAGE_SIZE;
 * with many instances this could overrun the sysfs buffer -- confirm
 * against the maximum instance count.
 */
static ssize_t vdec_status_show(struct class *class,
			struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct vdec_s *vdec;
	struct vdec_info vs;
	unsigned char vdec_num = 0;
	struct vdec_core_s *core = vdec_core;
	unsigned long flags = vdec_core_lock(vdec_core);

	if (list_empty(&core->connected_vdec_list)) {
		pbuf += sprintf(pbuf, "No vdec.\n");
		goto out;
	}

	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		/* only report instances that are actually running */
		if ((vdec->status == VDEC_STATUS_CONNECTED
			|| vdec->status == VDEC_STATUS_ACTIVE)) {
			memset(&vs, 0, sizeof(vs));
			if (vdec_status(vdec, &vs)) {
				pbuf += sprintf(pbuf, "err.\n");
				goto out;
			}
			pbuf += sprintf(pbuf,
				"vdec channel %u statistics:\n",
				vdec_num);
			pbuf += sprintf(pbuf,
				"%13s : %s\n", "device name",
				vs.vdec_name);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame width",
				vs.frame_width);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame height",
				vs.frame_height);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame rate",
				vs.frame_rate, "fps");
			/* NOTE(review): divides before multiplying, which
			 * truncates up to ~8 kbps; confirm whether
			 * (bit_rate * 8) / 1024 was intended.
			 */
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "bit rate",
				vs.bit_rate / 1024 * 8, "kbps");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "status",
				vs.status);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame dur",
				vs.frame_dur);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame data",
				vs.frame_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame count",
				vs.frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "drop count",
				vs.drop_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "fra err count",
				vs.error_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "hw err count",
				vs.error_count);
			pbuf += sprintf(pbuf,
				"%13s : %llu %s\n", "total data",
				vs.total_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %x\n\n", "ratio_control",
				vs.ratio_control);

			vdec_num++;
		}
	}
out:
	vdec_core_unlock(vdec_core, flags);
	return pbuf - buf;
}
4687
4688static ssize_t dump_vdec_blocks_show(struct class *class,
4689 struct class_attribute *attr, char *buf)
4690{
4691 struct vdec_core_s *core = vdec_core;
4692 char *pbuf = buf;
4693 unsigned long flags = vdec_core_lock(vdec_core);
4694
4695 if (list_empty(&core->connected_vdec_list))
4696 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4697 else {
4698 struct vdec_s *vdec;
4699 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4700 pbuf += vdec_input_dump_blocks(&vdec->input,
4701 pbuf, PAGE_SIZE - (pbuf - buf));
4702 }
4703 }
4704 vdec_core_unlock(vdec_core, flags);
4705
4706 return pbuf - buf;
4707}
4708static ssize_t dump_vdec_chunks_show(struct class *class,
4709 struct class_attribute *attr, char *buf)
4710{
4711 struct vdec_core_s *core = vdec_core;
4712 char *pbuf = buf;
4713 unsigned long flags = vdec_core_lock(vdec_core);
4714
4715 if (list_empty(&core->connected_vdec_list))
4716 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4717 else {
4718 struct vdec_s *vdec;
4719 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4720 pbuf += vdec_input_dump_chunks(vdec->id, &vdec->input,
4721 pbuf, PAGE_SIZE - (pbuf - buf));
4722 }
4723 }
4724 vdec_core_unlock(vdec_core, flags);
4725
4726 return pbuf - buf;
4727}
4728
4729static ssize_t dump_decoder_state_show(struct class *class,
4730 struct class_attribute *attr, char *buf)
4731{
4732 char *pbuf = buf;
4733 struct vdec_s *vdec;
4734 struct vdec_core_s *core = vdec_core;
4735 unsigned long flags = vdec_core_lock(vdec_core);
4736
4737 if (list_empty(&core->connected_vdec_list)) {
4738 pbuf += sprintf(pbuf, "No vdec.\n");
4739 } else {
4740 list_for_each_entry(vdec,
4741 &core->connected_vdec_list, list) {
4742 if ((vdec->status == VDEC_STATUS_CONNECTED
4743 || vdec->status == VDEC_STATUS_ACTIVE)
4744 && vdec->dump_state)
4745 vdec->dump_state(vdec);
4746 }
4747 }
4748 vdec_core_unlock(vdec_core, flags);
4749
4750 return pbuf - buf;
4751}
4752
4753static ssize_t dump_fps_show(struct class *class,
4754 struct class_attribute *attr, char *buf)
4755{
4756 char *pbuf = buf;
4757 struct vdec_core_s *core = vdec_core;
4758 int i;
4759
4760 unsigned long flags = vdec_fps_lock(vdec_core);
4761 for (i = 0; i < MAX_INSTANCE_MUN; i++)
4762 pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps);
4763
4764 pbuf += sprintf(pbuf, "\n");
4765 vdec_fps_unlock(vdec_core, flags);
4766
4767 return pbuf - buf;
4768}
4769
4770
4771
/*
 * Sysfs attributes exposed under /sys/class/vdec/.  Read-only entries
 * use __ATTR_RO (show handler "<name>_show"); read-write entries name
 * their show/store handlers explicitly.  The table must stay
 * __ATTR_NULL-terminated.
 */
static struct class_attribute vdec_class_attrs[] = {
	__ATTR_RO(amrisc_regs),
	__ATTR_RO(dump_trace),
	__ATTR_RO(clock_level),
	__ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
	show_poweron_clock_level, store_poweron_clock_level),
	__ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
	dump_risc_mem_show, dump_risc_mem_store),
	__ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
	show_keep_vdec_mem, store_keep_vdec_mem),
	__ATTR_RO(core),
	__ATTR_RO(vdec_status),
	__ATTR_RO(dump_vdec_blocks),
	__ATTR_RO(dump_vdec_chunks),
	__ATTR_RO(dump_decoder_state),
#ifdef VDEC_DEBUG_SUPPORT
	__ATTR(debug, S_IRUGO | S_IWUSR | S_IWGRP,
	show_debug, store_debug),
#endif
#ifdef FRAME_CHECK
	__ATTR(dump_yuv, S_IRUGO | S_IWUSR | S_IWGRP,
	dump_yuv_show, dump_yuv_store),
	__ATTR(frame_check, S_IRUGO | S_IWUSR | S_IWGRP,
	frame_check_show, frame_check_store),
#endif
	__ATTR_RO(dump_fps),
	__ATTR_NULL
};
4800
/* Device class backing /sys/class/vdec/ with the attribute table above. */
static struct class vdec_class = {
		.name = "vdec",
		.class_attrs = vdec_class_attrs,
	};
4805
/*
 * Return the struct device of the vdec platform device registered in
 * vdec_probe().  Valid only after a successful probe; callers must not
 * use it before then.
 */
struct device *get_vdec_device(void)
{
	return &vdec_core->vdec_core_platform_device->dev;
}
EXPORT_SYMBOL(get_vdec_device);
4811
4812static int vdec_probe(struct platform_device *pdev)
4813{
4814 s32 i, r;
4815
4816 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
4817 sizeof(struct vdec_core_s), GFP_KERNEL);
4818 if (vdec_core == NULL) {
4819 pr_err("vdec core allocation failed.\n");
4820 return -ENOMEM;
4821 }
4822
4823 atomic_set(&vdec_core->vdec_nr, 0);
4824 sema_init(&vdec_core->sem, 1);
4825
4826 r = class_register(&vdec_class);
4827 if (r) {
4828 pr_info("vdec class create fail.\n");
4829 return r;
4830 }
4831
4832 vdec_core->vdec_core_platform_device = pdev;
4833
4834 platform_set_drvdata(pdev, vdec_core);
4835
4836 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4837 vdec_core->isr_context[i].index = i;
4838 vdec_core->isr_context[i].irq = -1;
4839 }
4840
4841 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
4842 IRQF_ONESHOT, "vdec-0", NULL);
4843 if (r < 0) {
4844 pr_err("vdec interrupt request failed\n");
4845 return r;
4846 }
4847
4848 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
4849 IRQF_ONESHOT, "vdec-1", NULL);
4850 if (r < 0) {
4851 pr_err("vdec interrupt request failed\n");
4852 return r;
4853 }
4854#if 0
4855 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
4856 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
4857 IRQF_ONESHOT, "vdec-hevc_back", NULL);
4858 if (r < 0) {
4859 pr_err("vdec interrupt request failed\n");
4860 return r;
4861 }
4862 }
4863#endif
4864 r = of_reserved_mem_device_init(&pdev->dev);
4865 if (r == 0)
4866 pr_info("vdec_probe done\n");
4867
4868 vdec_core->cma_dev = &pdev->dev;
4869
4870 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
4871 /* default to 250MHz */
4872 vdec_clock_hi_enable();
4873 }
4874
4875 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
4876 /* set vdec dmc request to urgent */
4877 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
4878 }
4879 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
4880 spin_lock_init(&vdec_core->lock);
4881 spin_lock_init(&vdec_core->canvas_lock);
4882 spin_lock_init(&vdec_core->fps_lock);
4883 spin_lock_init(&vdec_core->input_lock);
4884 ida_init(&vdec_core->ida);
4885 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
4886 "vdec-core");
4887
4888 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s",__WQ_LEGACY |
4889 WQ_MEM_RECLAIM |WQ_HIGHPRI/*high priority*/, "vdec-work");
4890 /*work queue priority lower than vdec-core.*/
4891 return 0;
4892}
4893
4894static int vdec_remove(struct platform_device *pdev)
4895{
4896 int i;
4897
4898 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4899 if (vdec_core->isr_context[i].irq >= 0) {
4900 free_irq(vdec_core->isr_context[i].irq,
4901 &vdec_core->isr_context[i]);
4902 vdec_core->isr_context[i].irq = -1;
4903 vdec_core->isr_context[i].dev_isr = NULL;
4904 vdec_core->isr_context[i].dev_threaded_isr = NULL;
4905 vdec_core->isr_context[i].dev_id = NULL;
4906 }
4907 }
4908
4909 kthread_stop(vdec_core->thread);
4910
4911 destroy_workqueue(vdec_core->vdec_core_wq);
4912 class_unregister(&vdec_class);
4913
4914 return 0;
4915}
4916
/* Device-tree match table; note the historical space in "amlogic, vdec". */
static const struct of_device_id amlogic_vdec_dt_match[] = {
	{
		.compatible = "amlogic, vdec",
	},
	{},
};
4923
/* Runtime-tunable parameters exported through the mconfig framework
 * (registered in vdec_module_init() under "media.decoder").
 */
static struct mconfig vdec_configs[] = {
	MC_PU32("debug_trace_num", &debug_trace_num),
	MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
	MC_PU32("clk_config", &clk_config),
	MC_PI32("step_mode", &step_mode),
	MC_PI32("poweron_clock_level", &poweron_clock_level),
};
/* mconfig node the table above is attached to */
static struct mconfig_node vdec_node;
4932
/* Platform driver binding the DT-matched vdec device to probe/remove. */
static struct platform_driver vdec_driver = {
	.probe = vdec_probe,
	.remove = vdec_remove,
	.driver = {
		.name = "vdec",
		.of_match_table = amlogic_vdec_dt_match,
	}
};
4941
/* Advertises DRM frame-mode input capability via the codec profile list. */
static struct codec_profile_t amvdec_input_profile = {
	.name = "vdec_input",
	.profile = "drm_framemode"
};
4946
4947int vdec_module_init(void)
4948{
4949 if (platform_driver_register(&vdec_driver)) {
4950 pr_info("failed to register vdec module\n");
4951 return -ENODEV;
4952 }
4953 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4954 "vdec", vdec_configs, CONFIG_FOR_RW);
4955 vcodec_profile_register(&amvdec_input_profile);
4956 return 0;
4957}
4958EXPORT_SYMBOL(vdec_module_init);
4959
/* Module-level teardown: unregisters the platform driver (remove path
 * releases IRQs, thread, work queue and class -- see vdec_remove()).
 */
void vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
EXPORT_SYMBOL(vdec_module_exit);
4965
/* Dead code: earlier built-in __init/__exit variants, superseded by the
 * exported vdec_module_init()/vdec_module_exit() above.  Kept under
 * #if 0 for reference only.
 */
#if 0
static int __init vdec_module_init(void)
{
	if (platform_driver_register(&vdec_driver)) {
		pr_info("failed to register vdec module\n");
		return -ENODEV;
	}
	INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
		"vdec", vdec_configs, CONFIG_FOR_RW);
	return 0;
}

static void __exit vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
#endif
4983
/*
 * reserved_mem_ops.device_init hook: records the device that owns the
 * vdec reserved-memory region so later CMA allocations use it.
 */
static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
{
	vdec_core->cma_dev = dev;

	return 0;
}
4990
/* Ops installed on the "amlogic, vdec-memory" reserved-mem region. */
static const struct reserved_mem_ops rmem_vdec_ops = {
	.device_init = vdec_mem_device_init,
};
4994
/*
 * Early reserved-memory setup (see RESERVEDMEM_OF_DECLARE below):
 * attaches rmem_vdec_ops so device_init fires when a device claims
 * the region.
 */
static int __init vdec_mem_setup(struct reserved_mem *rmem)
{
	rmem->ops = &rmem_vdec_ops;
	pr_info("vdec: reserved mem setup\n");

	return 0;
}
5002
5003void vdec_fill_frame_info(struct vframe_qos_s *vframe_qos, int debug)
5004{
5005 if (frame_info_buf_in == NULL) {
5006 pr_info("error,frame_info_buf_in is null\n");
5007 return;
5008 }
5009 if (frame_info_buf_out == NULL) {
5010 pr_info("error,frame_info_buf_out is null\n");
5011 return;
5012 }
5013 if (frame_qos_wr >= QOS_FRAME_NUM)
5014 frame_qos_wr = 0;
5015
5016 if (frame_qos_wr >= QOS_FRAME_NUM ||
5017 frame_qos_wr < 0) {
5018 pr_info("error,index :%d is error\n", frame_qos_wr);
5019 return;
5020 }
5021 if (frameinfo_flag == DISABLE_FRAME_INFO)
5022 return;
5023
5024 if (frameinfo_flag == PRINT_FRAME_INFO) {
5025 pr_info("num %d size %d pts %d\n",
5026 vframe_qos->num,
5027 vframe_qos->size,
5028 vframe_qos->pts);
5029 pr_info("mv min_mv %d avg_mv %d max_mv %d\n",
5030 vframe_qos->min_mv,
5031 vframe_qos->avg_mv,
5032 vframe_qos->max_mv);
5033 pr_info("qp min_qp %d avg_qp %d max_qp %d\n",
5034 vframe_qos->min_qp,
5035 vframe_qos->avg_qp,
5036 vframe_qos->max_qp);
5037 pr_info("skip min_skip %d avg_skip %d max_skip %d\n",
5038 vframe_qos->min_skip,
5039 vframe_qos->avg_skip,
5040 vframe_qos->max_skip);
5041 }
5042 memcpy(&frame_info_buf_in[frame_qos_wr++],
5043 vframe_qos, sizeof(struct vframe_qos_s));
5044 if (frame_qos_wr >= QOS_FRAME_NUM)
5045 frame_qos_wr = 0;
5046
5047 /*pr_info("frame_qos_wr:%d\n", frame_qos_wr);*/
5048
5049}
5050EXPORT_SYMBOL(vdec_fill_frame_info);
5051
/*
 * Drain the QoS ring buffer filled by vdec_fill_frame_info(): copies
 * the records between the read index (frame_qos_rd) and a snapshot of
 * the write index into frame_info_buf_out (linearizing a wrapped
 * region into two memcpy's), advances the read index to the snapshot,
 * and returns frame_info_buf_out.  Returns NULL if either buffer is
 * unallocated.
 *
 * NOTE(review): no locking against the writer -- qos_wr is snapshotted
 * once, but a concurrent writer can still modify the slot being
 * copied; confirm callers serialize against vdec_fill_frame_info().
 */
struct vframe_qos_s *vdec_get_qos_info(void)
{
	int write_count = 0;
	/* snapshot the write index so a concurrent writer can't move
	 * the goalposts mid-copy
	 */
	int qos_wr = frame_qos_wr;

	if (frame_info_buf_in == NULL) {
		pr_info("error,frame_info_buf_in is null\n");
		return NULL;
	}
	if (frame_info_buf_out == NULL) {
		pr_info("error,frame_info_buf_out is null\n");
		return NULL;
	}


	memset(frame_info_buf_out, 0,
		QOS_FRAME_NUM*sizeof(struct vframe_qos_s));
	if (frame_qos_rd > qos_wr) {
		/* wrapped region: copy [rd, end) then [0, wr) */
		write_count = QOS_FRAME_NUM - frame_qos_rd;
		if (write_count > 0 && write_count <= QOS_FRAME_NUM) {
			/* NOTE(review): first memcpy copies from index 0,
			 * not frame_qos_rd -- confirm whether
			 * &frame_info_buf_in[frame_qos_rd] was intended.
			 */
			memcpy(frame_info_buf_out, &frame_info_buf_in[0],
				write_count*sizeof(struct vframe_qos_s));
			if ((write_count + qos_wr) <= QOS_FRAME_NUM)
				memcpy(&frame_info_buf_out[write_count], frame_info_buf_in,
					qos_wr*sizeof(struct vframe_qos_s));
			else
				pr_info("get_qos_info:%d,out of range\n", __LINE__);
		} else
			pr_info("get_qos_info:%d,out of range\n", __LINE__);
	} else if (frame_qos_rd < qos_wr) {
		/* linear region: copy [rd, wr) in one go */
		write_count = qos_wr - frame_qos_rd;
		if (write_count > 0 && write_count < QOS_FRAME_NUM)
			memcpy(frame_info_buf_out, &frame_info_buf_in[frame_qos_rd],
				(write_count)*sizeof(struct vframe_qos_s));
		else
			pr_info("get_qos_info:%d, out of range\n", __LINE__);
	}
	/*
	pr_info("cnt:%d,size:%d,num:%d,rd:%d,wr:%d\n",
		wirte_count,
		frame_info_buf_out[0].size,
		frame_info_buf_out[0].num,
		frame_qos_rd,qos_wr);
	*/
	frame_qos_rd = qos_wr;
	return frame_info_buf_out;
}
EXPORT_SYMBOL(vdec_get_qos_info);
5100
5101
/* Hook vdec_mem_setup() to the "amlogic, vdec-memory" DT reserved region. */
RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
/*
uint force_hevc_clock_cntl;
EXPORT_SYMBOL(force_hevc_clock_cntl);

module_param(force_hevc_clock_cntl, uint, 0664);
*/
/* Debug/tuning knobs exposed under /sys/module/<module>/parameters/. */
module_param(debug, uint, 0664);
module_param(debug_trace_num, uint, 0664);
module_param(hevc_max_reset_count, int, 0664);
module_param(clk_config, uint, 0664);
module_param(step_mode, int, 0664);
module_param(debugflags, int, 0664);
module_param(parallel_decode, int, 0664);
module_param(fps_detection, int, 0664);
module_param(fps_clear, int, 0664);
module_param(force_nosecure_even_drm, int, 0664);
module_param(disable_switch_single_to_mult, int, 0664);

module_param(frameinfo_flag, int, 0664);
MODULE_PARM_DESC(frameinfo_flag,
	"\n frameinfo_flag\n");
module_param(v4lvideo_add_di, int, 0664);
MODULE_PARM_DESC(v4lvideo_add_di,
	"\n v4lvideo_add_di\n");

module_param(max_di_instance, int, 0664);
MODULE_PARM_DESC(max_di_instance,
	"\n max_di_instance\n");

/*
*module_init(vdec_module_init);
*module_exit(vdec_module_exit);
*/
/* CREATE_TRACE_POINTS must be defined before re-including the trace
 * header so the tracepoint definitions are emitted in this unit.
 */
#define CREATE_TRACE_POINTS
#include "vdec_trace.h"
MODULE_DESCRIPTION("AMLOGIC vdec driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
5141