summaryrefslogtreecommitdiff
path: root/drivers/frame_provider/decoder/utils/vdec.c (plain)
blob: 54f9051723ee19296adca8ccd16b7f2a5b62bc1d
1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17#define DEBUG
18#include <linux/kernel.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/kthread.h>
24#include <linux/platform_device.h>
25#include <linux/uaccess.h>
26#include <linux/semaphore.h>
27#include <linux/sched/rt.h>
28#include <linux/interrupt.h>
29#include <linux/amlogic/media/utils/vformat.h>
30#include <linux/amlogic/iomap.h>
31#include <linux/amlogic/media/canvas/canvas.h>
32#include <linux/amlogic/media/vfm/vframe.h>
33#include <linux/amlogic/media/vfm/vframe_provider.h>
34#include <linux/amlogic/media/vfm/vframe_receiver.h>
35#include <linux/amlogic/media/video_sink/ionvideo_ext.h>
36#include <linux/amlogic/media/vfm/vfm_ext.h>
37/*for VDEC_DEBUG_SUPPORT*/
38#include <linux/time.h>
39
40#include <linux/amlogic/media/utils/vdec_reg.h>
41#include "vdec.h"
42#include "vdec_trace.h"
43#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
44#include "vdec_profile.h"
45#endif
46#include <linux/of.h>
47#include <linux/of_fdt.h>
48#include <linux/libfdt_env.h>
49#include <linux/of_reserved_mem.h>
50#include <linux/dma-contiguous.h>
51#include <linux/cma.h>
52#include <linux/module.h>
53#include <linux/slab.h>
54#include <linux/dma-mapping.h>
55#include <linux/dma-contiguous.h>
56#include "../../../stream_input/amports/amports_priv.h"
57
58#include <linux/amlogic/media/utils/amports_config.h>
59#include "../utils/amvdec.h"
60#include "vdec_input.h"
61
62#include "../../../common/media_clock/clk/clk.h"
63#include <linux/reset.h>
64#include <linux/amlogic/cpu_version.h>
65#include <linux/amlogic/media/codec_mm/codec_mm.h>
66#include <linux/amlogic/media/video_sink/video_keeper.h>
67#include <linux/amlogic/media/codec_mm/configs.h>
68#include <linux/amlogic/media/frame_sync/ptsserv.h>
69#include "secprot.h"
70#include "../../../common/chips/decoder_cpu_ver_info.h"
71#include "frame_check.h"
72
73#ifdef CONFIG_AMLOGIC_POWER
74#include <linux/amlogic/power_ctrl.h>
75#endif
76
/* Serializes vdec instance init/teardown across the module. */
static DEFINE_MUTEX(vdec_mutex);

#define MC_SIZE (4096 * 4)
#define CMA_ALLOC_SIZE SZ_64M
#define MEM_NAME "vdec_prealloc"
static int inited_vcodec_num;
/* current jiffies converted to milliseconds */
#define jiffies_ms div64_u64(get_jiffies_64() * 1000, HZ)
static int poweron_clock_level;
static int keep_vdec_mem;
static unsigned int debug_trace_num = 16 * 20;
static int step_mode;
static unsigned int clk_config;
/*
 * debug bitmask:
 *  &1: sched_priority to MAX_RT_PRIO -1.
 *  &2: always reload firmware.
 *  &4: vdec canvas debug enable
 *  &8: log vdec_up() wakeups
 */
static unsigned int debug;

static int hevc_max_reset_count;

static int no_powerdown;
static int parallel_decode = 1;
/* fps_detection: enables per-instance fps accounting in vdec_fps_detec() */
static int fps_detection;
/* fps_clear: when set to 1, all fps counters are reset on next sample */
static int fps_clear;


static int force_nosecure_even_drm;
/* blocks the single->multi instance switch in vdec_set_decinfo() */
static int disable_switch_single_to_mult;

/* protects the DMC_REQ_CTRL read-modify-write in enable/disable_DMC */
static DEFINE_SPINLOCK(vdec_spin_lock);

#define HEVC_TEST_LIMIT 100
#define GXBB_REV_A_MINOR 0xA

#define PRINT_FRAME_INFO 1
#define DISABLE_FRAME_INFO 2

static int frameinfo_flag = 0;
//static int path_debug = 0;

/* qos ring buffer endpoints; wr/rd are the ring cursors */
static struct vframe_qos_s *frame_info_buf_in = NULL;
static struct vframe_qos_s *frame_info_buf_out = NULL;
static int frame_qos_wr = 0;
static int frame_qos_rd = 0;
int decode_underflow = 0;
123
/*
 * Number of canvas_stat[] slots: the 0..AMVDEC_CANVAS_MAX2 range plus the
 * AMVDEC_CANVAS_START_INDEX..AMVDEC_CANVAS_MAX1 range (see get_canvas_ex()
 * for how slot indices map back to canvas ids).
 */
#define CANVAS_MAX_SIZE (AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1)

/* name/offset pair for a register (debug/dump tables) */
struct am_reg {
	char *name;
	int offset;
};

/* per-IRQ slot: handler pair registered for the line plus its owner vdec */
struct vdec_isr_context_s {
	int index;
	int irq;
	irq_handler_t dev_isr;
	irq_handler_t dev_threaded_isr;
	void *dev_id;
	struct vdec_s *vdec;
};

/* fps accounting for one decoder instance (timestamps in microseconds) */
struct decode_fps_s {
	u32 frame_count;
	u64 start_timestamp;
	u64 last_timestamp;
	u32 fps;
};

/*
 * Module-wide decoder core state.  One instance (vdec_core) is shared by
 * all decoder instances; the spinlocks guard the lists/bitmasks named
 * after them (lock -> connected_vdec_list, canvas_lock -> canvas_stat,
 * fps_lock -> decode_fps, input_lock -> buff_flag/stream_buff_flag).
 */
struct vdec_core_s {
	struct list_head connected_vdec_list;
	spinlock_t lock;
	spinlock_t canvas_lock;
	spinlock_t fps_lock;
	spinlock_t input_lock;
	struct ida ida;		/* instance id allocator (see vdec_create) */
	atomic_t vdec_nr;	/* live instance count */
	struct vdec_s *vfm_vdec;
	struct vdec_s *active_vdec;
	struct vdec_s *active_hevc;
	struct vdec_s *hint_fr_vdec;
	struct platform_device *vdec_core_platform_device;
	struct device *cma_dev;
	struct semaphore sem;	/* wakes the scheduler thread (vdec_up) */
	struct task_struct *thread;
	struct workqueue_struct *vdec_core_wq;

	unsigned long sched_mask;
	struct vdec_isr_context_s isr_context[VDEC_IRQ_MAX];
	int power_ref_count[VDEC_MAX];
	struct vdec_s *last_vdec;
	int parallel_dec;
	unsigned long power_ref_mask;
	int vdec_combine_flag;
	struct decode_fps_s decode_fps[MAX_INSTANCE_MUN];
	unsigned long buff_flag;	/* core bits with frame data queued */
	unsigned long stream_buff_flag;	/* core bits in stream-based mode */
};

/* refcounted ownership record for one canvas slot */
struct canvas_status_s {
	int type;
	int canvas_used_flag;	/* number of users sharing this canvas */
	int id;			/* bitmask of instance ids using it */
};


static struct vdec_core_s *vdec_core;

/* indexed by enum vdec_status values */
static const char * const vdec_status_string[] = {
	"VDEC_STATUS_UNINITIALIZED",
	"VDEC_STATUS_DISCONNECTED",
	"VDEC_STATUS_CONNECTED",
	"VDEC_STATUS_ACTIVE"
};

static int debugflags;

/* guarded by vdec_core->canvas_lock; size equals CANVAS_MAX_SIZE */
static struct canvas_status_s canvas_stat[AMVDEC_CANVAS_MAX1 - AMVDEC_CANVAS_START_INDEX + 1 + AMVDEC_CANVAS_MAX2 + 1];
197
/* Return the module-wide debug flag word (set externally, e.g. debugfs). */
int vdec_get_debug_flags(void)
{
	return debugflags;
}
EXPORT_SYMBOL(vdec_get_debug_flags);
203
204unsigned char is_mult_inc(unsigned int type)
205{
206 unsigned char ret = 0;
207 if (vdec_get_debug_flags() & 0xf000)
208 ret = (vdec_get_debug_flags() & 0x1000)
209 ? 1 : 0;
210 else if (type & PORT_TYPE_DECODER_SCHED)
211 ret = 1;
212 return ret;
213}
214EXPORT_SYMBOL(is_mult_inc);
215
/* Which hardware cores own an input context (indexed by enum vdec_type_e). */
static const bool cores_with_input[VDEC_MAX] = {
	true, /* VDEC_1 */
	false, /* VDEC_HCODEC */
	false, /* VDEC_2 */
	true, /* VDEC_HEVC / VDEC_HEVC_FRONT */
	false, /* VDEC_HEVC_BACK */
};

/* IRQ line used by each core, same indexing as cores_with_input[]. */
static const int cores_int[VDEC_MAX] = {
	VDEC_IRQ_1,
	VDEC_IRQ_2,
	VDEC_IRQ_0,
	VDEC_IRQ_0,
	VDEC_IRQ_HEVC_BACK
};
231
/*
 * irqsave lock/unlock helper pairs for the vdec_core_s spinlocks.
 * Each *_lock() returns the saved irq flags; pass them back to the
 * matching *_unlock().
 */
unsigned long vdec_canvas_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->canvas_lock, flags);

	return flags;
}

void vdec_canvas_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->canvas_lock, flags);
}

unsigned long vdec_fps_lock(struct vdec_core_s *core)
{
	unsigned long flags;
	spin_lock_irqsave(&core->fps_lock, flags);

	return flags;
}

void vdec_fps_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->fps_lock, flags);
}

unsigned long vdec_core_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->lock, flags);

	return flags;
}

void vdec_core_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->lock, flags);
}

unsigned long vdec_inputbuff_lock(struct vdec_core_s *core)
{
	unsigned long flags;

	spin_lock_irqsave(&core->input_lock, flags);

	return flags;
}

void vdec_inputbuff_unlock(struct vdec_core_s *core, unsigned long flags)
{
	spin_unlock_irqrestore(&core->input_lock, flags);
}
285
286
287static bool vdec_is_input_frame_empty(struct vdec_s *vdec) {
288 struct vdec_core_s *core = vdec_core;
289 bool ret;
290 unsigned long flags;
291
292 flags = vdec_inputbuff_lock(core);
293 ret = !(vdec->core_mask & core->buff_flag);
294 vdec_inputbuff_unlock(core, flags);
295
296 return ret;
297}
298
299static void vdec_up(struct vdec_s *vdec)
300{
301 struct vdec_core_s *core = vdec_core;
302
303 if (debug & 8)
304 pr_info("vdec_up, id:%d\n", vdec->id);
305 up(&core->sem);
306}
307
308
/* Wall-clock time in microseconds (gettimeofday based, not monotonic). */
static u64 vdec_get_us_time_system(void)
{
	struct timeval tv;

	do_gettimeofday(&tv);

	return div64_u64(timeval_to_ns(&tv), 1000);
}
317
318static void vdec_fps_clear(int id)
319{
320 if (id >= MAX_INSTANCE_MUN)
321 return;
322
323 vdec_core->decode_fps[id].frame_count = 0;
324 vdec_core->decode_fps[id].start_timestamp = 0;
325 vdec_core->decode_fps[id].last_timestamp = 0;
326 vdec_core->decode_fps[id].fps = 0;
327}
328
329static void vdec_fps_clearall(void)
330{
331 int i;
332
333 for (i = 0; i < MAX_INSTANCE_MUN; i++) {
334 vdec_core->decode_fps[i].frame_count = 0;
335 vdec_core->decode_fps[i].start_timestamp = 0;
336 vdec_core->decode_fps[i].last_timestamp = 0;
337 vdec_core->decode_fps[i].fps = 0;
338 }
339}
340
341static void vdec_fps_detec(int id)
342{
343 unsigned long flags;
344
345 if (fps_detection == 0)
346 return;
347
348 if (id >= MAX_INSTANCE_MUN)
349 return;
350
351 flags = vdec_fps_lock(vdec_core);
352
353 if (fps_clear == 1) {
354 vdec_fps_clearall();
355 fps_clear = 0;
356 }
357
358 vdec_core->decode_fps[id].frame_count++;
359 if (vdec_core->decode_fps[id].frame_count == 1) {
360 vdec_core->decode_fps[id].start_timestamp =
361 vdec_get_us_time_system();
362 vdec_core->decode_fps[id].last_timestamp =
363 vdec_core->decode_fps[id].start_timestamp;
364 } else {
365 vdec_core->decode_fps[id].last_timestamp =
366 vdec_get_us_time_system();
367 vdec_core->decode_fps[id].fps =
368 (u32)div_u64(((u64)(vdec_core->decode_fps[id].frame_count) *
369 10000000000),
370 (vdec_core->decode_fps[id].last_timestamp -
371 vdec_core->decode_fps[id].start_timestamp));
372 }
373 vdec_fps_unlock(vdec_core, flags);
374}
375
376
377
378static int get_canvas(unsigned int index, unsigned int base)
379{
380 int start;
381 int canvas_index = index * base;
382 int ret;
383
384 if ((base > 4) || (base == 0))
385 return -1;
386
387 if ((AMVDEC_CANVAS_START_INDEX + canvas_index + base - 1)
388 <= AMVDEC_CANVAS_MAX1) {
389 start = AMVDEC_CANVAS_START_INDEX + base * index;
390 } else {
391 canvas_index -= (AMVDEC_CANVAS_MAX1 -
392 AMVDEC_CANVAS_START_INDEX + 1) / base * base;
393 if (canvas_index <= AMVDEC_CANVAS_MAX2)
394 start = canvas_index / base;
395 else
396 return -1;
397 }
398
399 if (base == 1) {
400 ret = start;
401 } else if (base == 2) {
402 ret = ((start + 1) << 16) | ((start + 1) << 8) | start;
403 } else if (base == 3) {
404 ret = ((start + 2) << 16) | ((start + 1) << 8) | start;
405 } else if (base == 4) {
406 ret = (((start + 3) << 24) | (start + 2) << 16) |
407 ((start + 1) << 8) | start;
408 }
409
410 return ret;
411}
412
413static int get_canvas_ex(int type, int id)
414{
415 int i;
416 unsigned long flags;
417
418 flags = vdec_canvas_lock(vdec_core);
419
420 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
421 /*0x10-0x15 has been used by rdma*/
422 if ((i >= 0x10) && (i <= 0x15))
423 continue;
424 if ((canvas_stat[i].type == type) &&
425 (canvas_stat[i].id & (1 << id)) == 0) {
426 canvas_stat[i].canvas_used_flag++;
427 canvas_stat[i].id |= (1 << id);
428 if (debug & 4)
429 pr_debug("get used canvas %d\n", i);
430 vdec_canvas_unlock(vdec_core, flags);
431 if (i < AMVDEC_CANVAS_MAX2 + 1)
432 return i;
433 else
434 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
435 }
436 }
437
438 for (i = 0; i < CANVAS_MAX_SIZE; i++) {
439 /*0x10-0x15 has been used by rdma*/
440 if ((i >= 0x10) && (i <= 0x15))
441 continue;
442 if (canvas_stat[i].type == 0) {
443 canvas_stat[i].type = type;
444 canvas_stat[i].canvas_used_flag = 1;
445 canvas_stat[i].id = (1 << id);
446 if (debug & 4) {
447 pr_debug("get canvas %d\n", i);
448 pr_debug("canvas_used_flag %d\n",
449 canvas_stat[i].canvas_used_flag);
450 pr_debug("canvas_stat[i].id %d\n",
451 canvas_stat[i].id);
452 }
453 vdec_canvas_unlock(vdec_core, flags);
454 if (i < AMVDEC_CANVAS_MAX2 + 1)
455 return i;
456 else
457 return (i + AMVDEC_CANVAS_START_INDEX - AMVDEC_CANVAS_MAX2 - 1);
458 }
459 }
460 vdec_canvas_unlock(vdec_core, flags);
461
462 pr_info("cannot get canvas\n");
463
464 return -1;
465}
466
467static void free_canvas_ex(int index, int id)
468{
469 unsigned long flags;
470 int offset;
471
472 flags = vdec_canvas_lock(vdec_core);
473 if (index >= 0 &&
474 index < AMVDEC_CANVAS_MAX2 + 1)
475 offset = index;
476 else if ((index >= AMVDEC_CANVAS_START_INDEX) &&
477 (index <= AMVDEC_CANVAS_MAX1))
478 offset = index + AMVDEC_CANVAS_MAX2 + 1 - AMVDEC_CANVAS_START_INDEX;
479 else {
480 vdec_canvas_unlock(vdec_core, flags);
481 return;
482 }
483
484 if ((canvas_stat[offset].canvas_used_flag > 0) &&
485 (canvas_stat[offset].id & (1 << id))) {
486 canvas_stat[offset].canvas_used_flag--;
487 canvas_stat[offset].id &= ~(1 << id);
488 if (canvas_stat[offset].canvas_used_flag == 0) {
489 canvas_stat[offset].type = 0;
490 canvas_stat[offset].id = 0;
491 }
492 if (debug & 4) {
493 pr_debug("free index %d used_flag %d, type = %d, id = %d\n",
494 offset,
495 canvas_stat[offset].canvas_used_flag,
496 canvas_stat[offset].type,
497 canvas_stat[offset].id);
498 }
499 }
500 vdec_canvas_unlock(vdec_core, flags);
501
502 return;
503
504}
505
/* Reset all decoder-side DMC pipelines via RESET7_REGISTER. */
static void vdec_dmc_pipeline_reset(void)
{
	/*
	 * bit15: vdec_piple
	 * bit14: hevc_dmc_piple
	 * bit13: hevcf_dmc_pipl
	 * bit12: wave420_dmc_pipl
	 * bit11: hcodec_dmc_pipl
	 */

	WRITE_RESET_REG(RESET7_REGISTER,
		(1 << 15) | (1 << 14) | (1 << 13) |
		(1 << 12) | (1 << 11));
}
520
/*
 * Halt the decoder's embedded RISC core (VLD or HEVC flavor) and wait for
 * in-flight IMEM/LMEM DMA to drain.  Each wait is bounded by a jiffies
 * timeout so a wedged engine cannot hang the caller.
 */
static void vdec_stop_armrisc(int hw)
{
	ulong timeout = jiffies + HZ;

	if (hw == VDEC_INPUT_TARGET_VLD) {
		/* clear micro/coprocessor run state */
		WRITE_VREG(MPSR, 0);
		WRITE_VREG(CPSR, 0);

		/* bit15 = DMA busy; poll until clear or timeout */
		while (READ_VREG(IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		timeout = jiffies + HZ;
		while (READ_VREG(LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	} else if (hw == VDEC_INPUT_TARGET_HEVC) {
		WRITE_VREG(HEVC_MPSR, 0);
		WRITE_VREG(HEVC_CPSR, 0);

		while (READ_VREG(HEVC_IMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}

		/* NOTE(review): HZ/10 here vs HZ in the VLD path - confirm
		 * the shorter LMEM drain timeout is intentional. */
		timeout = jiffies + HZ/10;
		while (READ_VREG(HEVC_LMEM_DMA_CTRL) & 0x8000) {
			if (time_after(jiffies, timeout))
				break;
		}
	}
}
555
/*
 * Gate off this decoder's DMC (DDR memory controller) request port:
 * stop the RISC core first, clear the port's request-enable bit in
 * DMC_REQ_CTRL, then spin until DMC_CHAN_STS raises the matching bit
 * (presumably "channel drained" - not documented here).  Note the final
 * wait has no timeout.
 */
static void vdec_disable_DMC(struct vdec_s *vdec)
{
	/*close first,then wait pedding end,timing suggestion from vlsi*/
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		/* G12A+ moved the VLD request bit */
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/* need to stop armrisc. */
	if (!IS_ERR_OR_NULL(vdec->dev))
		vdec_stop_armrisc(input->target);

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;

	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
588
/*
 * Re-enable this decoder's DMC request port (inverse of
 * vdec_disable_DMC): same mask selection per target/SoC, set the bit(s)
 * in DMC_REQ_CTRL under vdec_spin_lock.  G12B additionally requires the
 * DMC pipelines to be reset first.
 */
static void vdec_enable_DMC(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	unsigned long flags;
	unsigned int mask = 0;

	if (input->target == VDEC_INPUT_TARGET_VLD) {
		mask = (1 << 13);
		/* G12A+ moved the VLD request bit */
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask = (1 << 21);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		mask = (1 << 4); /*hevc*/
		if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
			mask |= (1 << 8); /*hevcb */
	}

	/*must to be reset the dmc pipeline if it's g12b.*/
	if (get_cpu_type() == AM_MESON_CPU_MAJOR_ID_G12B)
		vdec_dmc_pipeline_reset();

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
	pr_debug("%s input->target= 0x%x\n", __func__, input->target);
}
615
616
617
618static int vdec_get_hw_type(int value)
619{
620 int type;
621 switch (value) {
622 case VFORMAT_HEVC:
623 case VFORMAT_VP9:
624 case VFORMAT_AVS2:
625 type = CORE_MASK_HEVC;
626 break;
627
628 case VFORMAT_MPEG12:
629 case VFORMAT_MPEG4:
630 case VFORMAT_H264:
631 case VFORMAT_MJPEG:
632 case VFORMAT_REAL:
633 case VFORMAT_JPEG:
634 case VFORMAT_VC1:
635 case VFORMAT_AVS:
636 case VFORMAT_YUV:
637 case VFORMAT_H264MVC:
638 case VFORMAT_H264_4K2K:
639 case VFORMAT_H264_ENC:
640 case VFORMAT_JPEG_ENC:
641 type = CORE_MASK_VDEC_1;
642 break;
643
644 default:
645 type = -1;
646 }
647
648 return type;
649}
650
651
652static void vdec_save_active_hw(struct vdec_s *vdec)
653{
654 int type;
655
656 type = vdec_get_hw_type(vdec->port->vformat);
657
658 if (type == CORE_MASK_HEVC) {
659 vdec_core->active_hevc = vdec;
660 } else if (type == CORE_MASK_VDEC_1) {
661 vdec_core->active_vdec = vdec;
662 } else {
663 pr_info("save_active_fw wrong\n");
664 }
665}
666
667static void vdec_update_buff_status(void)
668{
669 struct vdec_core_s *core = vdec_core;
670 unsigned long flags;
671 struct vdec_s *vdec;
672
673 flags = vdec_inputbuff_lock(core);
674 core->buff_flag = 0;
675 core->stream_buff_flag = 0;
676 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
677 struct vdec_input_s *input = &vdec->input;
678 if (input_frame_based(input)) {
679 if (input->have_frame_num || input->eos)
680 core->buff_flag |= vdec->core_mask;
681 } else if (input_stream_based(input)) {
682 core->stream_buff_flag |= vdec->core_mask;
683 }
684 }
685 vdec_inputbuff_unlock(core, flags);
686}
687
/* Disabled (#if 0) stream-buffer level watcher; kept for reference. */
#if 0
void vdec_update_streambuff_status(void)
{
	struct vdec_core_s *core = vdec_core;
	struct vdec_s *vdec;

	/* check streaming prepare level threshold if not EOS */
	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		struct vdec_input_s *input = &vdec->input;
		if (input && input_stream_based(input) && !input->eos &&
			(vdec->need_more_data & VDEC_NEED_MORE_DATA)) {
			u32 rp, wp, level;

			rp = READ_PARSER_REG(PARSER_VIDEO_RP);
			wp = READ_PARSER_REG(PARSER_VIDEO_WP);
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;
			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				break;
			} else if (level > input->prepare_level) {
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
				if (debug & 8)
					pr_info("vdec_flush_streambuff_status up\n");
				vdec_up(vdec);
			}
			break;
		}
	}
}
EXPORT_SYMBOL(vdec_update_streambuff_status);
#endif
723
724int vdec_status(struct vdec_s *vdec, struct vdec_info *vstatus)
725{
726 if (vdec && vdec->dec_status &&
727 ((vdec->status == VDEC_STATUS_CONNECTED ||
728 vdec->status == VDEC_STATUS_ACTIVE)))
729 return vdec->dec_status(vdec, vstatus);
730
731 return 0;
732}
733EXPORT_SYMBOL(vdec_status);
734
735int vdec_set_trickmode(struct vdec_s *vdec, unsigned long trickmode)
736{
737 int r;
738
739 if (vdec->set_trickmode) {
740 r = vdec->set_trickmode(vdec, trickmode);
741
742 if ((r == 0) && (vdec->slave) && (vdec->slave->set_trickmode))
743 r = vdec->slave->set_trickmode(vdec->slave,
744 trickmode);
745 return r;
746 }
747
748 return -1;
749}
750EXPORT_SYMBOL(vdec_set_trickmode);
751
752int vdec_set_isreset(struct vdec_s *vdec, int isreset)
753{
754 vdec->is_reset = isreset;
755 pr_info("is_reset=%d\n", isreset);
756 if (vdec->set_isreset)
757 return vdec->set_isreset(vdec, isreset);
758 return 0;
759}
760EXPORT_SYMBOL(vdec_set_isreset);
761
/* Record whether DolbyVision metadata travels with the EL stream. */
int vdec_set_dv_metawithel(struct vdec_s *vdec, int isdvmetawithel)
{
	vdec->dolby_meta_with_el = isdvmetawithel;
	pr_info("isdvmetawithel=%d\n", isdvmetawithel);
	return 0;
}
EXPORT_SYMBOL(vdec_set_dv_metawithel);

/* Set the module-wide flag read elsewhere to skip decoder power-down. */
void vdec_set_no_powerdown(int flag)
{
	no_powerdown = flag;
	pr_info("no_powerdown=%d\n", no_powerdown);
	return;
}
EXPORT_SYMBOL(vdec_set_no_powerdown);
777
778void vdec_count_info(struct vdec_info *vs, unsigned int err,
779 unsigned int offset)
780{
781 if (err)
782 vs->error_frame_count++;
783 if (offset) {
784 if (0 == vs->frame_count) {
785 vs->offset = 0;
786 vs->samp_cnt = 0;
787 }
788 vs->frame_data = offset > vs->total_data ?
789 offset - vs->total_data : vs->total_data - offset;
790 vs->total_data = offset;
791 if (vs->samp_cnt < 96000 * 2) { /* 2s */
792 if (0 == vs->samp_cnt)
793 vs->offset = offset;
794 vs->samp_cnt += vs->frame_dur;
795 } else {
796 vs->bit_rate = (offset - vs->offset) / 2;
797 /*pr_info("bitrate : %u\n",vs->bit_rate);*/
798 vs->samp_cnt = 0;
799 }
800 vs->frame_count++;
801 }
802 /*pr_info("size : %u, offset : %u, dur : %u, cnt : %u\n",
803 vs->offset,offset,vs->frame_dur,vs->samp_cnt);*/
804 return;
805}
806EXPORT_SYMBOL(vdec_count_info);
/* 4K decode is supported on every SoC except the GXL 805X package. */
int vdec_is_support_4k(void)
{
	return !is_meson_gxl_package_805X();
}
EXPORT_SYMBOL(vdec_is_support_4k);
812
/*
 * clk_config:
 *0:default
 *1:no gp0_pll;
 *2:always used gp0_pll;
 *>=10:fixed n M clk;
 *== 100 , 100M clks;
 */
unsigned int get_vdec_clk_config_settings(void)
{
	return clk_config;
}
/* NOTE(review): the getter has no EXPORT_SYMBOL, unlike the setter -
 * confirm it is only used inside this module. */
void update_vdec_clk_config_settings(unsigned int config)
{
	clk_config = config;
}
EXPORT_SYMBOL(update_vdec_clk_config_settings);
830
831static bool hevc_workaround_needed(void)
832{
833 return (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) &&
834 (get_meson_cpu_version(MESON_CPU_VERSION_LVL_MINOR)
835 == GXBB_REV_A_MINOR);
836}
837
/* Device handle used for CMA/codec memory allocations. */
struct device *get_codec_cma_device(void)
{
	return vdec_core->cma_dev;
}
842
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
/*
 * Platform-device name table, two entries per format:
 * [format*2] legacy single-instance driver, [format*2+1] multi-instance
 * driver (see get_dev_name()).  Order must match the format enumeration.
 */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12", "ammvdec_mpeg12",
	"amvdec_mpeg4", "ammvdec_mpeg4",
	"amvdec_h264", "ammvdec_h264",
	"amvdec_mjpeg", "ammvdec_mjpeg",
	"amvdec_real", "ammvdec_real",
	"amjpegdec", "ammjpegdec",
	"amvdec_vc1", "ammvdec_vc1",
	"amvdec_avs", "ammvdec_avs",
	"amvdec_yuv", "ammvdec_yuv",
	"amvdec_h264mvc", "ammvdec_h264mvc",
	"amvdec_h264_4k2k", "ammvdec_h264_4k2k",
	"amvdec_h265", "ammvdec_h265",
	"amvenc_avc", "amvenc_avc",
	"jpegenc", "jpegenc",
	"amvdec_vp9", "ammvdec_vp9",
	"amvdec_avs2", "ammvdec_avs2"
};


#else

/* Single-instance-only name table, one entry per format. */
static const char * const vdec_device_name[] = {
	"amvdec_mpeg12",
	"amvdec_mpeg4",
	"amvdec_h264",
	"amvdec_mjpeg",
	"amvdec_real",
	"amjpegdec",
	"amvdec_vc1",
	"amvdec_avs",
	"amvdec_yuv",
	"amvdec_h264mvc",
	"amvdec_h264_4k2k",
	"amvdec_h265",
	"amvenc_avc",
	"jpegenc",
	"amvdec_vp9",
	"amvdec_avs2"
};

#endif
886
887/*
888 * Only support time sliced decoding for frame based input,
889 * so legacy decoder can exist with time sliced decoder.
890 */
891static const char *get_dev_name(bool use_legacy_vdec, int format)
892{
893#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
894 if (use_legacy_vdec)
895 return vdec_device_name[format * 2];
896 else
897 return vdec_device_name[format * 2 + 1];
898#else
899 return vdec_device_name[format];
900#endif
901}
902
903#ifdef VDEC_DEBUG_SUPPORT
904static u64 get_current_clk(void)
905{
906 /*struct timespec xtime = current_kernel_time();
907 u64 usec = xtime.tv_sec * 1000000;
908 usec += xtime.tv_nsec / 1000;
909 */
910 u64 usec = sched_clock();
911 return usec;
912}
913
914static void inc_profi_count(unsigned long mask, u32 *count)
915{
916 enum vdec_type_e type;
917
918 for (type = VDEC_1; type < VDEC_MAX; type++) {
919 if (mask & (1 << type))
920 count[type]++;
921 }
922}
923
924static void update_profi_clk_run(struct vdec_s *vdec,
925 unsigned long mask, u64 clk)
926{
927 enum vdec_type_e type;
928
929 for (type = VDEC_1; type < VDEC_MAX; type++) {
930 if (mask & (1 << type)) {
931 vdec->start_run_clk[type] = clk;
932 if (vdec->profile_start_clk[type] == 0)
933 vdec->profile_start_clk[type] = clk;
934 vdec->total_clk[type] = clk
935 - vdec->profile_start_clk[type];
936 /*pr_info("set start_run_clk %ld\n",
937 vdec->start_run_clk);*/
938
939 }
940 }
941}
942
943static void update_profi_clk_stop(struct vdec_s *vdec,
944 unsigned long mask, u64 clk)
945{
946 enum vdec_type_e type;
947
948 for (type = VDEC_1; type < VDEC_MAX; type++) {
949 if (mask & (1 << type)) {
950 if (vdec->start_run_clk[type] == 0)
951 pr_info("error, start_run_clk[%d] not set\n", type);
952
953 /*pr_info("update run_clk type %d, %ld, %ld, %ld\n",
954 type,
955 clk,
956 vdec->start_run_clk[type],
957 vdec->run_clk[type]);*/
958 vdec->run_clk[type] +=
959 (clk - vdec->start_run_clk[type]);
960 }
961 }
962}
963
964#endif
965
966int vdec_set_decinfo(struct vdec_s *vdec, struct dec_sysinfo *p)
967{
968 if (copy_from_user((void *)&vdec->sys_info_store, (void *)p,
969 sizeof(struct dec_sysinfo)))
970 return -EFAULT;
971
972 /* force switch to mult instance if supports this profile. */
973 if ((vdec->type == VDEC_TYPE_SINGLE) &&
974 !disable_switch_single_to_mult) {
975 const char *str = NULL;
976 char fmt[16] = {0};
977
978 str = strchr(get_dev_name(false, vdec->format), '_');
979 if (!str)
980 return -1;
981
982 sprintf(fmt, "m%s", ++str);
983 if (is_support_profile(fmt) &&
984 vdec->sys_info->format != VIDEO_DEC_FORMAT_H263)
985 vdec->type = VDEC_TYPE_STREAM_PARSER;
986 }
987
988 return 0;
989}
990EXPORT_SYMBOL(vdec_set_decinfo);
991
992/* construct vdec strcture */
993struct vdec_s *vdec_create(struct stream_port_s *port,
994 struct vdec_s *master)
995{
996 struct vdec_s *vdec;
997 int type = VDEC_TYPE_SINGLE;
998 int id;
999
1000 if (is_mult_inc(port->type))
1001 type = (port->type & PORT_TYPE_FRAME) ?
1002 VDEC_TYPE_FRAME_BLOCK :
1003 VDEC_TYPE_STREAM_PARSER;
1004
1005 id = ida_simple_get(&vdec_core->ida,
1006 0, MAX_INSTANCE_MUN, GFP_KERNEL);
1007 if (id < 0) {
1008 pr_info("vdec_create request id failed!ret =%d\n", id);
1009 return NULL;
1010 }
1011 vdec = vzalloc(sizeof(struct vdec_s));
1012
1013 /* TBD */
1014 if (vdec) {
1015 vdec->magic = 0x43454456;
1016 vdec->id = -1;
1017 vdec->type = type;
1018 vdec->port = port;
1019 vdec->sys_info = &vdec->sys_info_store;
1020
1021 INIT_LIST_HEAD(&vdec->list);
1022
1023 atomic_inc(&vdec_core->vdec_nr);
1024 vdec->id = id;
1025 vdec_input_init(&vdec->input, vdec);
1026 vdec->input.vdec_is_input_frame_empty = vdec_is_input_frame_empty;
1027 vdec->input.vdec_up = vdec_up;
1028 if (master) {
1029 vdec->master = master;
1030 master->slave = vdec;
1031 master->sched = 1;
1032 }
1033 }
1034
1035 pr_debug("vdec_create instance %p, total %d\n", vdec,
1036 atomic_read(&vdec_core->vdec_nr));
1037
1038 //trace_vdec_create(vdec); /*DEBUG_TMP*/
1039
1040 return vdec;
1041}
1042EXPORT_SYMBOL(vdec_create);
1043
1044int vdec_set_format(struct vdec_s *vdec, int format)
1045{
1046 vdec->format = format;
1047 vdec->port_flag |= PORT_FLAG_VFORMAT;
1048
1049 if (vdec->slave) {
1050 vdec->slave->format = format;
1051 vdec->slave->port_flag |= PORT_FLAG_VFORMAT;
1052 }
1053 //trace_vdec_set_format(vdec, format);/*DEBUG_TMP*/
1054
1055 return 0;
1056}
1057EXPORT_SYMBOL(vdec_set_format);
1058
/* Set the 90kHz PTS; pts64 is derived in microseconds (pts * 100 / 9). */
int vdec_set_pts(struct vdec_s *vdec, u32 pts)
{
	vdec->pts = pts;
	vdec->pts64 = div64_u64((u64)pts * 100, 9);
	vdec->pts_valid = true;
	//trace_vdec_set_pts(vdec, (u64)pts);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts);

/* Set a raw timestamp (unit defined by the caller) on the instance. */
void vdec_set_timestamp(struct vdec_s *vdec, u64 timestamp)
{
	vdec->timestamp = timestamp;
	vdec->timestamp_valid = true;
}
EXPORT_SYMBOL(vdec_set_timestamp);

/* Set the microsecond PTS; the 90kHz pts is derived (pts64 * 9 / 100). */
int vdec_set_pts64(struct vdec_s *vdec, u64 pts64)
{
	vdec->pts64 = pts64;
	vdec->pts = (u32)div64_u64(pts64 * 9, 100);
	vdec->pts_valid = true;

	//trace_vdec_set_pts64(vdec, pts64);/*DEBUG_TMP*/
	return 0;
}
EXPORT_SYMBOL(vdec_set_pts64);
1086
/* Current lifecycle state (see vdec_status_string[] for names). */
int vdec_get_status(struct vdec_s *vdec)
{
	return vdec->status;
}
EXPORT_SYMBOL(vdec_get_status);

/* Number of frames currently queued in the instance's input chain. */
int vdec_get_frame_num(struct vdec_s *vdec)
{
	return vdec->input.have_frame_num;
}
EXPORT_SYMBOL(vdec_get_frame_num);

/* Set the current lifecycle state. */
void vdec_set_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_status(vdec, status);/*DEBUG_TMP*/
	vdec->status = status;
}
EXPORT_SYMBOL(vdec_set_status);

/* Set the state the instance should transition to next. */
void vdec_set_next_status(struct vdec_s *vdec, int status)
{
	//trace_vdec_set_next_status(vdec, status);/*DEBUG_TMP*/
	vdec->next_status = status;
}
EXPORT_SYMBOL(vdec_set_next_status);

/* Select the frame-based video output path for this instance. */
int vdec_set_video_path(struct vdec_s *vdec, int video_path)
{
	vdec->frame_base_video_path = video_path;
	return 0;
}
EXPORT_SYMBOL(vdec_set_video_path);

/* Record the vframe receiver instance this decoder feeds. */
int vdec_set_receive_id(struct vdec_s *vdec, int receive_id)
{
	vdec->vf_receiver_inst = receive_id;
	return 0;
}
EXPORT_SYMBOL(vdec_set_receive_id);
1126
/* add frame data to input chain */
int vdec_write_vframe(struct vdec_s *vdec, const char *buf, size_t count)
{
	/* forwards vdec_input_add_frame()'s result (presumably bytes
	 * accepted or a negative error - confirm in vdec_input.c) */
	return vdec_input_add_frame(&vdec->input, buf, count);
}
EXPORT_SYMBOL(vdec_write_vframe);
1133
1134/* add a work queue thread for vdec*/
1135void vdec_schedule_work(struct work_struct *work)
1136{
1137 if (vdec_core->vdec_core_wq)
1138 queue_work(vdec_core->vdec_core_wq, work);
1139 else
1140 schedule_work(work);
1141}
1142EXPORT_SYMBOL(vdec_schedule_work);
1143
1144static struct vdec_s *vdec_get_associate(struct vdec_s *vdec)
1145{
1146 if (vdec->master)
1147 return vdec->master;
1148 else if (vdec->slave)
1149 return vdec->slave;
1150 return NULL;
1151}
1152
1153static void vdec_sync_input_read(struct vdec_s *vdec)
1154{
1155 if (!vdec_stream_based(vdec))
1156 return;
1157
1158 if (vdec_dual(vdec)) {
1159 u32 me, other;
1160 if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1161 me = READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
1162 other =
1163 vdec_get_associate(vdec)->input.stream_cookie;
1164 if (me > other)
1165 return;
1166 else if (me == other) {
1167 me = READ_VREG(VLD_MEM_VIFIFO_RP);
1168 other =
1169 vdec_get_associate(vdec)->input.swap_rp;
1170 if (me > other) {
1171 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1172 vdec_get_associate(vdec)->
1173 input.swap_rp);
1174 return;
1175 }
1176 }
1177 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1178 READ_VREG(VLD_MEM_VIFIFO_RP));
1179 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1180 me = READ_VREG(HEVC_SHIFT_BYTE_COUNT);
1181 if (((me & 0x80000000) == 0) &&
1182 (vdec->input.streaming_rp & 0x80000000))
1183 me += 1ULL << 32;
1184 other = vdec_get_associate(vdec)->input.streaming_rp;
1185 if (me > other) {
1186 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1187 vdec_get_associate(vdec)->
1188 input.swap_rp);
1189 return;
1190 }
1191
1192 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1193 READ_VREG(HEVC_STREAM_RD_PTR));
1194 }
1195 } else if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
1196 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1197 READ_VREG(VLD_MEM_VIFIFO_RP));
1198 } else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
1199 WRITE_PARSER_REG(PARSER_VIDEO_RP,
1200 READ_VREG(HEVC_STREAM_RD_PTR));
1201 }
1202}
1203
/*
 * Propagate the parser write pointer into the HW decoder's stream-buffer
 * write pointer so newly parsed data becomes visible to the decoder.
 * No-op for frame-based input.
 */
static void vdec_sync_input_write(struct vdec_s *vdec)
{
	if (!vdec_stream_based(vdec))
		return;

	if (vdec->input.target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_WP,
			READ_PARSER_REG(PARSER_VIDEO_WP));
	} else if (vdec->input.target == VDEC_INPUT_TARGET_HEVC) {
		WRITE_VREG(HEVC_STREAM_WR_PTR,
			READ_PARSER_REG(PARSER_VIDEO_WP));
	}
}
1217
/*
 *get next frame from input chain
 */
/*
 *THE VLD_FIFO is 512 bytes and Video buffer level
 * empty interrupt is set to 0x80 bytes threshold
 */
#define VLD_PADDING_SIZE 1024
#define HEVC_PADDING_SIZE (1024*16)
/*
 * vdec_prepare_input() - program the HW input FIFO for the next decode
 * run and return the amount of input data available.
 *
 * Frame based: takes the next chunk from the input chain, points the
 * VLD/HEVC stream FIFO at the chunk's block, hands the chunk back via
 * @p and returns its size (-1 when no chunk is pending).
 * Stream based: restores a previously saved FIFO context (or freshly
 * initializes one), re-syncs read/write pointers with the parser and
 * returns the number of buffered stream bytes; *p is set to NULL.
 */
int vdec_prepare_input(struct vdec_s *vdec, struct vframe_chunk_s **p)
{
	struct vdec_input_s *input = &vdec->input;
	struct vframe_chunk_s *chunk = NULL;
	struct vframe_block_list_s *block = NULL;
	int dummy;

	/* full reset to HW input */
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

		/* reset VLD fifo for all vdec */
		WRITE_VREG(DOS_SW_RESET0, (1<<5) | (1<<4) | (1<<3));
		WRITE_VREG(DOS_SW_RESET0, 0);

		/* dummy read — presumably flushes the posted reset write;
		 * TODO confirm against the DOS register documentation
		 */
		dummy = READ_RESET_REG(RESET0_REGISTER);
		WRITE_VREG(POWER_CTL_VLD, 1 << 4);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
#if 0
		/*move to driver*/
		if (input_frame_based(input))
			WRITE_VREG(HEVC_STREAM_CONTROL, 0);

		/*
		 * 2: assist
		 * 3: parser
		 * 4: parser_state
		 * 8: dblk
		 * 11:mcpu
		 * 12:ccpu
		 * 13:ddr
		 * 14:iqit
		 * 15:ipp
		 * 17:qdct
		 * 18:mpred
		 * 19:sao
		 * 24:hevc_afifo
		 */
		WRITE_VREG(DOS_SW_RESET3,
			(1<<3)|(1<<4)|(1<<8)|(1<<11)|(1<<12)|(1<<14)|(1<<15)|
			(1<<17)|(1<<18)|(1<<19));
		WRITE_VREG(DOS_SW_RESET3, 0);
#endif
	}

	/*
	 *setup HW decoder input buffer (VLD context)
	 * based on input->type and input->target
	 */
	if (input_frame_based(input)) {
		chunk = vdec_input_next_chunk(&vdec->input);

		if (chunk == NULL) {
			*p = NULL;
			return -1;
		}

		block = chunk->block;

		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR, block->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR, block->start +
					block->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					round_down(block->start + chunk->offset,
						VDEC_FIFO_ALIGN));

			/* init/reset pulse on the FIFO control register */
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP,
					round_down(block->start + chunk->offset,
						VDEC_FIFO_ALIGN));
			/* pad the WP past the chunk end so the FIFO level
			 * does not hit the empty threshold at the exact
			 * chunk boundary; wraps inside the block
			 */
			dummy = chunk->offset + chunk->size +
				VLD_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(VLD_MEM_VIFIFO_WP,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 3);
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);

			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
				(0x11 << 16) | (1<<10) | (7<<3));

		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR, block->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR, block->start +
					block->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR, block->start +
					chunk->offset);
			/* same padding trick as the VLD branch above */
			dummy = chunk->offset + chunk->size +
				HEVC_PADDING_SIZE;
			if (dummy >= block->size)
				dummy -= block->size;
			WRITE_VREG(HEVC_STREAM_WR_PTR,
				round_down(block->start + dummy,
					VDEC_FIFO_ALIGN));

			/* set endian */
			SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
		}

		*p = chunk;
		return chunk->size;

	} else {
		/* stream based */
		u32 rp = 0, wp = 0, fifo_len = 0;
		int size;
		bool swap_valid = input->swap_valid;
		unsigned long swap_page_phys = input->swap_page_phys;

		if (vdec_dual(vdec) &&
			((vdec->flag & VDEC_FLAG_SELF_INPUT_CONTEXT) == 0)) {
			/* keep using previous input context */
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			if (master->input.last_swap_slave) {
				swap_valid = master->slave->input.swap_valid;
				swap_page_phys =
					master->slave->input.swap_page_phys;
			} else {
				swap_valid = master->input.swap_valid;
				swap_page_phys = master->input.swap_page_phys;
			}
		}

		if (swap_valid) {
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				if (vdec->format == VFORMAT_H264)
					SET_VREG_MASK(POWER_CTL_VLD,
						(1 << 9));

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* restore read side */
				WRITE_VREG(VLD_MEM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

				/* NOTE(review): unbounded busy-wait on HW
				 * swap-busy bit
				 */
				while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
					;
				WRITE_VREG(VLD_MEM_SWAP_CTL, 0);

				/* restore wrap count */
				WRITE_VREG(VLD_MEM_VIFIFO_WRAP_COUNT,
					input->stream_cookie);

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);
				fifo_len = READ_VREG(VLD_MEM_VIFIFO_LEVEL);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);
			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);

				/* restore read side */
				WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
					swap_page_phys);
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

				/* NOTE(review): unbounded busy-wait */
				while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
					& (1<<7))
					;
				WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

				/* restore stream offset */
				WRITE_VREG(HEVC_SHIFT_BYTE_COUNT,
					input->stream_cookie);

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;


				/* enable */

				/* sync with front end */
				vdec_sync_input_read(vdec);
				vdec_sync_input_write(vdec);

				wp = READ_VREG(HEVC_STREAM_WR_PTR);

				/*pr_info("vdec: restore context\r\n");*/
			}

		} else {
			/* first run: no saved context — program the FIFO
			 * from the input buffer's start/size
			 */
			if (input->target == VDEC_INPUT_TARGET_VLD) {
				WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
					input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
					input->start + input->size - 8);
				WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
					input->start);

				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

				/* set to manual mode */
				WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
				WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
				WRITE_VREG(VLD_MEM_VIFIFO_WP,
					READ_PARSER_REG(PARSER_VIDEO_WP));

				rp = READ_VREG(VLD_MEM_VIFIFO_RP);

				/* enable */
				WRITE_VREG(VLD_MEM_VIFIFO_CONTROL,
					(0x11 << 16) | (1<<10));

				wp = READ_VREG(VLD_MEM_VIFIFO_WP);

			} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
				WRITE_VREG(HEVC_STREAM_START_ADDR,
					input->start);
				WRITE_VREG(HEVC_STREAM_END_ADDR,
					input->start + input->size);
				WRITE_VREG(HEVC_STREAM_RD_PTR,
					input->start);
				WRITE_VREG(HEVC_STREAM_WR_PTR,
					READ_PARSER_REG(PARSER_VIDEO_WP));

				rp = READ_VREG(HEVC_STREAM_RD_PTR);
				wp = READ_VREG(HEVC_STREAM_WR_PTR);
				fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
						>> 16) & 0x7f;

				/* enable */
			}
		}
		*p = NULL;
		/* buffered bytes, accounting for ring-buffer wrap and
		 * data already pulled into the HW stream FIFO
		 */
		if (wp >= rp)
			size = wp - rp + fifo_len;
		else
			size = wp + input->size - rp + fifo_len;
		if (size < 0) {
			pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
				__func__, input->size, wp, rp, fifo_len, size);
			size = 0;
		}
		return size;
	}
}
EXPORT_SYMBOL(vdec_prepare_input);
1483
1484void vdec_enable_input(struct vdec_s *vdec)
1485{
1486 struct vdec_input_s *input = &vdec->input;
1487
1488 if (vdec->status != VDEC_STATUS_ACTIVE)
1489 return;
1490
1491 if (input->target == VDEC_INPUT_TARGET_VLD)
1492 SET_VREG_MASK(VLD_MEM_VIFIFO_CONTROL, (1<<2) | (1<<1));
1493 else if (input->target == VDEC_INPUT_TARGET_HEVC) {
1494 SET_VREG_MASK(HEVC_STREAM_CONTROL, 1);
1495 if (vdec_stream_based(vdec))
1496 CLEAR_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1497 else
1498 SET_VREG_MASK(HEVC_STREAM_CONTROL, 7 << 4);
1499 SET_VREG_MASK(HEVC_STREAM_FIFO_CTL, (1<<29));
1500 }
1501}
1502EXPORT_SYMBOL(vdec_enable_input);
1503
1504int vdec_set_input_buffer(struct vdec_s *vdec, u32 start, u32 size)
1505{
1506 int r = vdec_input_set_buffer(&vdec->input, start, size);
1507
1508 if (r)
1509 return r;
1510
1511 if (vdec->slave)
1512 r = vdec_input_set_buffer(&vdec->slave->input, start, size);
1513
1514 return r;
1515}
1516EXPORT_SYMBOL(vdec_set_input_buffer);
1517
1518/*
1519 * vdec_eos returns the possibility that there are
1520 * more input can be used by decoder through vdec_prepare_input
1521 * Note: this function should be called prior to vdec_vframe_dirty
1522 * by decoder driver to determine if EOS happens for stream based
1523 * decoding when there is no sufficient data for a frame
1524 */
1525bool vdec_has_more_input(struct vdec_s *vdec)
1526{
1527 struct vdec_input_s *input = &vdec->input;
1528
1529 if (!input->eos)
1530 return true;
1531
1532 if (input_frame_based(input))
1533 return vdec_input_next_input_chunk(input) != NULL;
1534 else {
1535 if (input->target == VDEC_INPUT_TARGET_VLD)
1536 return READ_VREG(VLD_MEM_VIFIFO_WP) !=
1537 READ_PARSER_REG(PARSER_VIDEO_WP);
1538 else {
1539 return (READ_VREG(HEVC_STREAM_WR_PTR) & ~0x3) !=
1540 (READ_PARSER_REG(PARSER_VIDEO_WP) & ~0x3);
1541 }
1542 }
1543}
1544EXPORT_SYMBOL(vdec_has_more_input);
1545
/* Set the input prepare level.  NOTE(review): presumably the amount of
 * buffered input required before this vdec is scheduled — confirm
 * against the consumer of input.prepare_level.
 */
void vdec_set_prepare_level(struct vdec_s *vdec, int level)
{
	vdec->input.prepare_level = level;
}
EXPORT_SYMBOL(vdec_set_prepare_level);
1551
/* Replace the vdec's flag word (assignment, not OR — any previously
 * set flags are dropped).
 */
void vdec_set_flag(struct vdec_s *vdec, u32 flag)
{
	vdec->flag = flag;
}
EXPORT_SYMBOL(vdec_set_flag);
1557
/* Mark end-of-stream on this vdec's input (and the slave's for dual
 * decoding), then wake the core scheduler thread via its semaphore.
 */
void vdec_set_eos(struct vdec_s *vdec, bool eos)
{
	struct vdec_core_s *core = vdec_core;
	vdec->input.eos = eos;

	if (vdec->slave)
		vdec->slave->input.eos = eos;
	up(&core->sem);
}
EXPORT_SYMBOL(vdec_set_eos);
1568
#ifdef VDEC_DEBUG_SUPPORT
/* Debug helper: enable step mode with all bits set (0x1ff).
 * NOTE(review): presumably one bit per scheduler stage — confirm where
 * step_mode is consumed.
 */
void vdec_set_step_mode(void)
{
	step_mode = 0x1ff;
}
EXPORT_SYMBOL(vdec_set_step_mode);
#endif
1576
1577void vdec_set_next_sched(struct vdec_s *vdec, struct vdec_s *next_vdec)
1578{
1579 if (vdec && next_vdec) {
1580 vdec->sched = 0;
1581 next_vdec->sched = 1;
1582 }
1583}
1584EXPORT_SYMBOL(vdec_set_next_sched);
1585
1586/*
1587 * Swap Context: S0 S1 S2 S3 S4
1588 * Sample sequence: M S M M S
1589 * Master Context: S0 S0 S2 S3 S3
1590 * Slave context: NA S1 S1 S2 S4
1591 * ^
1592 * ^
1593 * ^
1594 * the tricky part
1595 * If there are back to back decoding of master or slave
1596 * then the context of the counter part should be updated
1597 * with current decoder. In this example, S1 should be
1598 * updated to S2.
1599 * This is done by swap the swap_page and related info
1600 * between two layers.
1601 */
1602static void vdec_borrow_input_context(struct vdec_s *vdec)
1603{
1604 struct page *swap_page;
1605 unsigned long swap_page_phys;
1606 struct vdec_input_s *me;
1607 struct vdec_input_s *other;
1608
1609 if (!vdec_dual(vdec))
1610 return;
1611
1612 me = &vdec->input;
1613 other = &vdec_get_associate(vdec)->input;
1614
1615 /* swap the swap_context, borrow counter part's
1616 * swap context storage and update all related info.
1617 * After vdec_vframe_dirty, vdec_save_input_context
1618 * will be called to update current vdec's
1619 * swap context
1620 */
1621 swap_page = other->swap_page;
1622 other->swap_page = me->swap_page;
1623 me->swap_page = swap_page;
1624
1625 swap_page_phys = other->swap_page_phys;
1626 other->swap_page_phys = me->swap_page_phys;
1627 me->swap_page_phys = swap_page_phys;
1628
1629 other->swap_rp = me->swap_rp;
1630 other->streaming_rp = me->streaming_rp;
1631 other->stream_cookie = me->stream_cookie;
1632 other->swap_valid = me->swap_valid;
1633}
1634
1635void vdec_vframe_dirty(struct vdec_s *vdec, struct vframe_chunk_s *chunk)
1636{
1637 if (chunk)
1638 chunk->flag |= VFRAME_CHUNK_FLAG_CONSUMED;
1639
1640 if (vdec_stream_based(vdec)) {
1641 vdec->input.swap_needed = true;
1642
1643 if (vdec_dual(vdec)) {
1644 vdec_get_associate(vdec)->input.dirty_count = 0;
1645 vdec->input.dirty_count++;
1646 if (vdec->input.dirty_count > 1) {
1647 vdec->input.dirty_count = 1;
1648 vdec_borrow_input_context(vdec);
1649 }
1650 }
1651
1652 /* for stream based mode, we update read and write pointer
1653 * also in case decoder wants to keep working on decoding
1654 * for more frames while input front end has more data
1655 */
1656 vdec_sync_input_read(vdec);
1657 vdec_sync_input_write(vdec);
1658
1659 vdec->need_more_data |= VDEC_NEED_MORE_DATA_DIRTY;
1660 vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
1661 }
1662}
1663EXPORT_SYMBOL(vdec_vframe_dirty);
1664
1665bool vdec_need_more_data(struct vdec_s *vdec)
1666{
1667 if (vdec_stream_based(vdec))
1668 return vdec->need_more_data & VDEC_NEED_MORE_DATA;
1669
1670 return false;
1671}
1672EXPORT_SYMBOL(vdec_need_more_data);
1673
1674
/*
 * hevc_wait_ddr() - remove the HEVC core(s) from the DMC request
 * control and spin until the DMC channel status reports them idle.
 *
 * Bit 4 is the hevc port; on G12A and later bit 8 (hevcb) is gated as
 * well.  NOTE(review): the poll loop below has no timeout.
 */
void hevc_wait_ddr(void)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /* hevc */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= (1 << 8); /* hevcb */

	/* clear the request bits so the DMC stops accepting new
	 * transactions from these ports
	 */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait for the channel status bits to assert */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;
}
1693
/*
 * vdec_save_input_context() - snapshot the HW stream FIFO state into
 * this vdec's swap page so it can be restored later (by this vdec or
 * its dual partner via vdec_prepare_input()).
 *
 * Only acts for stream based inputs that touched the FIFO since the
 * last save (swap_needed).  Also mirrors the HW read pointer / wrap
 * count into SW state and updates total_rd_count, the absolute number
 * of bytes consumed.
 */
void vdec_save_input_context(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_SAVE_INPUT);
#endif

	if (input->target == VDEC_INPUT_TARGET_VLD)
		WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1<<15);

	if (input_stream_based(input) && (input->swap_needed)) {
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			/* swap the VLD FIFO context out to the swap page
			 * (CTL mode 3 = save)
			 */
			WRITE_VREG(VLD_MEM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(VLD_MEM_SWAP_CTL, 3);
			/* NOTE(review): unbounded busy-wait on HW */
			while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
				;
			WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
			vdec->input.stream_cookie =
				READ_VREG(VLD_MEM_VIFIFO_WRAP_COUNT);
			vdec->input.swap_rp =
				READ_VREG(VLD_MEM_VIFIFO_RP);
			/* absolute consumed bytes:
			 * wraps * buffer size + rp - bytes still in FIFO
			 */
			vdec->input.total_rd_count =
				(u64)vdec->input.stream_cookie *
				vdec->input.size + vdec->input.swap_rp -
				READ_VREG(VLD_MEM_VIFIFO_BYTES_AVAIL);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
				input->swap_page_phys);
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 3);

			/* NOTE(review): unbounded busy-wait on HW */
			while (READ_VREG(HEVC_STREAM_SWAP_CTRL) & (1<<7))
				;
			WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);

			vdec->input.stream_cookie =
				READ_VREG(HEVC_SHIFT_BYTE_COUNT);
			vdec->input.swap_rp =
				READ_VREG(HEVC_STREAM_RD_PTR);
			/* HEVC_SHIFT_BYTE_COUNT is 32 bits wide: detect a
			 * wrap since the last snapshot and carry it into
			 * the high half of the 64-bit streaming_rp, then
			 * replace the low 32 bits with the new count
			 */
			if (((vdec->input.stream_cookie & 0x80000000) == 0) &&
				(vdec->input.streaming_rp & 0x80000000))
				vdec->input.streaming_rp += 1ULL << 32;
			vdec->input.streaming_rp &= 0xffffffffULL << 32;
			vdec->input.streaming_rp |= vdec->input.stream_cookie;
			vdec->input.total_rd_count = vdec->input.streaming_rp;
		}

		input->swap_valid = true;
		input->swap_needed = false;
		/*pr_info("vdec: save context\r\n");*/

		vdec_sync_input_read(vdec);

		if (vdec_dual(vdec)) {
			struct vdec_s *master = (vdec->slave) ?
				vdec : vdec->master;
			/* remember which of the pair saved last so the
			 * next restore picks the right context
			 */
			master->input.last_swap_slave = (master->slave == vdec);
			/* pr_info("master->input.last_swap_slave = %d\n",
				master->input.last_swap_slave); */
		}

		hevc_wait_ddr();
	}
}
EXPORT_SYMBOL(vdec_save_input_context);
1760
1761void vdec_clean_input(struct vdec_s *vdec)
1762{
1763 struct vdec_input_s *input = &vdec->input;
1764
1765 while (!list_empty(&input->vframe_chunk_list)) {
1766 struct vframe_chunk_s *chunk =
1767 vdec_input_next_chunk(input);
1768 if (chunk && (chunk->flag & VFRAME_CHUNK_FLAG_CONSUMED))
1769 vdec_input_release_chunk(input, chunk);
1770 else
1771 break;
1772 }
1773 vdec_save_input_context(vdec);
1774}
1775EXPORT_SYMBOL(vdec_clean_input);
1776
1777
/*
 * vdec_input_read_restore() - restore the stream FIFO read side.
 *
 * If no context was ever saved (swap_valid false), the FIFO registers
 * are freshly initialized from input->start/size; otherwise the saved
 * context is swapped back in from input->swap_page_phys.  Always
 * returns 0; no-op for frame based inputs.
 */
static int vdec_input_read_restore(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;

	if (!vdec_stream_based(vdec))
		return 0;

	if (!input->swap_valid) {
		/* no saved context — program the FIFO from scratch */
		if (input->target == VDEC_INPUT_TARGET_VLD) {
			WRITE_VREG(VLD_MEM_VIFIFO_START_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_END_PTR,
				input->start + input->size - 8);
			WRITE_VREG(VLD_MEM_VIFIFO_CURR_PTR,
				input->start);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 1);
			WRITE_VREG(VLD_MEM_VIFIFO_CONTROL, 0);

			/* set to manual mode */
			WRITE_VREG(VLD_MEM_VIFIFO_BUF_CNTL, 2);
			WRITE_VREG(VLD_MEM_VIFIFO_RP, input->start);
		} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
			WRITE_VREG(HEVC_STREAM_START_ADDR,
				input->start);
			WRITE_VREG(HEVC_STREAM_END_ADDR,
				input->start + input->size);
			WRITE_VREG(HEVC_STREAM_RD_PTR,
				input->start);
		}
		return 0;
	}
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		/* restore read side */
		WRITE_VREG(VLD_MEM_SWAP_ADDR,
			input->swap_page_phys);

		/*swap active*/
		WRITE_VREG(VLD_MEM_SWAP_CTL, 1);

		/*wait swap busy — NOTE(review): no timeout*/
		while (READ_VREG(VLD_MEM_SWAP_CTL) & (1<<7))
			;

		WRITE_VREG(VLD_MEM_SWAP_CTL, 0);
	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		/* restore read side */
		WRITE_VREG(HEVC_STREAM_SWAP_ADDR,
			input->swap_page_phys);
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 1);

		/* wait for the swap engine to go idle */
		while (READ_VREG(HEVC_STREAM_SWAP_CTRL)
			& (1<<7))
			;
		WRITE_VREG(HEVC_STREAM_SWAP_CTRL, 0);
	}

	return 0;
}
1836
1837
/*
 * vdec_sync_input() - restore the read side, re-sync both pointers
 * with the parser front end and return the number of buffered stream
 * bytes (including data already inside the HW stream FIFO).
 */
int vdec_sync_input(struct vdec_s *vdec)
{
	struct vdec_input_s *input = &vdec->input;
	u32 rp = 0, wp = 0, fifo_len = 0;
	int size;

	vdec_input_read_restore(vdec);
	vdec_sync_input_read(vdec);
	vdec_sync_input_write(vdec);
	if (input->target == VDEC_INPUT_TARGET_VLD) {
		rp = READ_VREG(VLD_MEM_VIFIFO_RP);
		wp = READ_VREG(VLD_MEM_VIFIFO_WP);

	} else if (input->target == VDEC_INPUT_TARGET_HEVC) {
		rp = READ_VREG(HEVC_STREAM_RD_PTR);
		wp = READ_VREG(HEVC_STREAM_WR_PTR);
		/* FIFO fill level lives in bits [22:16] of FIFO_CTL */
		fifo_len = (READ_VREG(HEVC_STREAM_FIFO_CTL)
				>> 16) & 0x7f;
	}
	/* account for ring-buffer wrap */
	if (wp >= rp)
		size = wp - rp + fifo_len;
	else
		size = wp + input->size - rp + fifo_len;
	if (size < 0) {
		pr_info("%s error: input->size %x wp %x rp %x fifo_len %x => size %x\r\n",
			__func__, input->size, wp, rp, fifo_len, size);
		size = 0;
	}
	return size;

}
EXPORT_SYMBOL(vdec_sync_input);
1870
1871const char *vdec_status_str(struct vdec_s *vdec)
1872{
1873 return vdec->status < ARRAY_SIZE(vdec_status_string) ?
1874 vdec_status_string[vdec->status] : "INVALID";
1875}
1876
1877const char *vdec_type_str(struct vdec_s *vdec)
1878{
1879 switch (vdec->type) {
1880 case VDEC_TYPE_SINGLE:
1881 return "VDEC_TYPE_SINGLE";
1882 case VDEC_TYPE_STREAM_PARSER:
1883 return "VDEC_TYPE_STREAM_PARSER";
1884 case VDEC_TYPE_FRAME_BLOCK:
1885 return "VDEC_TYPE_FRAME_BLOCK";
1886 case VDEC_TYPE_FRAME_CIRCULAR:
1887 return "VDEC_TYPE_FRAME_CIRCULAR";
1888 default:
1889 return "VDEC_TYPE_INVALID";
1890 }
1891}
1892
/* Device name string for the vdec's format.  Odd table entries are
 * selected here — NOTE(review): vdec_device_name[] presumably stores
 * name pairs per format; confirm the even/odd meaning against the
 * table definition.
 */
const char *vdec_device_name_str(struct vdec_s *vdec)
{
	return vdec_device_name[vdec->format * 2 + 1];
}
EXPORT_SYMBOL(vdec_device_name_str);
1898
1899void walk_vdec_core_list(char *s)
1900{
1901 struct vdec_s *vdec;
1902 struct vdec_core_s *core = vdec_core;
1903 unsigned long flags;
1904
1905 pr_info("%s --->\n", s);
1906
1907 flags = vdec_core_lock(vdec_core);
1908
1909 if (list_empty(&core->connected_vdec_list)) {
1910 pr_info("connected vdec list empty\n");
1911 } else {
1912 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
1913 pr_info("\tvdec (%p), status = %s\n", vdec,
1914 vdec_status_str(vdec));
1915 }
1916 }
1917
1918 vdec_core_unlock(vdec_core, flags);
1919}
1920EXPORT_SYMBOL(walk_vdec_core_list);
1921
/* insert vdec to vdec_core for scheduling,
 * for dual running decoders, connect/disconnect always runs in pairs.
 * Returns 0 (also when the vdec is already connected).
 */
int vdec_connect(struct vdec_s *vdec)
{
	unsigned long flags;

	//trace_vdec_connect(vdec);/*DEBUG_TMP*/

	/* only a disconnected vdec may be (re)connected */
	if (vdec->status != VDEC_STATUS_DISCONNECTED)
		return 0;

	vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
	vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);

	/* completed once the vdec goes inactive; vdec_disconnect()
	 * waits on it
	 */
	init_completion(&vdec->inactive_done);

	if (vdec->slave) {
		vdec_set_status(vdec->slave, VDEC_STATUS_CONNECTED);
		vdec_set_next_status(vdec->slave, VDEC_STATUS_CONNECTED);

		init_completion(&vdec->slave->inactive_done);
	}

	flags = vdec_core_lock(vdec_core);

	list_add_tail(&vdec->list, &vdec_core->connected_vdec_list);

	if (vdec->slave) {
		list_add_tail(&vdec->slave->list,
			&vdec_core->connected_vdec_list);
	}

	vdec_core_unlock(vdec_core, flags);

	/* wake the core scheduler thread */
	up(&vdec_core->sem);

	return 0;
}
EXPORT_SYMBOL(vdec_connect);
1962
/* remove vdec from vdec_core scheduling and wait (up to 2s per vdec)
 * for it — and its dual partner, if any — to become inactive.
 * Always returns 0; a timeout only logs an error.
 */
int vdec_disconnect(struct vdec_s *vdec)
{
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_DISCONNECT);
#endif
	//trace_vdec_disconnect(vdec);/*DEBUG_TMP*/

	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
		(vdec->status != VDEC_STATUS_ACTIVE)) {
		return 0;
	}
	mutex_lock(&vdec_mutex);
	/*
	 *when a vdec is under the management of scheduler
	 * the status change will only be from vdec_core_thread
	 */
	vdec_set_next_status(vdec, VDEC_STATUS_DISCONNECTED);

	if (vdec->slave)
		vdec_set_next_status(vdec->slave, VDEC_STATUS_DISCONNECTED);
	else if (vdec->master)
		vdec_set_next_status(vdec->master, VDEC_STATUS_DISCONNECTED);
	mutex_unlock(&vdec_mutex);
	/* wake the core thread so it processes the status change */
	up(&vdec_core->sem);

	/* inactive_done is completed by the core thread once the vdec
	 * has actually stopped running
	 */
	if(!wait_for_completion_timeout(&vdec->inactive_done,
		msecs_to_jiffies(2000)))
		goto discon_timeout;

	if (vdec->slave) {
		if(!wait_for_completion_timeout(&vdec->slave->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	} else if (vdec->master) {
		if(!wait_for_completion_timeout(&vdec->master->inactive_done,
			msecs_to_jiffies(2000)))
			goto discon_timeout;
	}

	return 0;
discon_timeout:
	pr_err("%s timeout!!! status: 0x%x\n", __func__, vdec->status);
	return 0;
}
EXPORT_SYMBOL(vdec_disconnect);
2009
/* release vdec structure: frees the input chain, returns the instance
 * id and the memory itself (callers in this file disconnect first —
 * see vdec_release()).  Always returns 0.
 */
int vdec_destroy(struct vdec_s *vdec)
{
	//trace_vdec_destroy(vdec);/*DEBUG_TMP*/

	vdec_input_release(&vdec->input);

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile_flush(vdec);
#endif
	/* hand the instance id back to the ida allocator */
	ida_simple_remove(&vdec_core->ida, vdec->id);
	vfree(vdec);

	atomic_dec(&vdec_core->vdec_nr);

	return 0;
}
EXPORT_SYMBOL(vdec_destroy);
2028
/*
 *register vdec_device
 * create output, vfm or create ionvideo output
 *
 * vdec_init() - register the decoder platform device and wire up the
 * video output path (default VFM path, IONVIDEO receiver or one of the
 * named VFM chains).  Returns 0 on success or a negative errno.
 * NOTE(review): @is_4k is unused in this function — confirm whether
 * callers still rely on the parameter.
 */
s32 vdec_init(struct vdec_s *vdec, int is_4k)
{
	int r = 0;
	struct vdec_s *p = vdec;
	const char *dev_name;
	int id = PLATFORM_DEVID_AUTO;/*auto id unless vdec already has one*/

	dev_name = get_dev_name(vdec_single(vdec), vdec->format);

	if (dev_name == NULL)
		return -ENODEV;

	pr_info("vdec_init, dev_name:%s, vdec_type=%s\n",
		dev_name, vdec_type_str(vdec));

	/*
	 *todo: VFM path control should be configurable,
	 * for now all stream based input uses default VFM path.
	 */
	if (vdec_stream_based(vdec) && !vdec_dual(vdec)) {
		if (vdec_core->vfm_vdec == NULL) {
			pr_debug("vdec_init set vfm decoder %p\n", vdec);
			vdec_core->vfm_vdec = vdec;
		} else {
			/* only one decoder may own the VFM path at a time */
			pr_info("vdec_init vfm path busy.\n");
			return -EBUSY;
		}
	}

	mutex_lock(&vdec_mutex);
	inited_vcodec_num++;
	mutex_unlock(&vdec_mutex);

	/* HEVC/AVS2/VP9 run on the HEVC core; everything else on VLD */
	vdec_input_set_type(&vdec->input, vdec->type,
			(vdec->format == VFORMAT_HEVC ||
			vdec->format == VFORMAT_AVS2 ||
			vdec->format == VFORMAT_VP9) ?
				VDEC_INPUT_TARGET_HEVC :
				VDEC_INPUT_TARGET_VLD);
	if (vdec_single(vdec))
		vdec_enable_DMC(vdec);
	p->cma_dev = vdec_core->cma_dev;
	p->get_canvas = get_canvas;
	p->get_canvas_ex = get_canvas_ex;
	p->free_canvas_ex = free_canvas_ex;
	p->vdec_fps_detec = vdec_fps_detec;
	atomic_set(&p->inirq_flag, 0);
	atomic_set(&p->inirq_thread_flag, 0);
	/* todo */
	if (!vdec_dual(vdec))
		p->use_vfm_path = vdec_stream_based(vdec);
	/* vdec_dev_reg.flag = 0; */
	if (vdec->id >= 0)
		id = vdec->id;
	p->parallel_dec = parallel_decode;
	vdec_core->parallel_dec = parallel_decode;
	vdec->canvas_mode = CANVAS_BLKMODE_32X32;
#ifdef FRAME_CHECK
	vdec_frame_check_init(vdec);
#endif
	/* the child driver receives this vdec via platform data; &p is
	 * the address of the local pointer, which
	 * platform_device_register_data() copies by value
	 */
	p->dev = platform_device_register_data(
				&vdec_core->vdec_core_platform_device->dev,
				dev_name,
				id,
				&p, sizeof(struct vdec_s *));

	if (IS_ERR(p->dev)) {
		r = PTR_ERR(p->dev);
		pr_err("vdec: Decoder device %s register failed (%d)\n",
			dev_name, r);

		mutex_lock(&vdec_mutex);
		inited_vcodec_num--;
		mutex_unlock(&vdec_mutex);

		goto error;
	} else if (!p->dev->dev.driver) {
		pr_info("vdec: Decoder device %s driver probe failed.\n",
			dev_name);
		r = -ENODEV;

		goto error;
	}

	if ((p->type == VDEC_TYPE_FRAME_BLOCK) && (p->run == NULL)) {
		r = -ENODEV;
		pr_err("vdec: Decoder device not handled (%s)\n", dev_name);

		mutex_lock(&vdec_mutex);
		inited_vcodec_num--;
		mutex_unlock(&vdec_mutex);

		goto error;
	}

	if (p->use_vfm_path) {
		vdec->vf_receiver_inst = -1;
		vdec->vfm_map_id[0] = 0;
	} else if (!vdec_dual(vdec)) {
		/* create IONVIDEO instance and connect decoder's
		 * vf_provider interface to it
		 */
		if (p->type != VDEC_TYPE_FRAME_BLOCK) {
			r = -ENODEV;
			pr_err("vdec: Incorrect decoder type\n");

			mutex_lock(&vdec_mutex);
			inited_vcodec_num--;
			mutex_unlock(&vdec_mutex);

			goto error;
		}
		if (p->frame_base_video_path == FRAME_BASE_PATH_IONVIDEO) {
#if 1
			r = ionvideo_assign_map(&vdec->vf_receiver_name,
					&vdec->vf_receiver_inst);
#else
			/*
			 * temporarily just use decoder instance ID as iondriver ID
			 * to solve OMX iondriver instance number check time sequence
			 * only the limitation is we can NOT mix different video
			 * decoders since same ID will be used for different decoder
			 * formats.
			 */
			vdec->vf_receiver_inst = p->dev->id;
			r = ionvideo_assign_map(&vdec->vf_receiver_name,
					&vdec->vf_receiver_inst);
#endif
			if (r < 0) {
				pr_err("IonVideo frame receiver allocation failed.\n");

				mutex_lock(&vdec_mutex);
				inited_vcodec_num--;
				mutex_unlock(&vdec_mutex);

				goto error;
			}

			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name);
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path ==
				FRAME_BASE_PATH_AMLVIDEO_AMVIDEO) {
			/* secure streams skip ppmgr/deinterlace */
			if (vdec_secure(vdec)) {
				snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
					"%s %s", vdec->vf_provider_name,
					"amlvideo amvideo");
			} else {
				snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
					"%s %s", vdec->vf_provider_name,
					"amlvideo ppmgr deinterlace amvideo");
			}
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path ==
				FRAME_BASE_PATH_AMLVIDEO1_AMVIDEO2) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				"aml_video.1 videosync.0 videopip");
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_OSD) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name);
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_TUNNEL_MODE) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s", vdec->vf_provider_name,
				"amvideo");
		} else if (p->frame_base_video_path == FRAME_BASE_PATH_V4L_VIDEO) {
			snprintf(vdec->vfm_map_chain, VDEC_MAP_NAME_SIZE,
				"%s %s %s", vdec->vf_provider_name,
				vdec->vf_receiver_name, "amvideo");
			snprintf(vdec->vfm_map_id, VDEC_MAP_NAME_SIZE,
				"vdec-map-%d", vdec->id);
		}

		if (vfm_map_add(vdec->vfm_map_id,
					vdec->vfm_map_chain) < 0) {
			r = -ENOMEM;
			pr_err("Decoder pipeline map creation failed %s.\n",
				vdec->vfm_map_id);
			vdec->vfm_map_id[0] = 0;

			mutex_lock(&vdec_mutex);
			inited_vcodec_num--;
			mutex_unlock(&vdec_mutex);

			goto error;
		}

		pr_debug("vfm map %s created\n", vdec->vfm_map_id);

		/*
		 *assume IONVIDEO driver already have a few vframe_receiver
		 * registered.
		 * 1. Call iondriver function to allocate a IONVIDEO path and
		 *    provide receiver's name and receiver op.
		 * 2. Get decoder driver's provider name from driver instance
		 * 3. vfm_map_add(name, "<decoder provider name>
		 *    <iondriver receiver name>"), e.g.
		 *    vfm_map_add("vdec_ion_map_0", "mpeg4_0 iondriver_1");
		 * 4. vf_reg_provider and vf_reg_receiver
		 * Note: the decoder provider's op uses vdec as op_arg
		 *       the iondriver receiver's op uses iondev device as
		 *       op_arg
		 */

	}

	if (!vdec_single(vdec)) {
		vf_reg_provider(&p->vframe_provider);

		vf_notify_receiver(p->vf_provider_name,
			VFRAME_EVENT_PROVIDER_START,
			vdec);

		if (vdec_core->hint_fr_vdec == NULL)
			vdec_core->hint_fr_vdec = vdec;

		/* forward the container frame-rate hint downstream once */
		if (vdec_core->hint_fr_vdec == vdec) {
			if (p->sys_info->rate != 0) {
				if (!vdec->is_reset) {
					vf_notify_receiver(p->vf_provider_name,
						VFRAME_EVENT_PROVIDER_FR_HINT,
						(void *)
						((unsigned long)
						p->sys_info->rate));
					vdec->fr_hint_state = VDEC_HINTED;
				}
			} else {
				vdec->fr_hint_state = VDEC_NEED_HINT;
			}
		}
	}

	p->dolby_meta_with_el = 0;
	pr_debug("vdec_init, vf_provider_name = %s\n", p->vf_provider_name);
	vdec_input_prepare_bufs(/*prepared buffer for fast playing.*/
		&vdec->input,
		vdec->sys_info->width,
		vdec->sys_info->height);
	/* vdec is now ready to be active */
	vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
	if (p->use_vfm_path) {
		/* QOS ring buffers used on the VFM path; freed in
		 * vdec_release().  Allocation failure is only logged —
		 * NOTE(review): confirm downstream users tolerate NULL.
		 */
		frame_info_buf_in = (struct vframe_qos_s *)
			kmalloc(QOS_FRAME_NUM*sizeof(struct vframe_qos_s), GFP_KERNEL);
		if (!frame_info_buf_in)
			pr_err("kmalloc: frame_info_buf_in failed\n");
		else
			memset(frame_info_buf_in, 0,
				QOS_FRAME_NUM*sizeof(struct vframe_qos_s));

		frame_info_buf_out = (struct vframe_qos_s *)
			kmalloc(QOS_FRAME_NUM*sizeof(struct vframe_qos_s), GFP_KERNEL);
		if (!frame_info_buf_out)
			pr_err("kmalloc: frame_info_buf_out failed\n");
		else
			memset(frame_info_buf_out, 0,
				QOS_FRAME_NUM*sizeof(struct vframe_qos_s));
		frame_qos_wr = 0;
		frame_qos_rd = 0;
	}
	return 0;

error:
	return r;
}
EXPORT_SYMBOL(vdec_init);
2306
/* vdec_create/init/release/destroy are applied to both dual running decoders.
 *
 * vdec_release() - full teardown: disconnect from the scheduler,
 * unregister the vframe provider and VFM map, drain in-flight IRQ
 * handlers, unregister the platform device and destroy the vdec.
 */
void vdec_release(struct vdec_s *vdec)
{
	//trace_vdec_release(vdec);/*DEBUG_TMP*/
#ifdef VDEC_DEBUG_SUPPORT
	if (step_mode) {
		pr_info("VDEC_DEBUG: in step_mode, wait release\n");
		while (step_mode)
			udelay(10);
		pr_info("VDEC_DEBUG: step_mode is clear\n");
	}
#endif
	/* remove from scheduling first; waits for the vdec to go inactive */
	vdec_disconnect(vdec);

	if (vdec->vframe_provider.name) {
		if (!vdec_single(vdec)) {
			/* withdraw the frame-rate hint if we issued one */
			if (vdec_core->hint_fr_vdec == vdec
			&& vdec->fr_hint_state == VDEC_HINTED)
				vf_notify_receiver(
					vdec->vf_provider_name,
					VFRAME_EVENT_PROVIDER_FR_END_HINT,
					NULL);
			vdec->fr_hint_state = VDEC_NO_NEED_HINT;
		}
		vf_unreg_provider(&vdec->vframe_provider);
	}

	if (vdec_core->vfm_vdec == vdec)
		vdec_core->vfm_vdec = NULL;

	if (vdec_core->hint_fr_vdec == vdec)
		vdec_core->hint_fr_vdec = NULL;

	if (vdec->vf_receiver_inst >= 0) {
		if (vdec->vfm_map_id[0]) {
			vfm_map_remove(vdec->vfm_map_id);
			vdec->vfm_map_id[0] = 0;
		}
	}

	/* wait for any in-flight IRQ / threaded-IRQ handlers to drain */
	while ((atomic_read(&vdec->inirq_flag) > 0)
		|| (atomic_read(&vdec->inirq_thread_flag) > 0))
		schedule();

#ifdef FRAME_CHECK
	vdec_frame_check_exit(vdec);
#endif
	vdec_fps_clear(vdec->id);
	/* last decoder out turns the DMC off */
	if (atomic_read(&vdec_core->vdec_nr) == 1)
		vdec_disable_DMC(vdec);
	platform_device_unregister(vdec->dev);
	pr_debug("vdec_release instance %p, total %d\n", vdec,
		atomic_read(&vdec_core->vdec_nr));
	if (vdec->use_vfm_path) {
		/* free the QOS ring buffers allocated in vdec_init() */
		kfree(frame_info_buf_in);
		frame_info_buf_in = NULL;
		kfree(frame_info_buf_out);
		frame_info_buf_out = NULL;
		frame_qos_wr = 0;
		frame_qos_rd = 0;
	}
	vdec_destroy(vdec);

	mutex_lock(&vdec_mutex);
	inited_vcodec_num--;
	mutex_unlock(&vdec_mutex);

}
EXPORT_SYMBOL(vdec_release);
2377
2378/* For dual running decoders, vdec_reset is only called with master vdec.
2379 */
2380int vdec_reset(struct vdec_s *vdec)
2381{
2382 //trace_vdec_reset(vdec); /*DEBUG_TMP*/
2383
2384 vdec_disconnect(vdec);
2385
2386 if (vdec->vframe_provider.name)
2387 vf_unreg_provider(&vdec->vframe_provider);
2388
2389 if ((vdec->slave) && (vdec->slave->vframe_provider.name))
2390 vf_unreg_provider(&vdec->slave->vframe_provider);
2391
2392 if (vdec->reset) {
2393 vdec->reset(vdec);
2394 if (vdec->slave)
2395 vdec->slave->reset(vdec->slave);
2396 }
2397 vdec->mc_loaded = 0;/*clear for reload firmware*/
2398 vdec_input_release(&vdec->input);
2399
2400 vdec_input_init(&vdec->input, vdec);
2401
2402 vdec_input_prepare_bufs(&vdec->input, vdec->sys_info->width,
2403 vdec->sys_info->height);
2404
2405 vf_reg_provider(&vdec->vframe_provider);
2406 vf_notify_receiver(vdec->vf_provider_name,
2407 VFRAME_EVENT_PROVIDER_START, vdec);
2408
2409 if (vdec->slave) {
2410 vf_reg_provider(&vdec->slave->vframe_provider);
2411 vf_notify_receiver(vdec->slave->vf_provider_name,
2412 VFRAME_EVENT_PROVIDER_START, vdec->slave);
2413 vdec->slave->mc_loaded = 0;/*clear for reload firmware*/
2414 }
2415
2416 vdec_connect(vdec);
2417
2418 return 0;
2419}
2420EXPORT_SYMBOL(vdec_reset);
2421
2422void vdec_free_cmabuf(void)
2423{
2424 mutex_lock(&vdec_mutex);
2425
2426 /*if (inited_vcodec_num > 0) {
2427 mutex_unlock(&vdec_mutex);
2428 return;
2429 }*/
2430 mutex_unlock(&vdec_mutex);
2431}
2432
2433void vdec_core_request(struct vdec_s *vdec, unsigned long mask)
2434{
2435 vdec->core_mask |= mask;
2436
2437 if (vdec->slave)
2438 vdec->slave->core_mask |= mask;
2439 if (vdec_core->parallel_dec == 1) {
2440 if (mask & CORE_MASK_COMBINE)
2441 vdec_core->vdec_combine_flag++;
2442 }
2443
2444}
2445EXPORT_SYMBOL(vdec_core_request);
2446
2447int vdec_core_release(struct vdec_s *vdec, unsigned long mask)
2448{
2449 vdec->core_mask &= ~mask;
2450
2451 if (vdec->slave)
2452 vdec->slave->core_mask &= ~mask;
2453 if (vdec_core->parallel_dec == 1) {
2454 if (mask & CORE_MASK_COMBINE)
2455 vdec_core->vdec_combine_flag--;
2456 }
2457 return 0;
2458}
2459EXPORT_SYMBOL(vdec_core_release);
2460
2461bool vdec_core_with_input(unsigned long mask)
2462{
2463 enum vdec_type_e type;
2464
2465 for (type = VDEC_1; type < VDEC_MAX; type++) {
2466 if ((mask & (1 << type)) && cores_with_input[type])
2467 return true;
2468 }
2469
2470 return false;
2471}
2472
2473void vdec_core_finish_run(struct vdec_s *vdec, unsigned long mask)
2474{
2475 unsigned long i;
2476 unsigned long t = mask;
2477 mutex_lock(&vdec_mutex);
2478 while (t) {
2479 i = __ffs(t);
2480 clear_bit(i, &vdec->active_mask);
2481 t &= ~(1 << i);
2482 }
2483
2484 if (vdec->active_mask == 0)
2485 vdec_set_status(vdec, VDEC_STATUS_CONNECTED);
2486
2487 mutex_unlock(&vdec_mutex);
2488}
2489EXPORT_SYMBOL(vdec_core_finish_run);
2490/*
2491 * find what core resources are available for vdec
2492 */
2493static unsigned long vdec_schedule_mask(struct vdec_s *vdec,
2494 unsigned long active_mask)
2495{
2496 unsigned long mask = vdec->core_mask &
2497 ~CORE_MASK_COMBINE;
2498
2499 if (vdec->core_mask & CORE_MASK_COMBINE) {
2500 /* combined cores must be granted together */
2501 if ((mask & ~active_mask) == mask)
2502 return mask;
2503 else
2504 return 0;
2505 } else
2506 return mask & ~vdec->sched_mask & ~active_mask;
2507}
2508
2509/*
2510 *Decoder callback
2511 * Each decoder instance uses this callback to notify status change, e.g. when
2512 * decoder finished using HW resource.
2513 * a sample callback from decoder's driver is following:
2514 *
2515 * if (hw->vdec_cb) {
2516 * vdec_set_next_status(vdec, VDEC_STATUS_CONNECTED);
2517 * hw->vdec_cb(vdec, hw->vdec_cb_arg);
2518 * }
2519 */
2520static void vdec_callback(struct vdec_s *vdec, void *data)
2521{
2522 struct vdec_core_s *core = (struct vdec_core_s *)data;
2523
2524#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
2525 vdec_profile(vdec, VDEC_PROFILE_EVENT_CB);
2526#endif
2527
2528 up(&core->sem);
2529}
2530
/*
 * Top-half interrupt handler shared by the decoder cores.
 * Resolves which vdec instance the IRQ belongs to (per-core active
 * instance in parallel mode, otherwise the last scheduled vdec),
 * flags it as "in irq" so vdec_release() can wait for us, then
 * dispatches to the registered device ISR or the instance irq_handler.
 */
static irqreturn_t vdec_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		atomic_set(&vdec->inirq_flag, 1);
		/* timestamp consumed by vdec_thread_isr() latency check */
		vdec->isr_ns = local_clock();
	}
	/* a device-registered ISR takes precedence over the instance one */
	if (c->dev_isr) {
		ret = c->dev_isr(irq, c->dev_id);
		goto isr_done;
	}

	if ((c != &vdec_core->isr_context[VDEC_IRQ_0]) &&
		(c != &vdec_core->isr_context[VDEC_IRQ_1]) &&
		(c != &vdec_core->isr_context[VDEC_IRQ_HEVC_BACK])) {
#if 0
		pr_warn("vdec interrupt w/o a valid receiver\n");
#endif
		goto isr_done;
	}

	if (!vdec) {
#if 0
		pr_warn("vdec interrupt w/o an active instance running. core = %p\n",
			core);
#endif
		goto isr_done;
	}

	if (!vdec->irq_handler) {
#if 0
		pr_warn("vdec instance has no irq handle.\n");
#endif
		goto isr_done;
	}

	ret = vdec->irq_handler(vdec, c->index);
isr_done:
	if (vdec)
		atomic_set(&vdec->inirq_flag, 0);
	return ret;
}
2586
/*
 * Threaded half of the decoder interrupt.  Resolves the owning vdec
 * the same way as vdec_isr(), logs when the hard-irq to threaded-irq
 * latency exceeds 10ms, then dispatches to the device threaded ISR or
 * the instance threaded_irq_handler.
 */
static irqreturn_t vdec_thread_isr(int irq, void *dev_id)
{
	struct vdec_isr_context_s *c =
		(struct vdec_isr_context_s *)dev_id;
	struct vdec_s *vdec = vdec_core->last_vdec;
	irqreturn_t ret = IRQ_HANDLED;

	if (vdec_core->parallel_dec == 1) {
		if (irq == vdec_core->isr_context[VDEC_IRQ_0].irq)
			vdec = vdec_core->active_hevc;
		else if (irq == vdec_core->isr_context[VDEC_IRQ_1].irq)
			vdec = vdec_core->active_vdec;
		else
			vdec = NULL;
	}

	if (vdec) {
		u32 isr2tfn = 0;
		atomic_set(&vdec->inirq_thread_flag, 1);
		vdec->tfn_ns = local_clock();
		/* latency from hard-irq entry (isr_ns) to this handler */
		isr2tfn = vdec->tfn_ns - vdec->isr_ns;
		if (isr2tfn > 10000000)	/* 10ms in ns */
			pr_err("!!!!!!! %s vdec_isr to %s took %uns !!!\n",
				vdec->vf_provider_name, __func__, isr2tfn);
	}
	if (c->dev_threaded_isr) {
		ret = c->dev_threaded_isr(irq, c->dev_id);
		goto thread_isr_done;
	}
	if (!vdec)
		goto thread_isr_done;

	if (!vdec->threaded_irq_handler)
		goto thread_isr_done;
	ret = vdec->threaded_irq_handler(vdec, c->index);
thread_isr_done:
	if (vdec)
		atomic_set(&vdec->inirq_thread_flag, 0);
	return ret;
}
2627
/*
 * Decide whether @vdec can be scheduled on the cores in @mask.
 * Checks instance status, input availability (frame-based chunk or
 * stream-based fifo level), debug step-mode, then asks the codec's own
 * run_ready() callback.  Returns the subset of @mask the instance is
 * ready to run on (0 / false when not ready).
 */
unsigned long vdec_ready_to_run(struct vdec_s *vdec, unsigned long mask)
{
	unsigned long ready_mask;
	struct vdec_input_s *input = &vdec->input;
	if ((vdec->status != VDEC_STATUS_CONNECTED) &&
		(vdec->status != VDEC_STATUS_ACTIVE))
		return false;

	if (!vdec->run_ready)
		return false;

	/* when crc32 error, block at error frame */
	if (vdec->vfc.err_crc_block)
		return false;

	/* dual-layer instances only run once scheduling is enabled */
	if ((vdec->slave || vdec->master) &&
		(vdec->sched == 0))
		return false;
#ifdef VDEC_DEBUG_SUPPORT
	inc_profi_count(mask, vdec->check_count);
#endif
	if (vdec_core_with_input(mask)) {
		/* check frame based input underrun */
		if (input && !input->eos && input_frame_based(input)
			&& (!vdec_input_next_chunk(input))) {
#ifdef VDEC_DEBUG_SUPPORT
			inc_profi_count(mask, vdec->input_underrun_count);
#endif
			return false;
		}
		/* check streaming prepare level threshold if not EOS */
		if (input && input_stream_based(input) && !input->eos) {
			u32 rp, wp, level;

			rp = READ_PARSER_REG(PARSER_VIDEO_RP);
			wp = READ_PARSER_REG(PARSER_VIDEO_WP);
			/* ring buffer: write pointer may have wrapped */
			if (wp < rp)
				level = input->size + wp - rp;
			else
				level = wp - rp;

			if ((level < input->prepare_level) &&
				(pts_get_rec_num(PTS_TYPE_VIDEO,
					vdec->input.total_rd_count) < 2)) {
				vdec->need_more_data |= VDEC_NEED_MORE_DATA;
#ifdef VDEC_DEBUG_SUPPORT
				inc_profi_count(mask, vdec->input_underrun_count);
				if (step_mode & 0x200) {
					if ((step_mode & 0xff) == vdec->id) {
						step_mode |= 0xff;
						return mask;
					}
				}
#endif
				return false;
			} else if (level > input->prepare_level)
				vdec->need_more_data &= ~VDEC_NEED_MORE_DATA;
		}
	}

	/* single-step debug: only the selected instance may proceed */
	if (step_mode) {
		if ((step_mode & 0xff) != vdec->id)
			return 0;
		step_mode |= 0xff; /*VDEC_DEBUG_SUPPORT*/
	}

	/*step_mode &= ~0xff; not work for id of 0, removed*/

#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	vdec_profile(vdec, VDEC_PROFILE_EVENT_CHK_RUN_READY);
#endif

	/* the codec driver has the final word, limited to @mask */
	ready_mask = vdec->run_ready(vdec, mask) & mask;
#ifdef VDEC_DEBUG_SUPPORT
	if (ready_mask != mask)
		inc_profi_count(ready_mask ^ mask, vdec->not_run_ready_count);
#endif
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
	if (ready_mask)
		vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN_READY);
#endif

	return ready_mask;
}
2712
2713/* bridge on/off vdec's interrupt processing to vdec core */
2714static void vdec_route_interrupt(struct vdec_s *vdec, unsigned long mask,
2715 bool enable)
2716{
2717 enum vdec_type_e type;
2718
2719 for (type = VDEC_1; type < VDEC_MAX; type++) {
2720 if (mask & (1 << type)) {
2721 struct vdec_isr_context_s *c =
2722 &vdec_core->isr_context[cores_int[type]];
2723 if (enable)
2724 c->vdec = vdec;
2725 else if (c->vdec == vdec)
2726 c->vdec = NULL;
2727 }
2728 }
2729}
2730
/*
 * Set up secure protection for each decoder instance running.
 * Note: The operation from REE side only resets memory access
 * to a default policy and even a non_secure type will still be
 * changed to secure type automatically when secure source is
 * detected inside TEE.
 * Perform need_more_data checking and set flag is decoder
 * is not consuming data.
 */
void vdec_prepare_run(struct vdec_s *vdec, unsigned long mask)
{
	struct vdec_input_s *input = &vdec->input;
	int secure = (vdec_secure(vdec)) ? DMC_DEV_TYPE_SECURE :
			DMC_DEV_TYPE_NON_SECURE;

	/* route the cores' interrupts to this instance before it runs */
	vdec_route_interrupt(vdec, mask, true);

	/* cores without an input side need no secure/underrun handling */
	if (!vdec_core_with_input(mask))
		return;

	if (secure && vdec_stream_based(vdec) && force_nosecure_even_drm)
	{
		/* Verimatrix ultra webclient (HLS) was played in drmmode and used hw demux. In drmmode VDEC only can access secure.
		Now HW demux parsed es data to no-secure buffer. So the VDEC input was no-secure, VDEC playback failed. Forcing
		use nosecure for verimatrix webclient HLS. If in the future HW demux can parse es data to secure buffer, make
		VDEC r/w secure.*/
		secure = 0;
		//pr_debug("allow VDEC can access nosecure even in drmmode\n");
	}
	if (input->target == VDEC_INPUT_TARGET_VLD)
		tee_config_device_secure(DMC_DEV_ID_VDEC, secure);
	else if (input->target == VDEC_INPUT_TARGET_HEVC)
		tee_config_device_secure(DMC_DEV_ID_HEVC, secure);

	/* if the previous run asked to be scheduled (RUN set) and no new
	 * data arrived since (DIRTY clear), raise the need-more-data flag
	 */
	if (vdec_stream_based(vdec) &&
		((vdec->need_more_data & VDEC_NEED_MORE_DATA_RUN) &&
		(vdec->need_more_data & VDEC_NEED_MORE_DATA_DIRTY) == 0)) {
		vdec->need_more_data |= VDEC_NEED_MORE_DATA;
	}

	vdec->need_more_data |= VDEC_NEED_MORE_DATA_RUN;
	vdec->need_more_data &= ~VDEC_NEED_MORE_DATA_DIRTY;
}
2774
2775
/* The vdec core thread manages all decoder instances in the active list.
 * When a vdec is added into the active list, it can only be in two states:
 * VDEC_STATUS_CONNECTED(the decoder does not own HW resource and ready to run)
 * VDEC_STATUS_ACTIVE(the decoder owns HW resources and is running).
 * Removing a decoder from active list is only performed within core thread.
 * Adding a decoder into active list is performed from user thread.
 *
 * Each loop iteration: (1) reclaim cores released by decoders and clean
 * up their input side, (2) move disconnecting instances out of the
 * connected list, (3) elect the next runnable vdec (round-robin from
 * last_vdec) and start it, (4) complete disconnections, (5) throttle
 * when nothing was scheduled.
 */
static int vdec_core_thread(void *data)
{
	struct vdec_core_s *core = (struct vdec_core_s *)data;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO/2};
	unsigned long flags;
	int i;

	sched_setscheduler(current, SCHED_FIFO, &param);

	allow_signal(SIGTERM);

	while (down_interruptible(&core->sem) == 0) {
		struct vdec_s *vdec, *tmp, *worker;
		unsigned long sched_mask = 0;
		LIST_HEAD(disconnecting_list);

		if (kthread_should_stop())
			break;
		mutex_lock(&vdec_mutex);

		/* refresh which cores currently hold a power reference */
		if (core->parallel_dec == 1) {
			for (i = VDEC_1; i < VDEC_MAX; i++) {
				core->power_ref_mask =
					core->power_ref_count[i] > 0 ?
					(core->power_ref_mask | (1 << i)) :
					(core->power_ref_mask & ~(1 << i));
			}
		}
		/* clean up previous active vdec's input */
		list_for_each_entry(vdec, &core->connected_vdec_list, list) {
			/* cores scheduled to this vdec but no longer active */
			unsigned long mask = vdec->sched_mask &
				(vdec->active_mask ^ vdec->sched_mask);

			vdec_route_interrupt(vdec, mask, false);

#ifdef VDEC_DEBUG_SUPPORT
			update_profi_clk_stop(vdec, mask, get_current_clk());
#endif
			/*
			 * If decoder released some core resources (mask), then
			 * check if these core resources are associated
			 * with any input side and do input clean up accordingly
			 */
			if (vdec_core_with_input(mask)) {
				struct vdec_input_s *input = &vdec->input;
				/* drop all leading consumed chunks */
				while (!list_empty(
					&input->vframe_chunk_list)) {
					struct vframe_chunk_s *chunk =
						vdec_input_next_chunk(input);
					if (chunk && (chunk->flag &
						VFRAME_CHUNK_FLAG_CONSUMED))
						vdec_input_release_chunk(input,
							chunk);
					else
						break;
				}

				vdec_save_input_context(vdec);
			}

			vdec->sched_mask &= ~mask;
			core->sched_mask &= ~mask;
		}
		vdec_update_buff_status();
		/*
		 *todo:
		 * this is the case when the decoder is in active mode and
		 * the system side wants to stop it. Currently we rely on
		 * the decoder instance to go back to VDEC_STATUS_CONNECTED
		 * from VDEC_STATUS_ACTIVE by its own. However, if for some
		 * reason the decoder can not exist by itself (dead decoding
		 * or whatever), then we may have to add another vdec API
		 * to kill the vdec and release its HW resource and make it
		 * become inactive again.
		 * if ((core->active_vdec) &&
		 * (core->active_vdec->status == VDEC_STATUS_DISCONNECTED)) {
		 * }
		 */

		/* check disconnected decoders */
		flags = vdec_core_lock(vdec_core);
		list_for_each_entry_safe(vdec, tmp,
			&core->connected_vdec_list, list) {
			if ((vdec->status == VDEC_STATUS_CONNECTED) &&
				(vdec->next_status == VDEC_STATUS_DISCONNECTED)) {
				if (core->parallel_dec == 1) {
					if (vdec_core->active_hevc == vdec)
						vdec_core->active_hevc = NULL;
					if (vdec_core->active_vdec == vdec)
						vdec_core->active_vdec = NULL;
				}
				if (core->last_vdec == vdec)
					core->last_vdec = NULL;
				list_move(&vdec->list, &disconnecting_list);
			}
		}
		vdec_core_unlock(vdec_core, flags);
		mutex_unlock(&vdec_mutex);
		/* elect next vdec to be scheduled */
		vdec = core->last_vdec;
		if (vdec) {
			/* round-robin: continue after last scheduled vdec */
			vdec = list_entry(vdec->list.next, struct vdec_s, list);
			list_for_each_entry_from(vdec,
				&core->connected_vdec_list, list) {
				sched_mask = vdec_schedule_mask(vdec,
					core->sched_mask);
				if (!sched_mask)
					continue;
				sched_mask = vdec_ready_to_run(vdec,
					sched_mask);
				if (sched_mask)
					break;
			}

			if (&vdec->list == &core->connected_vdec_list)
				vdec = NULL;
		}

		if (!vdec) {
			/* search from beginning */
			list_for_each_entry(vdec,
				&core->connected_vdec_list, list) {
				sched_mask = vdec_schedule_mask(vdec,
					core->sched_mask);
				if (vdec == core->last_vdec) {
					/* full circle back to last_vdec:
					 * it is the last candidate
					 */
					if (!sched_mask) {
						vdec = NULL;
						break;
					}

					sched_mask = vdec_ready_to_run(vdec,
						sched_mask);

					if (!sched_mask) {
						vdec = NULL;
						break;
					}
					break;
				}

				if (!sched_mask)
					continue;

				sched_mask = vdec_ready_to_run(vdec,
					sched_mask);
				if (sched_mask)
					break;
			}

			if (&vdec->list == &core->connected_vdec_list)
				vdec = NULL;
		}

		worker = vdec;

		if (vdec) {
			unsigned long mask = sched_mask;
			unsigned long i;

			/* setting active_mask should be atomic.
			 * it can be modified by decoder driver callbacks.
			 */
			while (sched_mask) {
				i = __ffs(sched_mask);
				set_bit(i, &vdec->active_mask);
				/* NOTE(review): "1 << i" is an int shift;
				 * should be 1UL << i for bits >= 31 - confirm
				 */
				sched_mask &= ~(1 << i);
			}

			/* vdec's sched_mask is only set from core thread */
			vdec->sched_mask |= mask;
			if (core->last_vdec) {
				/* different firmware type than previous run:
				 * force a firmware reload
				 */
				if ((core->last_vdec != vdec) &&
					(core->last_vdec->mc_type != vdec->mc_type))
					vdec->mc_loaded = 0;/*clear for reload firmware*/
			} else
				vdec->mc_loaded = 0;
			core->last_vdec = vdec;
			if (debug & 2)
				vdec->mc_loaded = 0;/*alway reload firmware*/
			vdec_set_status(vdec, VDEC_STATUS_ACTIVE);

			core->sched_mask |= mask;
			if (core->parallel_dec == 1)
				vdec_save_active_hw(vdec);
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
			vdec_profile(vdec, VDEC_PROFILE_EVENT_RUN);
#endif
			vdec_prepare_run(vdec, mask);
#ifdef VDEC_DEBUG_SUPPORT
			inc_profi_count(mask, vdec->run_count);
			update_profi_clk_run(vdec, mask, get_current_clk());
#endif
			vdec->run(vdec, mask, vdec_callback, core);


			/* we have some cores scheduled, keep working until
			 * all vdecs are checked with no cores to schedule
			 */
			if (core->parallel_dec == 1) {
				if (vdec_core->vdec_combine_flag == 0)
					up(&core->sem);
			} else
				up(&core->sem);
		}

		/* remove disconnected decoder from active list */
		list_for_each_entry_safe(vdec, tmp, &disconnecting_list, list) {
			list_del(&vdec->list);
			vdec_set_status(vdec, VDEC_STATUS_DISCONNECTED);
			/*core->last_vdec = NULL;*/
			complete(&vdec->inactive_done);
		}

		/* if there is no new work scheduled and nothing
		 * is running, sleep 20ms
		 */
		if (core->parallel_dec == 1) {
			if (vdec_core->vdec_combine_flag == 0) {
				if ((!worker) &&
					((core->sched_mask != core->power_ref_mask)) &&
					(atomic_read(&vdec_core->vdec_nr) > 0) &&
					((core->buff_flag | core->stream_buff_flag) &
					(core->sched_mask ^ core->power_ref_mask))) {
					usleep_range(1000, 2000);
					up(&core->sem);
				}
			} else {
				if ((!worker) && (!core->sched_mask) &&
					(atomic_read(&vdec_core->vdec_nr) > 0) &&
					(core->buff_flag | core->stream_buff_flag)) {
					usleep_range(1000, 2000);
					up(&core->sem);
				}
			}
		} else if ((!worker) && (!core->sched_mask) && (atomic_read(&vdec_core->vdec_nr) > 0)) {
			usleep_range(1000, 2000);
			up(&core->sem);
		}

	}

	return 0;
}
3026
3027#if 1 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8 */
3028static bool test_hevc(u32 decomp_addr, u32 us_delay)
3029{
3030 int i;
3031
3032 /* SW_RESET IPP */
3033 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 1);
3034 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0);
3035
3036 /* initialize all canvas table */
3037 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0);
3038 for (i = 0; i < 32; i++)
3039 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CMD_ADDR,
3040 0x1 | (i << 8) | decomp_addr);
3041 WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 1);
3042 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
3043 for (i = 0; i < 32; i++)
3044 WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
3045
3046 /* Initialize mcrcc */
3047 WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
3048 WRITE_VREG(HEVCD_MCRCC_CTL2, 0x0);
3049 WRITE_VREG(HEVCD_MCRCC_CTL3, 0x0);
3050 WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
3051
3052 /* Decomp initialize */
3053 WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x0);
3054 WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
3055
3056 /* Frame level initialization */
3057 WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG, 0x100 | (0x100 << 16));
3058 WRITE_VREG(HEVCD_IPP_TOP_TILECONFIG3, 0x0);
3059 WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 0x1 << 5);
3060 WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x2 | (0x2 << 2));
3061
3062 WRITE_VREG(HEVCD_IPP_CONFIG, 0x0);
3063 WRITE_VREG(HEVCD_IPP_LINEBUFF_BASE, 0x0);
3064
3065 /* Enable SWIMP mode */
3066 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CONFIG, 0x1);
3067
3068 /* Enable frame */
3069 WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x2);
3070 WRITE_VREG(HEVCD_IPP_TOP_FRMCTL, 0x1);
3071
3072 /* Send SW-command CTB info */
3073 WRITE_VREG(HEVCD_IPP_SWMPREDIF_CTBINFO, 0x1 << 31);
3074
3075 /* Send PU_command */
3076 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO0, (0x4 << 9) | (0x4 << 16));
3077 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO1, 0x1 << 3);
3078 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO2, 0x0);
3079 WRITE_VREG(HEVCD_IPP_SWMPREDIF_PUINFO3, 0x0);
3080
3081 udelay(us_delay);
3082
3083 WRITE_VREG(HEVCD_IPP_DBG_SEL, 0x2 << 4);
3084
3085 return (READ_VREG(HEVCD_IPP_DBG_DATA) & 3) == 1;
3086}
3087
/*
 * Force every decoder core present on this chip (vdec1, vdec2, hcodec,
 * hevc) into the powered-off state: isolation enabled, memories powered
 * down, then sleep asserted.  Not reference counted - this is a global
 * power reset.
 */
void vdec_power_reset(void)
{
	/* enable vdec1 isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc0);
	/* power off vdec1 memories */
	WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
	/* vdec1 power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc);

	if (has_vdec2()) {
		/* enable vdec2 isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x300);
		/* power off vdec2 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
		/* vdec2 power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0x30);
	}

	if (has_hdec()) {
		/* enable hcodec isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0x30);
		/* power off hcodec memories */
		WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
		/* hcodec power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 3);
	}

	if (has_hevc_vdec()) {
		/* enable hevc isolation */
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | 0xc00);
		/* power off hevc memories */
		WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);
		/* hevc power off */
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | 0xc0);
	}
}
EXPORT_SYMBOL(vdec_power_reset);
3133
/*
 * Power up one decoder core (VDEC_1 / VDEC_2 / VDEC_HCODEC / VDEC_HEVC).
 * Reference counted: only the first user of a core performs the actual
 * sleep / reset / clock / memory / isolation power-up sequence.  For
 * HEVC on chips that need the GXBB workaround the sequence is verified
 * with test_hevc() and retried up to HEVC_TEST_LIMIT times before
 * panicking the system.
 *
 * NOTE(review): the early returns on power_ctrl_*_mask() failure leak
 * decomp_addr (allocated for the HEVC workaround) - confirm and rework
 * with a common unwind path.
 */
void vdec_poweron(enum vdec_type_e core)
{
	void *decomp_addr = NULL;
	dma_addr_t decomp_dma_addr;
	u32 decomp_addr_aligned = 0;
	int hevc_loop = 0;
	int sleep_val, iso_val;
	bool is_power_ctrl_ver2 = false;

	if (core >= VDEC_MAX)
		return;

	mutex_lock(&vdec_mutex);

	vdec_core->power_ref_count[core]++;
	/* core already powered by another user: just keep the reference */
	if (vdec_core->power_ref_count[core] > 1) {
		mutex_unlock(&vdec_mutex);
		return;
	}

	if (vdec_on(core)) {
		mutex_unlock(&vdec_mutex);
		return;
	}

	/* SM1+ (except TL1) use the v2 sleep/iso bit layout */
	is_power_ctrl_ver2 =
		((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;

	if (hevc_workaround_needed() &&
		(core == VDEC_HEVC)) {
		/* scratch decompression buffer used by test_hevc() */
		decomp_addr = codec_mm_dma_alloc_coherent(MEM_NAME,
			SZ_64K + SZ_4K, &decomp_dma_addr, GFP_KERNEL, 0);

		if (decomp_addr) {
			decomp_addr_aligned = ALIGN(decomp_dma_addr, SZ_64K);
			memset((u8 *)decomp_addr +
				(decomp_addr_aligned - decomp_dma_addr),
				0xff, SZ_4K);
		} else
			pr_err("vdec: alloc HEVC gxbb decomp buffer failed.\n");
	}

	if (core == VDEC_1) {
		sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
		iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;

		/* vdec1 power on */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power on ctrl sleep fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
#endif
		/* wait 10uS */
		udelay(10);
		/* vdec1 soft reset */
		WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
		WRITE_VREG(DOS_SW_RESET0, 0);
		/* enable vdec1 clock */
		/*
		 *add power on vdec clock level setting,only for m8 chip,
		 * m8baby and m8m2 can dynamic adjust vdec clock,
		 * power on with default clock level
		 */
		amports_switch_gate("clk_vdec_mux", 1);
		vdec_clock_hi_enable();
		/* power up vdec memories */
		WRITE_VREG(DOS_MEM_PD_VDEC, 0);

		/* remove vdec1 isolation */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_iso_mask(true, iso_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power on ctrl iso fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
#endif
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_2) {
		if (has_vdec2()) {
			/* vdec2 power on */
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
					~0x30);
			/* wait 10uS */
			udelay(10);
			/* vdec2 soft reset */
			WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
			WRITE_VREG(DOS_SW_RESET2, 0);
			/* enable vdec1 clock */
			vdec2_clock_hi_enable();
			/* power up vdec memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0);
			/* remove vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
					~0x300);
			/* reset DOS top registers */
			WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
		}
	} else if (core == VDEC_HCODEC) {
		if (has_hdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* hcodec power on */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power on ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
#endif
			/* wait 10uS */
			udelay(10);
			/* hcodec soft reset */
			WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
			WRITE_VREG(DOS_SW_RESET1, 0);
			/* enable hcodec clock */
			hcodec_clock_enable();
			/* power up hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0);
			/* remove hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(true, iso_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power on ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
#endif
		}
	} else if (core == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			bool hevc_fixed = false;

			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;

			/* retry until test_hevc() passes (GXBB workaround) */
			while (!hevc_fixed) {
				/* hevc power on */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(true, sleep_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power on ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & ~sleep_val);
#endif
				/* wait 10uS */
				udelay(10);
				/* hevc soft reset */
				WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
				WRITE_VREG(DOS_SW_RESET3, 0);
				/* enable hevc clock */
				amports_switch_gate("clk_hevc_mux", 1);
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					amports_switch_gate("clk_hevcb_mux", 1);
				hevc_clock_hi_enable();
				hevc_back_clock_hi_enable();
				/* power up hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0);
				/* remove hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(true, iso_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power on ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) & ~iso_val);
#endif
				if (!hevc_workaround_needed())
					break;

				if (decomp_addr)
					hevc_fixed = test_hevc(
						decomp_addr_aligned, 20);

				if (!hevc_fixed) {
					hevc_loop++;

					mutex_unlock(&vdec_mutex);

					if (hevc_loop >= HEVC_TEST_LIMIT) {
						pr_warn("hevc power sequence over limit\n");
						pr_warn("=====================================================\n");
						pr_warn(" This chip is identified to have HW failure.\n");
						pr_warn(" Please contact sqa-platform to replace the platform.\n");
						pr_warn("=====================================================\n");

						panic("Force panic for chip detection !!!\n");

						break;
					}

					vdec_poweroff(VDEC_HEVC);

					mdelay(10);

					mutex_lock(&vdec_mutex);
				}
			}

			if (hevc_loop > hevc_max_reset_count)
				hevc_max_reset_count = hevc_loop;

			WRITE_VREG(DOS_SW_RESET3, 0xffffffff);
			udelay(10);
			WRITE_VREG(DOS_SW_RESET3, 0);
		}
	}

	if (decomp_addr)
		codec_mm_dma_free_coherent(MEM_NAME,
			SZ_64K + SZ_4K, decomp_addr, decomp_dma_addr, 0);

	mutex_unlock(&vdec_mutex);
}
EXPORT_SYMBOL(vdec_poweron);
3401
/*
 * vdec_poweroff() - drop one power reference on @core and, when the last
 * reference is released, gate the core off: raise isolation, power down
 * the core's memories, stop its clock, then assert the sleep bits.
 *
 * @core: decoder core (VDEC_1 / VDEC_2 / VDEC_HCODEC / VDEC_HEVC)
 *
 * Mirror of vdec_poweron(); serialized by vdec_mutex.
 * NOTE(review): on the power_ctrl_* error returns the ref count has already
 * dropped to zero while the hardware is only partially gated — confirm
 * callers tolerate this.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	int sleep_val, iso_val;
	bool is_power_ctrl_ver2 = false;

	if (core >= VDEC_MAX)
		return;

	mutex_lock(&vdec_mutex);

	/* Reference counted: only the last user actually powers the core off */
	vdec_core->power_ref_count[core]--;
	if (vdec_core->power_ref_count[core] > 0) {
		mutex_unlock(&vdec_mutex);
		return;
	}

	/* SM1 and newer (except TL1) use the "ver2" sleep/iso bit layout */
	is_power_ctrl_ver2 =
		((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) ? true : false;

	if (core == VDEC_1) {
		sleep_val = is_power_ctrl_ver2 ? 0x2 : 0xc;
		iso_val = is_power_ctrl_ver2 ? 0x2 : 0xc0;

		/* enable vdec1 isolation */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_iso_mask(false, iso_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl iso fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
			READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
		/* power off vdec1 memories */
		WRITE_VREG(DOS_MEM_PD_VDEC, 0xffffffffUL);
		/* disable vdec1 clock */
		vdec_clock_off();
		/* vdec1 power off */
#ifdef CONFIG_AMLOGIC_POWER
		if (is_support_power_ctrl()) {
			if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
				mutex_unlock(&vdec_mutex);
				pr_err("vdec-1 power off ctrl sleep fail.\n");
				return;
			}
		} else {
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
		}
#else
		WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
			READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
	} else if (core == VDEC_2) {
		if (has_vdec2()) {
			/* enable vdec2 isolation */
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
				0x300);
			/* power off vdec2 memories */
			WRITE_VREG(DOS_MEM_PD_VDEC2, 0xffffffffUL);
			/* disable vdec2 clock */
			vdec2_clock_off();
			/* vdec2 power off */
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
				0x30);
		}
	} else if (core == VDEC_HCODEC) {
		if (has_hdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x1 : 0x3;
			iso_val = is_power_ctrl_ver2 ? 0x1 : 0x30;

			/* enable hcodec isolation */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_iso_mask(false, iso_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl iso fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
				READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
			/* power off hcodec memories */
			WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);
			/* disable hcodec clock */
			hcodec_clock_off();
			/* hcodec power off */
#ifdef CONFIG_AMLOGIC_POWER
			if (is_support_power_ctrl()) {
				if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
					mutex_unlock(&vdec_mutex);
					pr_err("hcodec power off ctrl sleep fail.\n");
					return;
				}
			} else {
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
			}
#else
			WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
				READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
		}
	} else if (core == VDEC_HEVC) {
		if (has_hevc_vdec()) {
			sleep_val = is_power_ctrl_ver2 ? 0x4 : 0xc0;
			iso_val = is_power_ctrl_ver2 ? 0x4 : 0xc00;

			if (no_powerdown == 0) {
				/* enable hevc isolation */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_iso_mask(false, iso_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl iso fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
						READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
					READ_AOREG(AO_RTI_GEN_PWR_ISO0) | iso_val);
#endif
				/* power off hevc memories */
				WRITE_VREG(DOS_MEM_PD_HEVC, 0xffffffffUL);

				/* disable hevc clock */
				hevc_clock_off();
				/* G12A+ has a separate back-end clock to gate */
				if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
					hevc_back_clock_off();

				/* hevc power off */
#ifdef CONFIG_AMLOGIC_POWER
				if (is_support_power_ctrl()) {
					if (power_ctrl_sleep_mask(false, sleep_val, 0)) {
						mutex_unlock(&vdec_mutex);
						pr_err("hevc power off ctrl sleep fail.\n");
						return;
					}
				} else {
					WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
						READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
				}
#else
				WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
					READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | sleep_val);
#endif
			} else {
				/* one-shot skip: reset the core instead of gating it */
				pr_info("!!!!!!!!not power down\n");
				hevc_reset_core(NULL);
				no_powerdown = 0;
			}
		}
	}
	mutex_unlock(&vdec_mutex);
}
EXPORT_SYMBOL(vdec_poweroff);
3575
3576bool vdec_on(enum vdec_type_e core)
3577{
3578 bool ret = false;
3579
3580 if (core == VDEC_1) {
3581 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3582 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3583 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3584 ? 0x2 : 0xc)) == 0) &&
3585 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100))
3586 ret = true;
3587 } else if (core == VDEC_2) {
3588 if (has_vdec2()) {
3589 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & 0x30) == 0) &&
3590 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100))
3591 ret = true;
3592 }
3593 } else if (core == VDEC_HCODEC) {
3594 if (has_hdec()) {
3595 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3596 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3597 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3598 ? 0x1 : 0x3)) == 0) &&
3599 (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000))
3600 ret = true;
3601 }
3602 } else if (core == VDEC_HEVC) {
3603 if (has_hevc_vdec()) {
3604 if (((READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
3605 (((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
3606 (get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1))
3607 ? 0x4 : 0xc0)) == 0) &&
3608 (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x1000000))
3609 ret = true;
3610 }
3611 }
3612
3613 return ret;
3614}
3615EXPORT_SYMBOL(vdec_on);
3616
3617#elif 0 /* MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6TVD */
/*
 * Legacy vdec_poweron() for older SoCs.  This definition sits under an
 * "#elif 0" branch above, so it is compiled out; kept for reference.
 * Soft-resets the core, enables its clock, and clears the MCRCC stall
 * control where the core has one.
 */
void vdec_poweron(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* vdec1 soft reset */
		WRITE_VREG(DOS_SW_RESET0, 0xfffffffc);
		WRITE_VREG(DOS_SW_RESET0, 0);
		/* enable vdec1 clock */
		vdec_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_2) {
		/* vdec2 soft reset */
		WRITE_VREG(DOS_SW_RESET2, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET2, 0);
		/* enable vdec2 clock */
		vdec2_clock_enable();
		/* reset DOS top registers */
		WRITE_VREG(DOS_VDEC2_MCRCC_STALL_CTRL, 0);
	} else if (core == VDEC_HCODEC) {
		/* hcodec soft reset */
		WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
		WRITE_VREG(DOS_SW_RESET1, 0);
		/* enable hcodec clock */
		hcodec_clock_enable();
	}

	spin_unlock_irqrestore(&lock, flags);
}
3650
/*
 * Legacy vdec_poweroff() for older SoCs (compiled out under "#elif 0"
 * above; kept for reference).  Simply gates the selected core's clock.
 */
void vdec_poweroff(enum vdec_type_e core)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	if (core == VDEC_1) {
		/* disable vdec1 clock */
		vdec_clock_off();
	} else if (core == VDEC_2) {
		/* disable vdec2 clock */
		vdec2_clock_off();
	} else if (core == VDEC_HCODEC) {
		/* disable hcodec clock */
		hcodec_clock_off();
	}

	spin_unlock_irqrestore(&lock, flags);
}
3670
/*
 * Legacy vdec_on() for older SoCs (compiled out under "#elif 0" above;
 * kept for reference).  A core is "on" when its clock-enable bit in the
 * relevant HHI clock register is set.
 */
bool vdec_on(enum vdec_type_e core)
{
	bool ret = false;

	if (core == VDEC_1) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_2) {
		if (READ_HHI_REG(HHI_VDEC2_CLK_CNTL) & 0x100)
			ret = true;
	} else if (core == VDEC_HCODEC) {
		if (READ_HHI_REG(HHI_VDEC_CLK_CNTL) & 0x1000000)
			ret = true;
	}

	return ret;
}
3688#endif
3689
3690int vdec_source_changed(int format, int width, int height, int fps)
3691{
3692 /* todo: add level routines for clock adjustment per chips */
3693 int ret = -1;
3694 static int on_setting;
3695
3696 if (on_setting > 0)
3697 return ret;/*on changing clk,ignore this change*/
3698
3699 if (vdec_source_get(VDEC_1) == width * height * fps)
3700 return ret;
3701
3702
3703 on_setting = 1;
3704 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3705 pr_debug("vdec1 video changed to %d x %d %d fps clk->%dMHZ\n",
3706 width, height, fps, vdec_clk_get(VDEC_1));
3707 on_setting = 0;
3708 return ret;
3709
3710}
3711EXPORT_SYMBOL(vdec_source_changed);
3712
/*
 * vdec_reset_core() - soft-reset the VDEC_1 hardware pipeline.
 *
 * Sequence: detach the decoder from the DMC (memory controller) request
 * bus, wait for outstanding transactions to drain, pulse the relevant
 * DOS_SW_RESET0 bits, then re-attach the DMC port.
 *
 * @vdec: unused here; kept for signature symmetry with hevc_reset_core().
 *
 * NOTE(review): the DMC drain below is an unbounded busy-wait with no
 * timeout — confirm the hardware guarantees forward progress.
 */
void vdec_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 13; /*bit13: DOS VDEC interface*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask = 1 << 21; /*bit21: DOS VDEC interface*/

	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait until the DMC reports the channel idle */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;
	/*
	 * 2: assist
	 * 3: vld_reset
	 * 4: vld_part_reset
	 * 5: vfifo reset
	 * 6: iqidct
	 * 7: mc
	 * 8: dblk
	 * 9: pic_dc
	 * 10: psc
	 * 11: mcpu
	 * 12: ccpu
	 * 13: ddr
	 * 14: afifo
	 */
	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		(get_cpu_major_id() != AM_MESON_CPU_MAJOR_ID_TL1)) {
		WRITE_VREG(DOS_SW_RESET0, (1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9));
	} else {
		WRITE_VREG(DOS_SW_RESET0,
			(1<<3)|(1<<4)|(1<<5));
	}
	WRITE_VREG(DOS_SW_RESET0, 0);

	/* re-enable the decoder's DMC request port */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);
}
EXPORT_SYMBOL(vdec_reset_core);
3760
/*
 * hevc_mmu_dma_check() - quiesce the HEVC SAO MMU DMA engine (G12A+).
 *
 * Polls HEVC_CM_CORE_STATUS until the core reports idle (bit0 clear),
 * disables the SAO MMU DMA, then polls HEVC_SAO_MMU_DMA_STATUS for the
 * stop acknowledge (bit0 set).  Each poll is bounded by ~10ms (HZ/100);
 * on timeout it only logs (when debug bit 0x10 is set) and proceeds.
 *
 * @vdec: unused; kept for signature symmetry with the reset helpers.
 */
void hevc_mmu_dma_check(struct vdec_s *vdec)
{
	ulong timeout;
	u32 data;
	if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A)
		return;
	timeout = jiffies + HZ/100;
	while (1) {
		data = READ_VREG(HEVC_CM_CORE_STATUS);
		if ((data & 0x1) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			if (debug & 0x10)
				pr_info(" %s sao mmu dma idle\n", __func__);
			break;
		}
	}
	/*disable sao mmu dma */
	CLEAR_VREG_MASK(HEVC_SAO_MMU_DMA_CTRL, 1 << 0);
	timeout = jiffies + HZ/100;
	while (1) {
		data = READ_VREG(HEVC_SAO_MMU_DMA_STATUS);
		/* bit0 set = DMA stop acknowledged */
		if ((data & 0x1))
			break;
		if (time_after(jiffies, timeout)) {
			if (debug & 0x10)
				pr_err("%s sao mmu dma timeout, num_buf_used = 0x%x\n",
					__func__, (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16));
			break;
		}
	}
}
EXPORT_SYMBOL(hevc_mmu_dma_check);
3794
/*
 * hevc_reset_core() - soft-reset the HEVC hardware pipeline.
 *
 * Like vdec_reset_core() but for the HEVC core(s): stop the stream
 * engine, detach HEVC (and HEVCB on G12A+) from the DMC request bus,
 * wait for the drain, pulse the DOS_SW_RESET3 bits, then re-attach.
 *
 * @vdec: when NULL or frame-based input, the stream control register is
 *        cleared a second time after the DMC drain.
 *
 * NOTE(review): the DMC drain is an unbounded busy-wait with no timeout.
 */
void hevc_reset_core(struct vdec_s *vdec)
{
	unsigned long flags;
	unsigned int mask = 0;

	mask = 1 << 4; /*bit4: hevc*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
		mask |= 1 << 8; /*bit8: hevcb*/

	WRITE_VREG(HEVC_STREAM_CONTROL, 0);
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) & ~mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

	/* wait until the DMC reports the channel(s) idle */
	while (!(codec_dmcbus_read(DMC_CHAN_STS)
		& mask))
		;

	if (vdec == NULL || input_frame_based(vdec))
		WRITE_VREG(HEVC_STREAM_CONTROL, 0);

	/*
	 * 2: assist
	 * 3: parser
	 * 4: parser_state
	 * 8: dblk
	 * 11:mcpu
	 * 12:ccpu
	 * 13:ddr
	 * 14:iqit
	 * 15:ipp
	 * 17:qdct
	 * 18:mpred
	 * 19:sao
	 * 24:hevc_afifo
	 */
	WRITE_VREG(DOS_SW_RESET3,
		(1<<3)|(1<<4)|(1<<8)|(1<<11)|
		(1<<12)|(1<<13)|(1<<14)|(1<<15)|
		(1<<17)|(1<<18)|(1<<19)|(1<<24));

	WRITE_VREG(DOS_SW_RESET3, 0);


	/* re-enable the HEVC DMC request port(s) */
	spin_lock_irqsave(&vdec_spin_lock, flags);
	codec_dmcbus_write(DMC_REQ_CTRL,
		codec_dmcbus_read(DMC_REQ_CTRL) | mask);
	spin_unlock_irqrestore(&vdec_spin_lock, flags);

}
EXPORT_SYMBOL(hevc_reset_core);
3847
3848int vdec2_source_changed(int format, int width, int height, int fps)
3849{
3850 int ret = -1;
3851 static int on_setting;
3852
3853 if (has_vdec2()) {
3854 /* todo: add level routines for clock adjustment per chips */
3855 if (on_setting != 0)
3856 return ret;/*on changing clk,ignore this change*/
3857
3858 if (vdec_source_get(VDEC_2) == width * height * fps)
3859 return ret;
3860
3861 on_setting = 1;
3862 ret = vdec_source_changed_for_clk_set(format,
3863 width, height, fps);
3864 pr_debug("vdec2 video changed to %d x %d %d fps clk->%dMHZ\n",
3865 width, height, fps, vdec_clk_get(VDEC_2));
3866 on_setting = 0;
3867 return ret;
3868 }
3869 return 0;
3870}
3871EXPORT_SYMBOL(vdec2_source_changed);
3872
3873int hevc_source_changed(int format, int width, int height, int fps)
3874{
3875 /* todo: add level routines for clock adjustment per chips */
3876 int ret = -1;
3877 static int on_setting;
3878
3879 if (on_setting != 0)
3880 return ret;/*on changing clk,ignore this change*/
3881
3882 if (vdec_source_get(VDEC_HEVC) == width * height * fps)
3883 return ret;
3884
3885 on_setting = 1;
3886 ret = vdec_source_changed_for_clk_set(format, width, height, fps);
3887 pr_debug("hevc video changed to %d x %d %d fps clk->%dMHZ\n",
3888 width, height, fps, vdec_clk_get(VDEC_HEVC));
3889 on_setting = 0;
3890
3891 return ret;
3892}
3893EXPORT_SYMBOL(hevc_source_changed);
3894
/*
 * Name/offset table of amrisc MCPU/CCPU control registers and AV scratch
 * registers, dumped by amrisc_regs_show() for debugging.
 */
static struct am_reg am_risc[] = {
	{"MSP", 0x300},
	{"MPSR", 0x301},
	{"MCPU_INT_BASE", 0x302},
	{"MCPU_INTR_GRP", 0x303},
	{"MCPU_INTR_MSK", 0x304},
	{"MCPU_INTR_REQ", 0x305},
	{"MPC-P", 0x306},
	{"MPC-D", 0x307},
	{"MPC_E", 0x308},
	{"MPC_W", 0x309},
	{"CSP", 0x320},
	{"CPSR", 0x321},
	{"CCPU_INT_BASE", 0x322},
	{"CCPU_INTR_GRP", 0x323},
	{"CCPU_INTR_MSK", 0x324},
	{"CCPU_INTR_REQ", 0x325},
	{"CPC-P", 0x326},
	{"CPC-D", 0x327},
	{"CPC_E", 0x328},
	{"CPC_W", 0x329},
	{"AV_SCRATCH_0", 0x09c0},
	{"AV_SCRATCH_1", 0x09c1},
	{"AV_SCRATCH_2", 0x09c2},
	{"AV_SCRATCH_3", 0x09c3},
	{"AV_SCRATCH_4", 0x09c4},
	{"AV_SCRATCH_5", 0x09c5},
	{"AV_SCRATCH_6", 0x09c6},
	{"AV_SCRATCH_7", 0x09c7},
	{"AV_SCRATCH_8", 0x09c8},
	{"AV_SCRATCH_9", 0x09c9},
	{"AV_SCRATCH_A", 0x09ca},
	{"AV_SCRATCH_B", 0x09cb},
	{"AV_SCRATCH_C", 0x09cc},
	{"AV_SCRATCH_D", 0x09cd},
	{"AV_SCRATCH_E", 0x09ce},
	{"AV_SCRATCH_F", 0x09cf},
	{"AV_SCRATCH_G", 0x09d0},
	{"AV_SCRATCH_H", 0x09d1},
	{"AV_SCRATCH_I", 0x09d2},
	{"AV_SCRATCH_J", 0x09d3},
	{"AV_SCRATCH_K", 0x09d4},
	{"AV_SCRATCH_L", 0x09d5},
	{"AV_SCRATCH_M", 0x09d6},
	{"AV_SCRATCH_N", 0x09d7},
};
3941
/*
 * Sysfs show: dump every register in the am_risc[] table.
 *
 * On M8+ the dump requires VDEC_1 powered (checked under vdec_mutex,
 * held across the reads); on M6 the vdec gate is toggled around the
 * reads instead.  Returns the number of bytes written to @buf.
 */
static ssize_t amrisc_regs_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct am_reg *regs = am_risc;
	int rsize = sizeof(am_risc) / sizeof(struct am_reg);
	int i;
	unsigned int val;
	ssize_t ret;

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
		mutex_lock(&vdec_mutex);
		if (!vdec_on(VDEC_1)) {
			/* reading registers of a powered-off core is unsafe */
			mutex_unlock(&vdec_mutex);
			pbuf += sprintf(pbuf, "amrisc is power off\n");
			ret = pbuf - buf;
			return ret;
		}
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 1);
		 */
		amports_switch_gate("vdec", 1);
	}
	pbuf += sprintf(pbuf, "amrisc registers show:\n");
	for (i = 0; i < rsize; i++) {
		val = READ_VREG(regs[i].offset);
		pbuf += sprintf(pbuf, "%s(%#x)\t:%#x(%d)\n",
			regs[i].name, regs[i].offset, val, val);
	}
	/* undo whichever gating path was taken above */
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
		mutex_unlock(&vdec_mutex);
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 0);
		 */
		amports_switch_gate("vdec", 0);
	}
	ret = pbuf - buf;
	return ret;
}
3985
3986static ssize_t dump_trace_show(struct class *class,
3987 struct class_attribute *attr, char *buf)
3988{
3989 int i;
3990 char *pbuf = buf;
3991 ssize_t ret;
3992 u16 *trace_buf = kmalloc(debug_trace_num * 2, GFP_KERNEL);
3993
3994 if (!trace_buf) {
3995 pbuf += sprintf(pbuf, "No Memory bug\n");
3996 ret = pbuf - buf;
3997 return ret;
3998 }
3999 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
4000 mutex_lock(&vdec_mutex);
4001 if (!vdec_on(VDEC_1)) {
4002 mutex_unlock(&vdec_mutex);
4003 kfree(trace_buf);
4004 pbuf += sprintf(pbuf, "amrisc is power off\n");
4005 ret = pbuf - buf;
4006 return ret;
4007 }
4008 } else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4009 /*TODO:M6 define */
4010 /*
4011 * switch_mod_gate_by_type(MOD_VDEC, 1);
4012 */
4013 amports_switch_gate("vdec", 1);
4014 }
4015 pr_info("dump trace steps:%d start\n", debug_trace_num);
4016 i = 0;
4017 while (i <= debug_trace_num - 16) {
4018 trace_buf[i] = READ_VREG(MPC_E);
4019 trace_buf[i + 1] = READ_VREG(MPC_E);
4020 trace_buf[i + 2] = READ_VREG(MPC_E);
4021 trace_buf[i + 3] = READ_VREG(MPC_E);
4022 trace_buf[i + 4] = READ_VREG(MPC_E);
4023 trace_buf[i + 5] = READ_VREG(MPC_E);
4024 trace_buf[i + 6] = READ_VREG(MPC_E);
4025 trace_buf[i + 7] = READ_VREG(MPC_E);
4026 trace_buf[i + 8] = READ_VREG(MPC_E);
4027 trace_buf[i + 9] = READ_VREG(MPC_E);
4028 trace_buf[i + 10] = READ_VREG(MPC_E);
4029 trace_buf[i + 11] = READ_VREG(MPC_E);
4030 trace_buf[i + 12] = READ_VREG(MPC_E);
4031 trace_buf[i + 13] = READ_VREG(MPC_E);
4032 trace_buf[i + 14] = READ_VREG(MPC_E);
4033 trace_buf[i + 15] = READ_VREG(MPC_E);
4034 i += 16;
4035 };
4036 pr_info("dump trace steps:%d finished\n", debug_trace_num);
4037 if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
4038 mutex_unlock(&vdec_mutex);
4039 else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
4040 /*TODO:M6 define */
4041 /*
4042 * switch_mod_gate_by_type(MOD_VDEC, 0);
4043 */
4044 amports_switch_gate("vdec", 0);
4045 }
4046 for (i = 0; i < debug_trace_num; i++) {
4047 if (i % 4 == 0) {
4048 if (i % 16 == 0)
4049 pbuf += sprintf(pbuf, "\n");
4050 else if (i % 8 == 0)
4051 pbuf += sprintf(pbuf, " ");
4052 else /* 4 */
4053 pbuf += sprintf(pbuf, " ");
4054 }
4055 pbuf += sprintf(pbuf, "%04x:", trace_buf[i]);
4056 }
4057 while (i < debug_trace_num)
4058 ;
4059 kfree(trace_buf);
4060 pbuf += sprintf(pbuf, "\n");
4061 ret = pbuf - buf;
4062 return ret;
4063}
4064
4065static ssize_t clock_level_show(struct class *class,
4066 struct class_attribute *attr, char *buf)
4067{
4068 char *pbuf = buf;
4069 size_t ret;
4070
4071 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_1));
4072
4073 if (has_vdec2())
4074 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_2));
4075
4076 if (has_hevc_vdec())
4077 pbuf += sprintf(pbuf, "%dMHZ\n", vdec_clk_get(VDEC_HEVC));
4078
4079 ret = pbuf - buf;
4080 return ret;
4081}
4082
4083static ssize_t store_poweron_clock_level(struct class *class,
4084 struct class_attribute *attr,
4085 const char *buf, size_t size)
4086{
4087 unsigned int val;
4088 ssize_t ret;
4089
4090 /*ret = sscanf(buf, "%d", &val);*/
4091 ret = kstrtoint(buf, 0, &val);
4092
4093 if (ret != 0)
4094 return -EINVAL;
4095 poweron_clock_level = val;
4096 return size;
4097}
4098
4099static ssize_t show_poweron_clock_level(struct class *class,
4100 struct class_attribute *attr, char *buf)
4101{
4102 return sprintf(buf, "%d\n", poweron_clock_level);
4103}
4104
4105/*
4106 *if keep_vdec_mem == 1
4107 *always don't release
4108 *vdec 64 memory for fast play.
4109 */
4110static ssize_t store_keep_vdec_mem(struct class *class,
4111 struct class_attribute *attr,
4112 const char *buf, size_t size)
4113{
4114 unsigned int val;
4115 ssize_t ret;
4116
4117 /*ret = sscanf(buf, "%d", &val);*/
4118 ret = kstrtoint(buf, 0, &val);
4119 if (ret != 0)
4120 return -EINVAL;
4121 keep_vdec_mem = val;
4122 return size;
4123}
4124
4125static ssize_t show_keep_vdec_mem(struct class *class,
4126 struct class_attribute *attr, char *buf)
4127{
4128 return sprintf(buf, "%d\n", keep_vdec_mem);
4129}
4130
4131#ifdef VDEC_DEBUG_SUPPORT
4132static ssize_t store_debug(struct class *class,
4133 struct class_attribute *attr,
4134 const char *buf, size_t size)
4135{
4136 struct vdec_s *vdec;
4137 struct vdec_core_s *core = vdec_core;
4138 unsigned long flags;
4139
4140 unsigned id;
4141 unsigned val;
4142 ssize_t ret;
4143 char cbuf[32];
4144
4145 cbuf[0] = 0;
4146 ret = sscanf(buf, "%s %x %x", cbuf, &id, &val);
4147 /*pr_info(
4148 "%s(%s)=>ret %ld: %s, %x, %x\n",
4149 __func__, buf, ret, cbuf, id, val);*/
4150 if (strcmp(cbuf, "schedule") == 0) {
4151 pr_info("VDEC_DEBUG: force schedule\n");
4152 up(&core->sem);
4153 } else if (strcmp(cbuf, "power_off") == 0) {
4154 pr_info("VDEC_DEBUG: power off core %d\n", id);
4155 vdec_poweroff(id);
4156 } else if (strcmp(cbuf, "power_on") == 0) {
4157 pr_info("VDEC_DEBUG: power_on core %d\n", id);
4158 vdec_poweron(id);
4159 } else if (strcmp(cbuf, "wr") == 0) {
4160 pr_info("VDEC_DEBUG: WRITE_VREG(0x%x, 0x%x)\n",
4161 id, val);
4162 WRITE_VREG(id, val);
4163 } else if (strcmp(cbuf, "rd") == 0) {
4164 pr_info("VDEC_DEBUG: READ_VREG(0x%x) = 0x%x\n",
4165 id, READ_VREG(id));
4166 } else if (strcmp(cbuf, "read_hevc_clk_reg") == 0) {
4167 pr_info(
4168 "VDEC_DEBUG: HHI_VDEC4_CLK_CNTL = 0x%x, HHI_VDEC2_CLK_CNTL = 0x%x\n",
4169 READ_HHI_REG(HHI_VDEC4_CLK_CNTL),
4170 READ_HHI_REG(HHI_VDEC2_CLK_CNTL));
4171 }
4172
4173 flags = vdec_core_lock(vdec_core);
4174
4175 list_for_each_entry(vdec,
4176 &core->connected_vdec_list, list) {
4177 pr_info("vdec: status %d, id %d\n", vdec->status, vdec->id);
4178 if (((vdec->status == VDEC_STATUS_CONNECTED
4179 || vdec->status == VDEC_STATUS_ACTIVE)) &&
4180 (vdec->id == id)) {
4181 /*to add*/
4182 break;
4183 }
4184 }
4185 vdec_core_unlock(vdec_core, flags);
4186 return size;
4187}
4188
4189static ssize_t show_debug(struct class *class,
4190 struct class_attribute *attr, char *buf)
4191{
4192 char *pbuf = buf;
4193 struct vdec_s *vdec;
4194 struct vdec_core_s *core = vdec_core;
4195 unsigned long flags = vdec_core_lock(vdec_core);
4196 u64 tmp;
4197
4198 pbuf += sprintf(pbuf,
4199 "============== help:\n");
4200 pbuf += sprintf(pbuf,
4201 "'echo xxx > debug' usuage:\n");
4202 pbuf += sprintf(pbuf,
4203 "schedule - trigger schedule thread to run\n");
4204 pbuf += sprintf(pbuf,
4205 "power_off core_num - call vdec_poweroff(core_num)\n");
4206 pbuf += sprintf(pbuf,
4207 "power_on core_num - call vdec_poweron(core_num)\n");
4208 pbuf += sprintf(pbuf,
4209 "wr adr val - call WRITE_VREG(adr, val)\n");
4210 pbuf += sprintf(pbuf,
4211 "rd adr - call READ_VREG(adr)\n");
4212 pbuf += sprintf(pbuf,
4213 "read_hevc_clk_reg - read HHI register for hevc clk\n");
4214 pbuf += sprintf(pbuf,
4215 "===================\n");
4216
4217 pbuf += sprintf(pbuf,
4218 "name(core)\tschedule_count\trun_count\tinput_underrun\tdecbuf_not_ready\trun_time\n");
4219 list_for_each_entry(vdec,
4220 &core->connected_vdec_list, list) {
4221 enum vdec_type_e type;
4222 if ((vdec->status == VDEC_STATUS_CONNECTED
4223 || vdec->status == VDEC_STATUS_ACTIVE)) {
4224 for (type = VDEC_1; type < VDEC_MAX; type++) {
4225 if (vdec->core_mask & (1 << type)) {
4226 pbuf += sprintf(pbuf, "%s(%d):",
4227 vdec->vf_provider_name, type);
4228 pbuf += sprintf(pbuf, "\t%d",
4229 vdec->check_count[type]);
4230 pbuf += sprintf(pbuf, "\t%d",
4231 vdec->run_count[type]);
4232 pbuf += sprintf(pbuf, "\t%d",
4233 vdec->input_underrun_count[type]);
4234 pbuf += sprintf(pbuf, "\t%d",
4235 vdec->not_run_ready_count[type]);
4236 tmp = vdec->run_clk[type] * 100;
4237 do_div(tmp, vdec->total_clk[type]);
4238 pbuf += sprintf(pbuf,
4239 "\t%d%%\n",
4240 vdec->total_clk[type] == 0 ? 0 :
4241 (u32)tmp);
4242 }
4243 }
4244 }
4245 }
4246
4247 vdec_core_unlock(vdec_core, flags);
4248 return pbuf - buf;
4249
4250}
4251#endif
4252
4253/*irq num as same as .dts*/
4254/*
4255 * interrupts = <0 3 1
4256 * 0 23 1
4257 * 0 32 1
4258 * 0 43 1
4259 * 0 44 1
4260 * 0 45 1>;
4261 * interrupt-names = "vsync",
4262 * "demux",
4263 * "parser",
4264 * "mailbox_0",
4265 * "mailbox_1",
4266 * "mailbox_2";
4267 */
/*
 * vdec_request_threaded_irq() - register (or re-point) a decoder ISR.
 *
 * The platform IRQ line for @num is requested only once; vdec_isr /
 * vdec_thread_isr are the real handlers and dispatch to the dev_isr /
 * dev_threaded_isr stored in isr_context.  Subsequent calls for the same
 * @num only swap those dispatch pointers (the line stays requested; see
 * vdec_free_irq(), which clears pointers without freeing the line).
 *
 * @num:      logical IRQ slot, must be < VDEC_IRQ_MAX
 * @handler:  hard-IRQ handler invoked via vdec_isr (may be NULL)
 * @thread_fn: threaded handler; when set, IRQF_ONESHOT is forced
 * @irqflags: flags used when no thread_fn is given
 *
 * Returns 0 on success, -EINVAL on bad slot/lookup, -EIO on request failure.
 */
s32 vdec_request_threaded_irq(enum vdec_irq_num num,
			irq_handler_t handler,
			irq_handler_t thread_fn,
			unsigned long irqflags,
			const char *devname, void *dev)
{
	s32 res_irq;
	s32 ret = 0;

	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return -EINVAL;
	}

	if (vdec_core->isr_context[num].irq < 0) {
		/* first user of this slot: resolve and request the line */
		res_irq = platform_get_irq(
			vdec_core->vdec_core_platform_device, num);
		if (res_irq < 0) {
			pr_err("[%s] get irq error!", __func__);
			return -EINVAL;
		}

		vdec_core->isr_context[num].irq = res_irq;
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;

		ret = request_threaded_irq(res_irq,
			vdec_isr,
			vdec_thread_isr,
			(thread_fn) ? IRQF_ONESHOT : irqflags,
			devname,
			&vdec_core->isr_context[num]);

		if (ret) {
			/* roll back the slot so a later call can retry */
			vdec_core->isr_context[num].irq = -1;
			vdec_core->isr_context[num].dev_isr = NULL;
			vdec_core->isr_context[num].dev_threaded_isr = NULL;
			vdec_core->isr_context[num].dev_id = NULL;

			pr_err("vdec irq register error for %s.\n", devname);
			return -EIO;
		}
	} else {
		/* line already requested: just re-point the dispatch hooks */
		vdec_core->isr_context[num].dev_isr = handler;
		vdec_core->isr_context[num].dev_threaded_isr = thread_fn;
		vdec_core->isr_context[num].dev_id = dev;
	}

	return ret;
}
EXPORT_SYMBOL(vdec_request_threaded_irq);
4320
4321s32 vdec_request_irq(enum vdec_irq_num num, irq_handler_t handler,
4322 const char *devname, void *dev)
4323{
4324 pr_debug("vdec_request_irq %p, %s\n", handler, devname);
4325
4326 return vdec_request_threaded_irq(num,
4327 handler,
4328 NULL,/*no thread_fn*/
4329 IRQF_SHARED,
4330 devname,
4331 dev);
4332}
4333EXPORT_SYMBOL(vdec_request_irq);
4334
/*
 * vdec_free_irq() - detach a decoder's handlers from IRQ slot @num.
 *
 * The platform IRQ line itself stays requested (it is shared across
 * decoder instances); only the dispatch pointers are cleared, then
 * synchronize_irq() waits out any handler still in flight.
 */
void vdec_free_irq(enum vdec_irq_num num, void *dev)
{
	if (num >= VDEC_IRQ_MAX) {
		pr_err("[%s] request irq error, irq num too big!", __func__);
		return;
	}
	/*
	 *assume amrisc is stopped already and there is no mailbox interrupt
	 * when we reset pointers here.
	 */
	vdec_core->isr_context[num].dev_isr = NULL;
	vdec_core->isr_context[num].dev_threaded_isr = NULL;
	vdec_core->isr_context[num].dev_id = NULL;
	/* wait for any in-flight handler before callers tear down @dev */
	synchronize_irq(vdec_core->isr_context[num].irq);
}
EXPORT_SYMBOL(vdec_free_irq);
4351
4352struct vdec_s *vdec_get_default_vdec_for_userdata(void)
4353{
4354 struct vdec_s *vdec;
4355 struct vdec_s *ret_vdec;
4356 struct vdec_core_s *core = vdec_core;
4357 unsigned long flags;
4358 int id;
4359
4360 flags = vdec_core_lock(vdec_core);
4361
4362 id = 0x10000000;
4363 ret_vdec = NULL;
4364 if (!list_empty(&core->connected_vdec_list)) {
4365 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4366 if (vdec->id < id) {
4367 id = vdec->id;
4368 ret_vdec = vdec;
4369 }
4370 }
4371 }
4372
4373 vdec_core_unlock(vdec_core, flags);
4374
4375 return ret_vdec;
4376}
4377EXPORT_SYMBOL(vdec_get_default_vdec_for_userdata);
4378
4379struct vdec_s *vdec_get_vdec_by_id(int vdec_id)
4380{
4381 struct vdec_s *vdec;
4382 struct vdec_s *ret_vdec;
4383 struct vdec_core_s *core = vdec_core;
4384 unsigned long flags;
4385
4386 flags = vdec_core_lock(vdec_core);
4387
4388 ret_vdec = NULL;
4389 if (!list_empty(&core->connected_vdec_list)) {
4390 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4391 if (vdec->id == vdec_id) {
4392 ret_vdec = vdec;
4393 break;
4394 }
4395 }
4396 }
4397
4398 vdec_core_unlock(vdec_core, flags);
4399
4400 return ret_vdec;
4401}
4402EXPORT_SYMBOL(vdec_get_vdec_by_id);
4403
4404int vdec_read_user_data(struct vdec_s *vdec,
4405 struct userdata_param_t *p_userdata_param)
4406{
4407 int ret = 0;
4408
4409 if (!vdec)
4410 vdec = vdec_get_default_vdec_for_userdata();
4411
4412 if (vdec) {
4413 if (vdec->user_data_read)
4414 ret = vdec->user_data_read(vdec, p_userdata_param);
4415 }
4416 return ret;
4417}
4418EXPORT_SYMBOL(vdec_read_user_data);
4419
4420int vdec_wakeup_userdata_poll(struct vdec_s *vdec)
4421{
4422 if (vdec) {
4423 if (vdec->wakeup_userdata_poll)
4424 vdec->wakeup_userdata_poll(vdec);
4425 }
4426
4427 return 0;
4428}
4429EXPORT_SYMBOL(vdec_wakeup_userdata_poll);
4430
4431void vdec_reset_userdata_fifo(struct vdec_s *vdec, int bInit)
4432{
4433 if (!vdec)
4434 vdec = vdec_get_default_vdec_for_userdata();
4435
4436 if (vdec) {
4437 if (vdec->reset_userdata_fifo)
4438 vdec->reset_userdata_fifo(vdec, bInit);
4439 }
4440}
4441EXPORT_SYMBOL(vdec_reset_userdata_fifo);
4442
4443static int dump_mode;
4444static ssize_t dump_risc_mem_store(struct class *class,
4445 struct class_attribute *attr,
4446 const char *buf, size_t size)/*set*/
4447{
4448 unsigned int val;
4449 ssize_t ret;
4450 char dump_mode_str[4] = "PRL";
4451
4452 /*ret = sscanf(buf, "%d", &val);*/
4453 ret = kstrtoint(buf, 0, &val);
4454
4455 if (ret != 0)
4456 return -EINVAL;
4457 dump_mode = val & 0x3;
4458 pr_info("set dump mode to %d,%c_mem\n",
4459 dump_mode, dump_mode_str[dump_mode]);
4460 return size;
4461}
/*
 * Read one word of amrisc memory via the indirect access register pair:
 * write the address to 0x31b, read the data back from 0x31c.
 * The bank read is selected beforehand via register 0x31d (see
 * dump_pmem()/dump_lmem()).
 */
static u32 read_amrisc_reg(int reg)
{
	WRITE_VREG(0x31b, reg);
	return READ_VREG(0x31c);
}
4467
/*
 * Dump amrisc program memory to the kernel log, one word per line in the
 * same layout as a .o disassembly listing.
 * Writes 0x8000 to MPSR (0x301) and selects bank 0 (PMEM) via 0x31d.
 * NOTE(review): the loop bound is "< 0xfff", so address 0xfff itself is
 * not dumped — confirm whether this is intentional.
 */
static void dump_pmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 0);
	pr_info("start dump amrisc pmem of risc\n");
	for (i = 0; i < 0xfff; i++) {
		/*same as .o format*/
		pr_info("%08x // 0x%04x:\n", read_amrisc_reg(i), i);
	}
}
4480
/*
 * Dump amrisc local memory to the kernel log, one word per line.
 * Writes 0x8000 to MPSR (0x301) and selects bank 2 (LMEM) via 0x31d.
 * NOTE(review): the loop bound is "< 0x3ff", so the last address is not
 * dumped — confirm whether this is intentional.
 */
static void dump_lmem(void)
{
	int i;

	WRITE_VREG(0x301, 0x8000);
	WRITE_VREG(0x31d, 2);
	pr_info("start dump amrisc lmem\n");
	for (i = 0; i < 0x3ff; i++) {
		/*same as */
		pr_info("[%04x] = 0x%08x:\n", i, read_amrisc_reg(i));
	}
}
4493
/*
 * Sysfs show: dump the amrisc memory bank selected by dump_mode to the
 * kernel log (0 = PMEM, 2 = LMEM; other modes do nothing).
 *
 * The actual dump goes to dmesg; the sysfs buffer only receives "done"
 * (or a power-off notice).  On M8+ the core must be powered and the
 * dump runs under vdec_mutex; on M6 the vdec gate is toggled instead.
 */
static ssize_t dump_risc_mem_show(struct class *class,
		struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	int ret;

	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8) {
		mutex_lock(&vdec_mutex);
		if (!vdec_on(VDEC_1)) {
			/* cannot read amrisc memory of a powered-off core */
			mutex_unlock(&vdec_mutex);
			pbuf += sprintf(pbuf, "amrisc is power off\n");
			ret = pbuf - buf;
			return ret;
		}
	} else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 1);
		 */
		amports_switch_gate("vdec", 1);
	}
	/*start do**/
	switch (dump_mode) {
	case 0:
		dump_pmem();
		break;
	case 2:
		dump_lmem();
		break;
	default:
		break;
	}

	/*done*/
	if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M8)
		mutex_unlock(&vdec_mutex);
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_M6) {
		/*TODO:M6 define */
		/*
		 * switch_mod_gate_by_type(MOD_VDEC, 0);
		 */
		amports_switch_gate("vdec", 0);
	}
	return sprintf(buf, "done\n");
}
4539
4540static ssize_t core_show(struct class *class, struct class_attribute *attr,
4541 char *buf)
4542{
4543 struct vdec_core_s *core = vdec_core;
4544 char *pbuf = buf;
4545
4546 if (list_empty(&core->connected_vdec_list))
4547 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4548 else {
4549 struct vdec_s *vdec;
4550
4551 pbuf += sprintf(pbuf,
4552 " Core: last_sched %p, sched_mask %lx\n",
4553 core->last_vdec,
4554 core->sched_mask);
4555
4556 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4557 pbuf += sprintf(pbuf,
4558 "\tvdec.%d (%p (%s)), status = %s,\ttype = %s, \tactive_mask = %lx\n",
4559 vdec->id,
4560 vdec,
4561 vdec_device_name[vdec->format * 2],
4562 vdec_status_str(vdec),
4563 vdec_type_str(vdec),
4564 vdec->active_mask);
4565 }
4566 }
4567
4568 return pbuf - buf;
4569}
4570
/*
 * Sysfs show: detailed per-channel statistics for every connected or
 * active vdec instance, gathered through each instance's vdec_status()
 * hook.  Runs entirely under the core lock; returns the number of bytes
 * written (or an "err." / "No vdec." notice).
 */
static ssize_t vdec_status_show(struct class *class,
			struct class_attribute *attr, char *buf)
{
	char *pbuf = buf;
	struct vdec_s *vdec;
	struct vdec_info vs;
	unsigned char vdec_num = 0;
	struct vdec_core_s *core = vdec_core;
	unsigned long flags = vdec_core_lock(vdec_core);

	if (list_empty(&core->connected_vdec_list)) {
		pbuf += sprintf(pbuf, "No vdec.\n");
		goto out;
	}

	list_for_each_entry(vdec, &core->connected_vdec_list, list) {
		if ((vdec->status == VDEC_STATUS_CONNECTED
			|| vdec->status == VDEC_STATUS_ACTIVE)) {
			/* query the instance; abort the report on failure */
			memset(&vs, 0, sizeof(vs));
			if (vdec_status(vdec, &vs)) {
				pbuf += sprintf(pbuf, "err.\n");
				goto out;
			}
			pbuf += sprintf(pbuf,
				"vdec channel %u statistics:\n",
				vdec_num);
			pbuf += sprintf(pbuf,
				"%13s : %s\n", "device name",
				vs.vdec_name);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame width",
				vs.frame_width);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame height",
				vs.frame_height);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame rate",
				vs.frame_rate, "fps");
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "bit rate",
				vs.bit_rate / 1024 * 8, "kbps");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "status",
				vs.status);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame dur",
				vs.frame_dur);
			pbuf += sprintf(pbuf,
				"%13s : %u %s\n", "frame data",
				vs.frame_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "frame count",
				vs.frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "drop count",
				vs.drop_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "fra err count",
				vs.error_frame_count);
			pbuf += sprintf(pbuf,
				"%13s : %u\n", "hw err count",
				vs.error_count);
			pbuf += sprintf(pbuf,
				"%13s : %llu %s\n", "total data",
				vs.total_data / 1024, "KB");
			pbuf += sprintf(pbuf,
				"%13s : %x\n\n", "ratio_control",
				vs.ratio_control);

			vdec_num++;
		}
	}
out:
	vdec_core_unlock(vdec_core, flags);
	return pbuf - buf;
}
4647
4648static ssize_t dump_vdec_blocks_show(struct class *class,
4649 struct class_attribute *attr, char *buf)
4650{
4651 struct vdec_core_s *core = vdec_core;
4652 char *pbuf = buf;
4653 unsigned long flags = vdec_core_lock(vdec_core);
4654
4655 if (list_empty(&core->connected_vdec_list))
4656 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4657 else {
4658 struct vdec_s *vdec;
4659 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4660 pbuf += vdec_input_dump_blocks(&vdec->input,
4661 pbuf, PAGE_SIZE - (pbuf - buf));
4662 }
4663 }
4664 vdec_core_unlock(vdec_core, flags);
4665
4666 return pbuf - buf;
4667}
4668static ssize_t dump_vdec_chunks_show(struct class *class,
4669 struct class_attribute *attr, char *buf)
4670{
4671 struct vdec_core_s *core = vdec_core;
4672 char *pbuf = buf;
4673 unsigned long flags = vdec_core_lock(vdec_core);
4674
4675 if (list_empty(&core->connected_vdec_list))
4676 pbuf += sprintf(pbuf, "connected vdec list empty\n");
4677 else {
4678 struct vdec_s *vdec;
4679 list_for_each_entry(vdec, &core->connected_vdec_list, list) {
4680 pbuf += vdec_input_dump_chunks(vdec->id, &vdec->input,
4681 pbuf, PAGE_SIZE - (pbuf - buf));
4682 }
4683 }
4684 vdec_core_unlock(vdec_core, flags);
4685
4686 return pbuf - buf;
4687}
4688
4689static ssize_t dump_decoder_state_show(struct class *class,
4690 struct class_attribute *attr, char *buf)
4691{
4692 char *pbuf = buf;
4693 struct vdec_s *vdec;
4694 struct vdec_core_s *core = vdec_core;
4695 unsigned long flags = vdec_core_lock(vdec_core);
4696
4697 if (list_empty(&core->connected_vdec_list)) {
4698 pbuf += sprintf(pbuf, "No vdec.\n");
4699 } else {
4700 list_for_each_entry(vdec,
4701 &core->connected_vdec_list, list) {
4702 if ((vdec->status == VDEC_STATUS_CONNECTED
4703 || vdec->status == VDEC_STATUS_ACTIVE)
4704 && vdec->dump_state)
4705 vdec->dump_state(vdec);
4706 }
4707 }
4708 vdec_core_unlock(vdec_core, flags);
4709
4710 return pbuf - buf;
4711}
4712
4713static ssize_t dump_fps_show(struct class *class,
4714 struct class_attribute *attr, char *buf)
4715{
4716 char *pbuf = buf;
4717 struct vdec_core_s *core = vdec_core;
4718 int i;
4719
4720 unsigned long flags = vdec_fps_lock(vdec_core);
4721 for (i = 0; i < MAX_INSTANCE_MUN; i++)
4722 pbuf += sprintf(pbuf, "%d ", core->decode_fps[i].fps);
4723
4724 pbuf += sprintf(pbuf, "\n");
4725 vdec_fps_unlock(vdec_core, flags);
4726
4727 return pbuf - buf;
4728}
4729
4730
4731
/* sysfs attributes published under /sys/class/vdec/ */
static struct class_attribute vdec_class_attrs[] = {
	__ATTR_RO(amrisc_regs),
	__ATTR_RO(dump_trace),
	__ATTR_RO(clock_level),
	__ATTR(poweron_clock_level, S_IRUGO | S_IWUSR | S_IWGRP,
	show_poweron_clock_level, store_poweron_clock_level),
	__ATTR(dump_risc_mem, S_IRUGO | S_IWUSR | S_IWGRP,
	dump_risc_mem_show, dump_risc_mem_store),
	__ATTR(keep_vdec_mem, S_IRUGO | S_IWUSR | S_IWGRP,
	show_keep_vdec_mem, store_keep_vdec_mem),
	__ATTR_RO(core),
	__ATTR_RO(vdec_status),
	__ATTR_RO(dump_vdec_blocks),
	__ATTR_RO(dump_vdec_chunks),
	__ATTR_RO(dump_decoder_state),
#ifdef VDEC_DEBUG_SUPPORT
	__ATTR(debug, S_IRUGO | S_IWUSR | S_IWGRP,
	show_debug, store_debug),
#endif
#ifdef FRAME_CHECK
	__ATTR(dump_yuv, S_IRUGO | S_IWUSR | S_IWGRP,
	dump_yuv_show, dump_yuv_store),
	__ATTR(frame_check, S_IRUGO | S_IWUSR | S_IWGRP,
	frame_check_show, frame_check_store),
#endif
	__ATTR_RO(dump_fps),
	__ATTR_NULL
};
4760
/* the "vdec" device class; registered/unregistered in probe/remove */
static struct class vdec_class = {
		.name = "vdec",
		.class_attrs = vdec_class_attrs,
	};
4765
4766struct device *get_vdec_device(void)
4767{
4768 return &vdec_core->vdec_core_platform_device->dev;
4769}
4770EXPORT_SYMBOL(get_vdec_device);
4771
4772static int vdec_probe(struct platform_device *pdev)
4773{
4774 s32 i, r;
4775
4776 vdec_core = (struct vdec_core_s *)devm_kzalloc(&pdev->dev,
4777 sizeof(struct vdec_core_s), GFP_KERNEL);
4778 if (vdec_core == NULL) {
4779 pr_err("vdec core allocation failed.\n");
4780 return -ENOMEM;
4781 }
4782
4783 atomic_set(&vdec_core->vdec_nr, 0);
4784 sema_init(&vdec_core->sem, 1);
4785
4786 r = class_register(&vdec_class);
4787 if (r) {
4788 pr_info("vdec class create fail.\n");
4789 return r;
4790 }
4791
4792 vdec_core->vdec_core_platform_device = pdev;
4793
4794 platform_set_drvdata(pdev, vdec_core);
4795
4796 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4797 vdec_core->isr_context[i].index = i;
4798 vdec_core->isr_context[i].irq = -1;
4799 }
4800
4801 r = vdec_request_threaded_irq(VDEC_IRQ_0, NULL, NULL,
4802 IRQF_ONESHOT, "vdec-0", NULL);
4803 if (r < 0) {
4804 pr_err("vdec interrupt request failed\n");
4805 return r;
4806 }
4807
4808 r = vdec_request_threaded_irq(VDEC_IRQ_1, NULL, NULL,
4809 IRQF_ONESHOT, "vdec-1", NULL);
4810 if (r < 0) {
4811 pr_err("vdec interrupt request failed\n");
4812 return r;
4813 }
4814#if 0
4815 if (get_cpu_major_id() >= MESON_CPU_MAJOR_ID_G12A) {
4816 r = vdec_request_threaded_irq(VDEC_IRQ_HEVC_BACK, NULL, NULL,
4817 IRQF_ONESHOT, "vdec-hevc_back", NULL);
4818 if (r < 0) {
4819 pr_err("vdec interrupt request failed\n");
4820 return r;
4821 }
4822 }
4823#endif
4824 r = of_reserved_mem_device_init(&pdev->dev);
4825 if (r == 0)
4826 pr_info("vdec_probe done\n");
4827
4828 vdec_core->cma_dev = &pdev->dev;
4829
4830 if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_M8) {
4831 /* default to 250MHz */
4832 vdec_clock_hi_enable();
4833 }
4834
4835 if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_GXBB) {
4836 /* set vdec dmc request to urgent */
4837 WRITE_DMCREG(DMC_AM5_CHAN_CTRL, 0x3f203cf);
4838 }
4839 INIT_LIST_HEAD(&vdec_core->connected_vdec_list);
4840 spin_lock_init(&vdec_core->lock);
4841 spin_lock_init(&vdec_core->canvas_lock);
4842 spin_lock_init(&vdec_core->fps_lock);
4843 spin_lock_init(&vdec_core->input_lock);
4844 ida_init(&vdec_core->ida);
4845 vdec_core->thread = kthread_run(vdec_core_thread, vdec_core,
4846 "vdec-core");
4847
4848 vdec_core->vdec_core_wq = alloc_ordered_workqueue("%s",__WQ_LEGACY |
4849 WQ_MEM_RECLAIM |WQ_HIGHPRI/*high priority*/, "vdec-work");
4850 /*work queue priority lower than vdec-core.*/
4851 return 0;
4852}
4853
4854static int vdec_remove(struct platform_device *pdev)
4855{
4856 int i;
4857
4858 for (i = 0; i < VDEC_IRQ_MAX; i++) {
4859 if (vdec_core->isr_context[i].irq >= 0) {
4860 free_irq(vdec_core->isr_context[i].irq,
4861 &vdec_core->isr_context[i]);
4862 vdec_core->isr_context[i].irq = -1;
4863 vdec_core->isr_context[i].dev_isr = NULL;
4864 vdec_core->isr_context[i].dev_threaded_isr = NULL;
4865 vdec_core->isr_context[i].dev_id = NULL;
4866 }
4867 }
4868
4869 kthread_stop(vdec_core->thread);
4870
4871 destroy_workqueue(vdec_core->vdec_core_wq);
4872 class_unregister(&vdec_class);
4873
4874 return 0;
4875}
4876
/* devicetree match table: binds this driver to "amlogic, vdec" nodes */
static const struct of_device_id amlogic_vdec_dt_match[] = {
	{
		.compatible = "amlogic, vdec",
	},
	{},
};
4883
/* runtime-tunable knobs registered under the media.decoder config node */
static struct mconfig vdec_configs[] = {
	MC_PU32("debug_trace_num", &debug_trace_num),
	MC_PI32("hevc_max_reset_count", &hevc_max_reset_count),
	MC_PU32("clk_config", &clk_config),
	MC_PI32("step_mode", &step_mode),
	MC_PI32("poweron_clock_level", &poweron_clock_level),
};
/* node handle filled in by INIT_REG_NODE_CONFIGS() in vdec_module_init() */
static struct mconfig_node vdec_node;
4892
/* platform driver glue; registered from vdec_module_init() */
static struct platform_driver vdec_driver = {
	.probe = vdec_probe,
	.remove = vdec_remove,
	.driver = {
		.name = "vdec",
		.of_match_table = amlogic_vdec_dt_match,
	}
};
4901
/* advertises DRM frame-mode input capability via the codec profile list */
static struct codec_profile_t amvdec_input_profile = {
	.name = "vdec_input",
	.profile = "drm_framemode"
};
4906
4907int vdec_module_init(void)
4908{
4909 if (platform_driver_register(&vdec_driver)) {
4910 pr_info("failed to register vdec module\n");
4911 return -ENODEV;
4912 }
4913 INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
4914 "vdec", vdec_configs, CONFIG_FOR_RW);
4915 vcodec_profile_register(&amvdec_input_profile);
4916 return 0;
4917}
4918EXPORT_SYMBOL(vdec_module_init);
4919
/*
 * vdec_module_exit() - counterpart of vdec_module_init(); only the
 * platform driver is unregistered here (the config node and profile
 * registrations have no teardown in this code).
 */
void vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
EXPORT_SYMBOL(vdec_module_exit);
4925
/*
 * Dead legacy __init/__exit entry points, kept under #if 0: they are
 * superseded by the exported vdec_module_init()/vdec_module_exit()
 * above, which the media module loader calls instead of module_init().
 */
#if 0
static int __init vdec_module_init(void)
{
	if (platform_driver_register(&vdec_driver)) {
		pr_info("failed to register vdec module\n");
		return -ENODEV;
	}
	INIT_REG_NODE_CONFIGS("media.decoder", &vdec_node,
		"vdec", vdec_configs, CONFIG_FOR_RW);
	return 0;
}

static void __exit vdec_module_exit(void)
{
	platform_driver_unregister(&vdec_driver);
}
#endif
4943
/*
 * vdec_mem_device_init() - reserved_mem_ops callback: record @dev as
 * the CMA allocation device for the vdec reserved-memory region.
 */
static int vdec_mem_device_init(struct reserved_mem *rmem, struct device *dev)
{
	vdec_core->cma_dev = dev;

	return 0;
}
4950
/* ops attached to the vdec reserved-memory region in vdec_mem_setup() */
static const struct reserved_mem_ops rmem_vdec_ops = {
	.device_init = vdec_mem_device_init,
};
4954
/*
 * vdec_mem_setup() - early-boot hook for the "amlogic, vdec-memory"
 * reserved-memory node (see RESERVEDMEM_OF_DECLARE below); installs
 * rmem_vdec_ops so device binding reaches vdec_mem_device_init().
 */
static int __init vdec_mem_setup(struct reserved_mem *rmem)
{
	rmem->ops = &rmem_vdec_ops;
	pr_info("vdec: reserved mem setup\n");

	return 0;
}
4962
4963void vdec_fill_frame_info(struct vframe_qos_s *vframe_qos, int debug)
4964{
4965 if (frame_info_buf_in == NULL) {
4966 pr_info("error,frame_info_buf_in is null\n");
4967 return;
4968 }
4969 if (frame_info_buf_out == NULL) {
4970 pr_info("error,frame_info_buf_out is null\n");
4971 return;
4972 }
4973 if (frame_qos_wr >= QOS_FRAME_NUM)
4974 frame_qos_wr = 0;
4975
4976 if (frame_qos_wr >= QOS_FRAME_NUM ||
4977 frame_qos_wr < 0) {
4978 pr_info("error,index :%d is error\n", frame_qos_wr);
4979 return;
4980 }
4981 if (frameinfo_flag == DISABLE_FRAME_INFO)
4982 return;
4983
4984 if (frameinfo_flag == PRINT_FRAME_INFO) {
4985 pr_info("num %d size %d pts %d\n",
4986 vframe_qos->num,
4987 vframe_qos->size,
4988 vframe_qos->pts);
4989 pr_info("mv min_mv %d avg_mv %d max_mv %d\n",
4990 vframe_qos->min_mv,
4991 vframe_qos->avg_mv,
4992 vframe_qos->max_mv);
4993 pr_info("qp min_qp %d avg_qp %d max_qp %d\n",
4994 vframe_qos->min_qp,
4995 vframe_qos->avg_qp,
4996 vframe_qos->max_qp);
4997 pr_info("skip min_skip %d avg_skip %d max_skip %d\n",
4998 vframe_qos->min_skip,
4999 vframe_qos->avg_skip,
5000 vframe_qos->max_skip);
5001 }
5002 memcpy(&frame_info_buf_in[frame_qos_wr++],
5003 vframe_qos, sizeof(struct vframe_qos_s));
5004 if (frame_qos_wr >= QOS_FRAME_NUM)
5005 frame_qos_wr = 0;
5006
5007 /*pr_info("frame_qos_wr:%d\n", frame_qos_wr);*/
5008
5009}
5010EXPORT_SYMBOL(vdec_fill_frame_info);
5011
5012struct vframe_qos_s *vdec_get_qos_info(void)
5013{
5014 int write_count = 0;
5015 int qos_wr = frame_qos_wr;
5016
5017 if (frame_info_buf_in == NULL) {
5018 pr_info("error,frame_info_buf_in is null\n");
5019 return NULL;
5020 }
5021 if (frame_info_buf_out == NULL) {
5022 pr_info("error,frame_info_buf_out is null\n");
5023 return NULL;
5024 }
5025
5026
5027 memset(frame_info_buf_out, 0,
5028 QOS_FRAME_NUM*sizeof(struct vframe_qos_s));
5029 if (frame_qos_rd > qos_wr) {
5030 write_count = QOS_FRAME_NUM - frame_qos_rd;
5031 if (write_count > 0 && write_count <= QOS_FRAME_NUM) {
5032 memcpy(frame_info_buf_out, &frame_info_buf_in[0],
5033 write_count*sizeof(struct vframe_qos_s));
5034 if ((write_count + qos_wr) <= QOS_FRAME_NUM)
5035 memcpy(&frame_info_buf_out[write_count], frame_info_buf_in,
5036 qos_wr*sizeof(struct vframe_qos_s));
5037 else
5038 pr_info("get_qos_info:%d,out of range\n", __LINE__);
5039 } else
5040 pr_info("get_qos_info:%d,out of range\n", __LINE__);
5041 } else if (frame_qos_rd < qos_wr) {
5042 write_count = qos_wr - frame_qos_rd;
5043 if (write_count > 0 && write_count < QOS_FRAME_NUM)
5044 memcpy(frame_info_buf_out, &frame_info_buf_in[frame_qos_rd],
5045 (write_count)*sizeof(struct vframe_qos_s));
5046 else
5047 pr_info("get_qos_info:%d, out of range\n", __LINE__);
5048 }
5049 /*
5050 pr_info("cnt:%d,size:%d,num:%d,rd:%d,wr:%d\n",
5051 wirte_count,
5052 frame_info_buf_out[0].size,
5053 frame_info_buf_out[0].num,
5054 frame_qos_rd,qos_wr);
5055 */
5056 frame_qos_rd = qos_wr;
5057 return frame_info_buf_out;
5058}
5059EXPORT_SYMBOL(vdec_get_qos_info);
5060
5061
/* hook vdec_mem_setup() to the "amlogic, vdec-memory" DT reserved region */
RESERVEDMEM_OF_DECLARE(vdec, "amlogic, vdec-memory", vdec_mem_setup);
/*
uint force_hevc_clock_cntl;
EXPORT_SYMBOL(force_hevc_clock_cntl);

module_param(force_hevc_clock_cntl, uint, 0664);
*/
/* module parameters: runtime-tunable via /sys/module/.../parameters/ */
module_param(debug, uint, 0664);
module_param(debug_trace_num, uint, 0664);
module_param(hevc_max_reset_count, int, 0664);
module_param(clk_config, uint, 0664);
module_param(step_mode, int, 0664);
module_param(debugflags, int, 0664);
module_param(parallel_decode, int, 0664);
module_param(fps_detection, int, 0664);
module_param(fps_clear, int, 0664);
module_param(force_nosecure_even_drm, int, 0664);
module_param(disable_switch_single_to_mult, int, 0664);

module_param(frameinfo_flag, int, 0664);
MODULE_PARM_DESC(frameinfo_flag,
	"\n frameinfo_flag\n");
/*
*module_init(vdec_module_init);
*module_exit(vdec_module_exit);
*/
/* instantiate the tracepoints declared in vdec_trace.h */
#define CREATE_TRACE_POINTS
#include "vdec_trace.h"
MODULE_DESCRIPTION("AMLOGIC vdec driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Yao <timyao@amlogic.com>");
5093