1/*
2 * drivers/amlogic/media/frame_provider/decoder/utils/vdec_input.c
3 *
4 * Copyright (C) 2016 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 */
17
18#include <linux/uaccess.h>
19#include <linux/list.h>
20#include <linux/slab.h>
21#include <linux/dma-mapping.h>
22#include <linux/amlogic/media/codec_mm/codec_mm.h>
23
24#include "../../../stream_input/amports/amports_priv.h"
25#include "vdec.h"
26#include "vdec_input.h"
27
28#include <asm/cacheflush.h>
29#include <linux/crc32.h>
30
31
32#define VFRAME_BLOCK_SIZE (512 * SZ_1K) /* 512KB default initial block size, for 1080p. */
33#define VFRAME_BLOCK_SIZE_4K (2 * SZ_1M) /* 2MB default block size, for 4K. */
34#define VFRAME_BLOCK_SIZE_MAX (4 * SZ_1M)
35
36#define VFRAME_BLOCK_PAGEALIGN 4
37#define VFRAME_BLOCK_MIN_LEVEL (2 * SZ_1M)
38#define VFRAME_BLOCK_MAX_LEVEL (8 * SZ_1M)
39#define VFRAME_BLOCK_MAX_TOTAL_SIZE (16 * SZ_1M)
40
41/*
42 * Cap the buffered duration at 2 seconds (for OMX).
43 */
44#define MAX_FRAME_DURATION_S 2
45
46
47#define VFRAME_BLOCK_HOLE (SZ_64K)
48
49#define MIN_FRAME_PADDING_SIZE ((u32)(L1_CACHE_BYTES))
50
51#define EXTRA_PADDING_SIZE (16 * SZ_1K) /*HEVC_PADDING_SIZE*/
52
53#define MEM_NAME "VFRAME_INPUT"
54
55//static int vdec_input_get_duration_u64(struct vdec_input_s *input);
56static struct vframe_block_list_s *
57 vdec_input_alloc_new_block(struct vdec_input_s *input,
58 ulong phy_addr,
59 int size);
60
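/*
 * Copy 'n' bytes from 'from' to 'to': use copy_from_user() when 'from' is
 * a valid user-space pointer, otherwise treat it as a kernel buffer and
 * fall back to memcpy().
 */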
61static int aml_copy_from_user(void *to, const void *from, ulong n)
62{
63 int ret = 0;
64
65 if (likely(access_ok(VERIFY_READ, from, n)))
66 ret = copy_from_user(to, from, n);
67 else
68 memcpy(to, from, n);
69
70 return ret;
71}
72
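/*
 * Copy user data into a physical address range. When the range is already
 * kernel-mapped (is_mapped), copy directly, zero the padding and flush the
 * cache; otherwise vmap and copy the data in 1MB spans, handling the tail
 * and the padding separately.
 */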
73static int copy_from_user_to_phyaddr(void *virts, const char __user *buf,
74 u32 size, ulong phys, u32 pading, bool is_mapped)
75{
76 u32 i, span = SZ_1M;
77 u32 count = size / PAGE_ALIGN(span);
78 u32 remain = size % PAGE_ALIGN(span);
79 ulong addr = phys;
80 u8 *p = virts;
81
82 if (is_mapped) {
83 if (aml_copy_from_user(p, buf, size))
84 return -EFAULT;
85
86 if (pading)
87 memset(p + size, 0, pading);
88
89 codec_mm_dma_flush(p, size + pading, DMA_TO_DEVICE);
90
91 return 0;
92 }
93
94 for (i = 0; i < count; i++) {
95 addr = phys + i * span;
96 p = codec_mm_vmap(addr, span);
97 if (!p)
98 return -1;
99
100 if (aml_copy_from_user(p, buf + i * span, span)) {
101 codec_mm_unmap_phyaddr(p);
102 return -EFAULT;
103 }
104
105 codec_mm_dma_flush(p, span, DMA_TO_DEVICE);
106 codec_mm_unmap_phyaddr(p);
107 }
108
109 if (!remain)
110 return 0;
111
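 /* Copy the tail: reuse 'span' as the byte offset of the remaining data. */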
112 span = size - remain;
113 addr = phys + span;
114 p = codec_mm_vmap(addr, remain + pading);
115 if (!p)
116 return -1;
117
118 if (aml_copy_from_user(p, buf + span, remain)) {
119 codec_mm_unmap_phyaddr(p);
120 return -EFAULT;
121 }
122
123 if (pading)
124 memset(p + remain, 0, pading);
125
126 codec_mm_dma_flush(p, remain + pading, DMA_TO_DEVICE);
127 codec_mm_unmap_phyaddr(p);
128
129 return 0;
130}
131
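/*
 * Copy one frame (chunk) into a block at the current write position.
 * Linear frame blocks take a single copy plus zeroed padding; circular
 * blocks may wrap, so both the data and the padding can be split across
 * the end of the buffer.
 */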
132static int vframe_chunk_fill(struct vdec_input_s *input,
133 struct vframe_chunk_s *chunk, const char *buf,
134 size_t count, struct vframe_block_list_s *block)
135{
136 u8 *p = (u8 *)block->start_virt + block->wp;
137 if (block->type == VDEC_TYPE_FRAME_BLOCK) {
138 copy_from_user_to_phyaddr(p, buf, count,
139 block->start + block->wp,
140 chunk->pading_size,
141 block->is_mapped);
142 } else if (block->type == VDEC_TYPE_FRAME_CIRCULAR) {
143 size_t len = min((size_t)(block->size - block->wp), count);
144 u32 wp;
145
146 copy_from_user_to_phyaddr(p, buf, len,
147 block->start + block->wp, 0,
148 block->is_mapped);
149 p += len;
150
151 if (count > len) {
152 copy_from_user_to_phyaddr(p, buf + len,
153 count - len,
154 block->start, 0,
155 block->is_mapped);
156
157 p += count - len;
158 }
159
160 wp = block->wp + count;
161 if (wp >= block->size)
162 wp -= block->size;
163
164 len = min(block->size - wp, chunk->pading_size);
165
166 if (!block->is_mapped) {
167 p = codec_mm_vmap(block->start + wp, len);
168 memset(p, 0, len);
169 codec_mm_dma_flush(p, len, DMA_TO_DEVICE);
170 codec_mm_unmap_phyaddr(p);
171 } else {
172 memset(p, 0, len);
173 codec_mm_dma_flush(p, len, DMA_TO_DEVICE);
174 }
175
176 if (chunk->pading_size > len) {
177 p = (u8 *)block->start_virt;
178
179 if (!block->is_mapped) {
180 p = codec_mm_vmap(block->start,
181 chunk->pading_size - len);
182 memset(p, 0, chunk->pading_size - len);
183 codec_mm_dma_flush(p,
184 chunk->pading_size - len,
185 DMA_TO_DEVICE);
186 codec_mm_unmap_phyaddr(p);
187 } else {
188 memset(p, 0, chunk->pading_size - len);
189 codec_mm_dma_flush(p,
190 chunk->pading_size - len,
191 DMA_TO_DEVICE);
192 }
193 }
194 }
195
196 return 0;
197}
198
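/*
 * Free space left in a block: for linear frame blocks, the room after the
 * write pointer; for circular blocks, the distance from the write pointer
 * around to the read pointer.
 */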
199static inline u32 vframe_block_space(struct vframe_block_list_s *block)
200{
201 if (block->type == VDEC_TYPE_FRAME_BLOCK) {
202 return block->size - block->wp;
203 } else {
204 return (block->rp >= block->wp) ?
205 (block->rp - block->wp) :
206 (block->rp - block->wp + block->size);
207 }
208}
209
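/*
 * Account a newly filled chunk: advance the write pointer (with wrap),
 * update the block statistics and stamp the chunk with a sequence number.
 */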
210static void vframe_block_add_chunk(struct vframe_block_list_s *block,
211 struct vframe_chunk_s *chunk)
212{
213 block->wp += chunk->size + chunk->pading_size;
214 if (block->wp >= block->size)
215 block->wp -= block->size;
216 block->data_size += chunk->size;
217 block->chunk_count++;
218 chunk->block = block;
219 block->input->wr_block = block;
220 chunk->sequence = block->input->sequence;
221 block->input->sequence++;
222}
223
224static void vframe_block_free_block(struct vframe_block_list_s *block)
225{
226 if (block->addr) {
227 codec_mm_free_for_dma(MEM_NAME, block->addr);
228 }
229 /*
230 *pr_err("free block %d, size=%d\n", block->id, block->size);
231 */
232 kfree(block);
233}
234
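/*
 * Set up the backing storage of a block: either wrap an externally
 * provided physical buffer (the secure path) or allocate DMA memory from
 * codec_mm and record whether it is CPU-mapped.
 */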
235static int vframe_block_init_alloc_storage(struct vdec_input_s *input,
236 struct vframe_block_list_s *block,
237 ulong phy_addr,
238 int size)
239{
240 int alloc_size = input->default_block_size;
241 block->magic = 0x4b434c42;
242 block->input = input;
243 block->type = input->type;
244
245 /*
246 * TODO: use a different default size for each input type.
247 */
248 if (phy_addr) {
249 block->is_out_buf = 1;
250 block->start_virt = NULL;
251 block->start = phy_addr;
252 block->size = size;
253 } else {
254 alloc_size = PAGE_ALIGN(alloc_size);
255 block->addr = codec_mm_alloc_for_dma_ex(
256 MEM_NAME,
257 alloc_size/PAGE_SIZE,
258 VFRAME_BLOCK_PAGEALIGN,
259 CODEC_MM_FLAGS_DMA_CPU | CODEC_MM_FLAGS_FOR_VDECODER,
260 input->id,
261 block->id);
262
263 if (!block->addr) {
264 pr_err("Input block allocation failed\n");
265 return -ENOMEM;
266 }
267
268 block->start_virt = (void *)codec_mm_phys_to_virt(block->addr);
269 if (block->start_virt)
270 block->is_mapped = true;
271 block->start = block->addr;
272 block->size = alloc_size;
273 block->is_out_buf = 0;
274 }
275
276 return 0;
277}
278
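/* Initialize an input context: empty block/chunk lists, default sizes. */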
279void vdec_input_init(struct vdec_input_s *input, struct vdec_s *vdec)
280{
281 INIT_LIST_HEAD(&input->vframe_block_list);
282 INIT_LIST_HEAD(&input->vframe_block_free_list);
283 INIT_LIST_HEAD(&input->vframe_chunk_list);
284 spin_lock_init(&input->lock);
285 input->id = vdec->id;
286 input->block_nums = 0;
287 input->vdec = vdec;
288 input->block_id_seq = 0;
289 input->size = 0;
290 input->default_block_size = VFRAME_BLOCK_SIZE;
291}
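/*
 * Pre-allocate input blocks so the first frames do not stall on memory
 * allocation. Skipped for secure streams and when data already exists.
 */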
292int vdec_input_prepare_bufs(struct vdec_input_s *input,
293 int frame_width, int frame_height)
294{
295 struct vframe_block_list_s *block;
296 int i;
297 unsigned long flags;
298
299 if (vdec_secure(input->vdec))
300 return 0;
301 if (input->size > 0)
302 return 0; /* data has already been added; nothing to prepare. */
303 if (frame_width * frame_height >= 1920 * 1088) {
304 /* 1080p and larger streams start with the bigger (4K) default block. */
305 input->default_block_size = VFRAME_BLOCK_SIZE_4K;
306 }
307 /* Pre-allocate 3 blocks for a smooth start. */
308 for (i = 0; i < 3; i++) {
309 block = vdec_input_alloc_new_block(input, 0, 0);
310 if (!block)
311 break;
312 flags = vdec_input_lock(input);
313 list_move_tail(&block->list,
314 &input->vframe_block_free_list);
315 input->wr_block = NULL;
316 vdec_input_unlock(input, flags);
317 }
318 return 0;
319}
320
321static int vdec_input_dump_block_locked(
322 struct vframe_block_list_s *block,
323 char *buf, int size)
324{
325 char *pbuf = buf;
326 char sbuf[512];
327 int tsize = 0;
328 int s;
329 if (!pbuf) {
330 pbuf = sbuf;
331 size = 512;
332 }
333 #define BUFPRINT(args...) \
334 do {\
335 s = snprintf(pbuf, size - tsize, args);\
336 tsize += s;\
337 pbuf += s; \
338 } while (0)
339
340 BUFPRINT("\tblock:[%d:%p]-addr=%p,vstart=%p,type=%d\n",
341 block->id,
342 block,
343 (void *)block->addr,
344 (void *)block->start_virt,
345 block->type);
346 BUFPRINT("\t-blocksize=%d,data=%d,wp=%d,rp=%d,chunk_count=%d\n",
347 block->size,
348 block->data_size,
349 block->wp,
350 block->rp,
351 block->chunk_count);
352 /*
353 BUFPRINT("\tlist=%p,next=%p,prev=%p\n",
354 &block->list,
355 block->list.next,
356 block->list.prev);
357 */
358 #undef BUFPRINT
359 if (!buf)
360 pr_info("%s", sbuf);
361 return tsize;
362}
363
364int vdec_input_dump_blocks(struct vdec_input_s *input,
365 char *bufs, int size)
366{
367 struct list_head *p, *tmp;
368 unsigned long flags;
369 char *lbuf = bufs;
370 char sbuf[256];
371 int s = 0;
372
373 if (size <= 0)
374 return 0;
375 if (!bufs)
376 lbuf = sbuf;
377 s += snprintf(lbuf + s, size - s,
378 "blocks:vdec-%d id:%d,bufsize=%d,dsize=%d,frames:%d,dur:%dms\n",
379 input->id,
380 input->block_nums,
381 input->size,
382 input->data_size,
383 input->have_frame_num,
384 vdec_input_get_duration_u64(input)/1000);
385 if (bufs)
386 lbuf += s;
387 else {
388 pr_info("%s", sbuf);
389 lbuf = NULL;
390 }
391
392 flags = vdec_input_lock(input);
393 /* dump input blocks */
394 list_for_each_safe(p, tmp, &input->vframe_block_list) {
395 struct vframe_block_list_s *block = list_entry(
396 p, struct vframe_block_list_s, list);
397 if (bufs != NULL) {
398 lbuf = bufs + s;
399 if (size - s < 128)
400 break;
401 }
402 s += vdec_input_dump_block_locked(block, lbuf, size - s);
403 }
404 list_for_each_safe(p, tmp, &input->vframe_block_free_list) {
405 struct vframe_block_list_s *block = list_entry(
406 p, struct vframe_block_list_s, list);
407 if (bufs != NULL) {
408 lbuf = bufs + s;
409 if (size - s < 128)
410 break;
411 }
412 s += vdec_input_dump_block_locked(block, lbuf, size - s);
413 }
414 vdec_input_unlock(input, flags);
415 return s;
416}
417
418static int vdec_input_dump_chunk_locked(
419 int id,
420 struct vframe_chunk_s *chunk,
421 char *buf, int size)
422{
423 char *pbuf = buf;
424 char sbuf[512];
425 int tsize = 0;
426 int s;
427 if (!pbuf) {
428 pbuf = sbuf;
429 size = 512;
430 }
431 #define BUFPRINT(args...) \
432 do {\
433 s = snprintf(pbuf, size - tsize, args);\
434 tsize += s;\
435 pbuf += s; \
436 } while (0)
437
438 BUFPRINT(
439 "\t[%d][%lld:%p]-off=%d,size:%d,p:%d,\tpts64=%lld,addr=%p\n",
440 id,
441 chunk->sequence,
442 chunk->block,
443 chunk->offset,
444 chunk->size,
445 chunk->pading_size,
446 chunk->pts64,
447 (void *)(chunk->block->addr + chunk->offset));
448 /*
449 BUFPRINT("\tlist=%p,next=%p,prev=%p\n",
450 &chunk->list,
451 chunk->list.next,
452 chunk->list.prev);
453 */
454 #undef BUFPRINT
455 if (!buf)
456 pr_info("%s", sbuf);
457 return tsize;
458}
459
460int vdec_input_dump_chunks(int id, struct vdec_input_s *input,
461 char *bufs, int size)
462{
463
464 struct list_head *p, *tmp;
465 unsigned long flags;
466 char *lbuf = bufs;
467 char sbuf[256];
468 int s = 0;
469 int i = 0;
470
471 if (size <= 0)
472 return 0;
473 if (!bufs)
474 lbuf = sbuf;
475 s = snprintf(lbuf + s, size - s,
476 "[%d]blocks:vdec-%d id:%d,bufsize=%d,dsize=%d,frames:%d,maxframe:%d\n",
477 id,
478 input->id,
479 input->block_nums,
480 input->size,
481 input->data_size,
482 input->have_frame_num,
483 input->frame_max_size);
484 if (bufs)
485 lbuf += s;
486 if (!bufs) {
487 pr_info("%s", sbuf);
488 lbuf = NULL;
489 }
490 flags = vdec_input_lock(input);
491 /* dump the chunk list info. */
492 list_for_each_safe(p, tmp, &input->vframe_chunk_list) {
493 struct vframe_chunk_s *chunk = list_entry(
494 p, struct vframe_chunk_s, list);
495 if (bufs != NULL)
496 lbuf = bufs + s;
497 s += vdec_input_dump_chunk_locked(id, chunk, lbuf, size - s);
498 i++;
499 if (i >= 10)
500 break;
501 }
502 vdec_input_unlock(input, flags);
503 return s;
504}
505
506
507
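/*
 * Stream-based input: record the ring buffer start/size and allocate a
 * one-page swap buffer (TVP memory for secure streams) for the decoder's
 * input-context swap.
 */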
508int vdec_input_set_buffer(struct vdec_input_s *input, u32 start, u32 size)
509{
510 if (input_frame_based(input))
511 return -EINVAL;
512
513 input->start = start;
514 input->size = size;
515 input->swap_rp = start;
516
517 if (vdec_secure(input->vdec))
518 input->swap_page_phys = codec_mm_alloc_for_dma("SWAP",
519 1, 0, CODEC_MM_FLAGS_TVP);
520 else {
521 input->swap_page = alloc_page(GFP_KERNEL);
522 if (input->swap_page) {
523 input->swap_page_phys =
524 page_to_phys(input->swap_page);
525 }
526 }
527
528 if (input->swap_page_phys == 0)
529 return -ENOMEM;
530
531 return 0;
532}
533EXPORT_SYMBOL(vdec_input_set_buffer);
534
535void vdec_input_set_type(struct vdec_input_s *input, int type, int target)
536{
537 input->type = type;
538 input->target = target;
539 if (type == VDEC_TYPE_FRAME_CIRCULAR) {
540 /* Circular mode always uses the max block size. */
541 input->default_block_size = VFRAME_BLOCK_SIZE_MAX;
542 }
543}
544EXPORT_SYMBOL(vdec_input_set_type);
545
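/*
 * Report the buffer status: total size, buffered data length, an estimate
 * of the free space (max level minus the current level and a safety hole,
 * clamped at zero) and the running read count.
 */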
546int vdec_input_get_status(struct vdec_input_s *input,
547 struct vdec_input_status_s *status)
548{
549 unsigned long flags;
550
551 if (input->vdec == NULL)
552 return -EINVAL;
553
554 flags = vdec_input_lock(input);
555
556 if (list_empty(&input->vframe_block_list)) {
557 status->size = VFRAME_BLOCK_SIZE;
558 status->data_len = 0;
559 status->free_len = VFRAME_BLOCK_SIZE;
560 status->read_pointer = 0;
561 } else {
562 int r = VFRAME_BLOCK_MAX_LEVEL - vdec_input_level(input)
563 - VFRAME_BLOCK_HOLE;
564 status->size = input->size;
565 status->data_len = vdec_input_level(input);
566 status->free_len = (r > 0) ? r : 0;
567 status->read_pointer = input->total_rd_count;
568 }
569
570 vdec_input_unlock(input, flags);
571
572 return 0;
573}
574EXPORT_SYMBOL(vdec_input_get_status);
575
576static void vdec_input_add_block(struct vdec_input_s *input,
577 struct vframe_block_list_s *block)
578{
579 unsigned long flags;
580
581 flags = vdec_input_lock(input);
582 block->wp = 0;
583 block->id = input->block_id_seq++;
584 list_add_tail(&block->list, &input->vframe_block_list);
585 input->size += block->size;
586 input->block_nums++;
587 input->wr_block = block;
588 vdec_input_unlock(input, flags);
589}
590
591static inline void vdec_input_del_block_locked(struct vdec_input_s *input,
592 struct vframe_block_list_s *block)
593{
594 list_del(&block->list);
595 input->size -= block->size;
596 input->block_nums--;
597}
598
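/* Buffered data level in bytes: total bytes written minus bytes consumed. */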
599int vdec_input_level(struct vdec_input_s *input)
600{
601 return input->total_wr_count - input->total_rd_count;
602}
603EXPORT_SYMBOL(vdec_input_level);
604
605static struct vframe_block_list_s *
606 vdec_input_alloc_new_block(struct vdec_input_s *input,
607 ulong phy_addr,
608 int size)
609{
610 struct vframe_block_list_s *block;
611 block = kzalloc(sizeof(struct vframe_block_list_s),
612 GFP_KERNEL);
613 if (block == NULL) {
614 input->no_mem_err_cnt++;
615 pr_err("vframe_block structure allocation failed\n");
616 return NULL;
617 }
618
619 if (vframe_block_init_alloc_storage(input,
620 block, phy_addr, size) != 0) {
621 kfree(block);
622 pr_err("vframe_block storage allocation failed\n");
623 return NULL;
624 }
625
626 INIT_LIST_HEAD(&block->list);
627
628 vdec_input_add_block(input, block);
629
630 /*
631 *pr_info("vdec-%d:new block id=%d, total_blocks:%d, size=%d\n",
632 * input->id,
633 * block->id,
634 * input->block_nums,
635 * block->size);
636 */
637 if (0 && input->size > VFRAME_BLOCK_MAX_LEVEL * 2) {
638 /*
639 * Debug dump for oversized inputs (disabled by the "if (0)" condition).
640 */
641 pr_info(
642 "input[%d] reach max: size:%d, blocks:%d",
643 input->id,
644 input->size,
645 input->block_nums);
646 pr_info("level:%d, wr:%lld,rd:%lld\n",
647 vdec_input_level(input),
648 input->total_wr_count,
649 input->total_rd_count);
650 vdec_input_dump_blocks(input, NULL, 0);
651 }
652 return block;
653}
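/*
 * Estimate the buffered duration from the gap between input and consumed
 * PTS, falling back to last_duration * frame count when the gap is
 * unusable.
 */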
654int vdec_input_get_duration_u64(struct vdec_input_s *input)
655{
656 int duration = (input->last_inpts_u64 - input->last_comsumed_pts_u64);
657 if (input->last_in_nopts_cnt > 0 &&
658 input->last_comsumed_pts_u64 > 0 &&
659 input->last_duration > 0) {
660 duration += (input->last_in_nopts_cnt -
661 input->last_comsumed_no_pts_cnt) *
662 input->last_duration;
663 }
664 if (duration > 1000 * 1000000) /* more than 1000s: the PTS probably jumped. */
665 duration = 0;
666 if (duration <= 0 && input->last_duration > 0) {
667 /* Fall back to an estimate from the last known frame duration. */
668 duration = input->last_duration * input->have_frame_num;
669 }
670 if (duration < 0)
671 duration = 0;
672 return duration;
673}
674EXPORT_SYMBOL(vdec_input_get_duration_u64);
675
676/*
677 * ret >= 13: enough data is buffered; block adding more buffers.
678 */
679static int vdec_input_have_blocks_enough(struct vdec_input_s *input)
680{
681 int ret = 0;
682 if (vdec_input_level(input) > VFRAME_BLOCK_MIN_LEVEL)
683 ret += 1;
684 if (vdec_input_level(input) >= VFRAME_BLOCK_MAX_LEVEL)
685 ret += 2;
686 if (vdec_input_get_duration_u64(input) > MAX_FRAME_DURATION_S)
687 ret += 4;
688 if (input->have_frame_num > 30)
689 ret += 8;
690 else
691 ret -= 8;/*not enough frames.*/
692 if (input->size >= VFRAME_BLOCK_MAX_TOTAL_SIZE)
693 ret += 100; /* always block adding more buffers. */
694
695 return ret;
696}
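/*
 * Get a block with enough room for 'size' bytes: reuse one from the free
 * list if possible, otherwise allocate a new block (growing the default
 * block size for large frames), unless enough data is already buffered.
 */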
697static int vdec_input_get_free_block(
698 struct vdec_input_s *input,
699 int size, /* frame size + padding */
700 struct vframe_block_list_s **block_ret)
701{
702 struct vframe_block_list_s *to_freeblock = NULL;
703 struct vframe_block_list_s *block = NULL;
704 unsigned long flags;
705 flags = vdec_input_lock(input);
706 /*get from free list.*/
707 if (!list_empty(&input->vframe_block_free_list)) {
708 block = list_entry(input->vframe_block_free_list.next,
709 struct vframe_block_list_s, list);
710 if (block->size < (size)) {
711 vdec_input_del_block_locked(input, block);
712 to_freeblock = block;
713 block = NULL;
714 } else {
715 list_move_tail(&block->list,
716 &input->vframe_block_list);
717 input->wr_block = block; /* switch to the new block */
718 }
719 }
720 vdec_input_unlock(input, flags);
721 if (to_freeblock) {
722 /*free the small block.*/
723 vframe_block_free_block(to_freeblock);
724 }
725 if (block) {
726 *block_ret = block;
727 return 0;
728 }
729
730 if (vdec_input_have_blocks_enough(input) > 13) {
731 /* input buffers are full. */
732 return -EAGAIN;
733 }
734 if (input->no_mem_err_cnt > 3) {
735 /* Allocation has already failed
736 * too many times in a row. */
737 return -EAGAIN;
738 }
739 if (input->default_block_size <=
740 size * 2) {
741 int def_size = input->default_block_size;
742 do {
743 def_size *= 2;
744 } while ((def_size <= 2 * size) &&
745 (def_size <= VFRAME_BLOCK_SIZE_MAX));
746 if (def_size < size)
747 def_size = ALIGN(size + 64, (1 << 17));
748 /* 128KB aligned, the same granularity as codec_mm. */
749 input->default_block_size = def_size;
750 }
751 block = vdec_input_alloc_new_block(input, 0, 0);
752 if (!block) {
753 input->no_mem_err_cnt++;
754 return -EAGAIN;
755 }
756 input->no_mem_err_cnt = 0;
757 *block_ret = block;
758 return 0;
759}
760
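/*
 * Queue one frame of data. For secure streams the buffer is a physical
 * address wrapped in its own block; otherwise the data is copied into the
 * current write block (or a recycled/new block) and a chunk descriptor is
 * added to the chunk list.
 */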
761int vdec_input_add_chunk(struct vdec_input_s *input, const char *buf,
762 size_t count, u32 handle)
763{
764 unsigned long flags;
765 struct vframe_chunk_s *chunk;
766 struct vdec_s *vdec = input->vdec;
767 struct vframe_block_list_s *block;
768 int need_pading_size = MIN_FRAME_PADDING_SIZE;
769
770 if (vdec_secure(vdec)) {
771 block = vdec_input_alloc_new_block(input, (ulong)buf,
772 PAGE_ALIGN(count + HEVC_PADDING_SIZE + 1)); /* add padding larger than HEVC_PADDING_SIZE */
773 if (!block)
774 return -ENOMEM;
775 block->handle = handle;
776 } else {
777#if 0
778 if (add_count == 0) {
779 add_count++;
780 memcpy(sps, buf, 30);
781 return 30;
782 } else if (add_count == 1) {
783 add_count++;
784 memcpy(pps, buf, 8);
785 return 8;
786 }
787 add_count++;
788#endif
789
790#if 0
791 pr_info("vdec_input_add_frame add %p, count=%d\n", buf, (int)count);
792
793 if (count >= 8) {
794 pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
795 buf[0], buf[1], buf[2], buf[3],
796 buf[4], buf[5], buf[6], buf[7]);
797 }
798 if (count >= 16) {
799 pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
800 buf[8], buf[9], buf[10], buf[11],
801 buf[12], buf[13], buf[14], buf[15]);
802 }
803 if (count >= 24) {
804 pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
805 buf[16], buf[17], buf[18], buf[19],
806 buf[20], buf[21], buf[22], buf[23]);
807 }
808 if (count >= 32) {
809 pr_info("%02x %02x %02x %02x %02x %02x %02x %02x\n",
810 buf[24], buf[25], buf[26], buf[27],
811 buf[28], buf[29], buf[30], buf[31]);
812 }
813#endif
814 if (input_stream_based(input))
815 return -EINVAL;
816
817 if (count < PAGE_SIZE) {
818 need_pading_size = PAGE_ALIGN(count + need_pading_size) -
819 count;
820 } else {
821 /* pad up to 64-byte alignment. */
822 if (count & 0x3f)
823 need_pading_size += 64 - (count & 0x3f);
824 }
825 block = input->wr_block;
826 if (block &&
827 (vframe_block_space(block) > (count + need_pading_size))) {
828 /* This block has enough free space;
829 * nothing to do.
830 */
831 } else if (block && (block->type == VDEC_TYPE_FRAME_CIRCULAR)) {
832 /* Circular mode uses only one block;
833 * no space now, so ask the caller to retry. */
834 return -EAGAIN;
835 } else if (block != NULL) {
836 /* The current block lacks space for this frame;
837 * recycle it if it holds no data. */
838 flags = vdec_input_lock(input);
839 if (input->wr_block == block &&
840 block->chunk_count == 0) {
841 block->rp = 0;
842 block->wp = 0;
843 /* block holds no data: move it to the free list. */
844 list_move_tail(&block->list,
845 &input->vframe_block_free_list);
846 input->wr_block = NULL;
847 }
848 vdec_input_unlock(input, flags);
849 block = NULL;
850 }
851 if (!block) { /* try to get a new block. */
852 int ret = vdec_input_get_free_block(input,
853 count + need_pading_size + EXTRA_PADDING_SIZE,
854 &block);
855 if (ret < 0) /* no block available right now. */
856 return ret;
857 }
858 }
859
860 chunk = kzalloc(sizeof(struct vframe_chunk_s), GFP_KERNEL);
861
862 if (!chunk) {
863 pr_err("vframe_chunk structure allocation failed\n");
864 return -ENOMEM;
865 }
866
867 chunk->magic = 0x4b554843;
868 if (vdec->pts_valid) {
869 chunk->pts = vdec->pts;
870 chunk->pts64 = vdec->pts64;
871 }
872
873 if (vdec->timestamp_valid)
874 chunk->timestamp = vdec->timestamp;
875
876 if (vdec->pts_valid &&
877 input->last_inpts_u64 > 0 &&
878 input->last_in_nopts_cnt == 0) {
879 int d = (int)(chunk->pts64 - input->last_inpts_u64);
880 if (d > 0 && (d < input->last_duration))
881 input->last_duration = d;
882 /* Always keep the smallest observed duration;
883 * if the frame rate drops (e.g. 60fps -> 30fps)
884 * the stored value may be misleading.
885 */
886 }
887 chunk->pts_valid = vdec->pts_valid;
888 vdec->pts_valid = false;
889 INIT_LIST_HEAD(&chunk->list);
890
891 if (vdec_secure(vdec)) {
892 chunk->offset = 0;
893 chunk->size = count;
894 chunk->pading_size = PAGE_ALIGN(chunk->size + need_pading_size) -
895 chunk->size;
896 } else {
897 chunk->offset = block->wp;
898 chunk->size = count;
899 chunk->pading_size = need_pading_size;
900 if (vframe_chunk_fill(input, chunk, buf, count, block)) {
901 pr_err("vframe_chunk_fill failed\n");
902 kfree(chunk);
903 return -EFAULT;
904 }
905
906 }
907
908
909 flags = vdec_input_lock(input);
910
911 vframe_block_add_chunk(block, chunk);
912
913 list_add_tail(&chunk->list, &input->vframe_chunk_list);
914 input->data_size += chunk->size;
915 input->have_frame_num++;
916
917 if (input->have_frame_num == 1)
918 input->vdec_up(vdec);
919 ATRACE_COUNTER(MEM_NAME, input->have_frame_num);
920 if (chunk->pts_valid) {
921 input->last_inpts_u64 = chunk->pts64;
922 input->last_in_nopts_cnt = 0;
923 } else {
924 /*nopts*/
925 input->last_in_nopts_cnt++;
926 }
927 if (chunk->size > input->frame_max_size)
928 input->frame_max_size = chunk->size;
929 input->total_wr_count += count;
930 vdec_input_unlock(input, flags);
931#if 0
932 if (add_count == 2)
933 input->total_wr_count += 38;
934#endif
935
936 return count;
937}
938
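/*
 * Add one frame of data from user space. For secure (DRM) streams the
 * buffer holds struct drm_info records whose drm_phy fields point at the
 * protected frame data, and each record becomes its own chunk; for normal
 * streams the payload is forwarded to vdec_input_add_chunk().
 *
 * A rough sketch of the frame-based write path as used in this file
 * (error handling omitted):
 *
 *	vdec_input_init(&vdec->input, vdec);
 *	vdec_input_set_type(&vdec->input, VDEC_TYPE_FRAME_BLOCK, target);
 *	ret = vdec_input_add_frame(&vdec->input, user_buf, count);
 *	chunk = vdec_input_next_chunk(&vdec->input);
 *	... decode the chunk ...
 *	vdec_input_release_chunk(&vdec->input, chunk);
 */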
939int vdec_input_add_frame(struct vdec_input_s *input, const char *buf,
940 size_t count)
941{
942 int ret = 0;
943 struct drm_info drm;
944 struct vdec_s *vdec = input->vdec;
945 unsigned long phy_buf;
946
947 if (vdec_secure(vdec)) {
948 while (count > 0) {
949 if (count < sizeof(struct drm_info))
950 return -EIO;
951 if (copy_from_user(&drm, buf + ret, sizeof(struct drm_info)))
952 return -EAGAIN;
953 if (!(drm.drm_flag & TYPE_DRMINFO_V2))
954 return -EIO; /*must drm info v2 version*/
955 phy_buf = (unsigned long) drm.drm_phy;
956 vdec_input_add_chunk(input, (char *)phy_buf,
957 (size_t)drm.drm_pktsize, drm.handle);
958 count -= sizeof(struct drm_info);
959 ret += sizeof(struct drm_info);
960
961 /* The drm frame data might include header info and raw */
962 /* data, so the next drm unit still needs a valid pts. */
963 if (count >= sizeof(struct drm_info))
964 vdec->pts_valid = true;
965 }
966 } else {
967 ret = vdec_input_add_chunk(input, buf, count, 0);
968 }
969
970 return ret;
971}
972EXPORT_SYMBOL(vdec_input_add_frame);
973
974struct vframe_chunk_s *vdec_input_next_chunk(struct vdec_input_s *input)
975{
976 struct vframe_chunk_s *chunk = NULL;
977 unsigned long flags;
978 flags = vdec_input_lock(input);
979 if (!list_empty(&input->vframe_chunk_list)) {
980 chunk = list_first_entry(&input->vframe_chunk_list,
981 struct vframe_chunk_s, list);
982 }
983 vdec_input_unlock(input, flags);
984 return chunk;
985}
986EXPORT_SYMBOL(vdec_input_next_chunk);
987
988struct vframe_chunk_s *vdec_input_next_input_chunk(
989 struct vdec_input_s *input)
990{
991 struct vframe_chunk_s *chunk = NULL;
992 struct list_head *p;
993 unsigned long flags;
994 flags = vdec_input_lock(input);
995
996 list_for_each(p, &input->vframe_chunk_list) {
997 struct vframe_chunk_s *c = list_entry(
998 p, struct vframe_chunk_s, list);
999 if ((c->flag & VFRAME_CHUNK_FLAG_CONSUMED) == 0) {
1000 chunk = c;
1001 break;
1002 }
1003 }
1004 vdec_input_unlock(input, flags);
1005 return chunk;
1006}
1007EXPORT_SYMBOL(vdec_input_next_input_chunk);
1008
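/*
 * Return a consumed chunk to the input: advance the block read pointer,
 * update the accounting and recycle or free the block once it is empty,
 * without ever freeing the block currently used for writing.
 */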
1009void vdec_input_release_chunk(struct vdec_input_s *input,
1010 struct vframe_chunk_s *chunk)
1011{
1012 struct vframe_chunk_s *p;
1013 u32 chunk_valid = 0;
1014 unsigned long flags;
1015 struct vframe_block_list_s *block = chunk->block;
1016 struct vframe_block_list_s *tofreeblock = NULL;
1017 flags = vdec_input_lock(input);
1018
1019 list_for_each_entry(p, &input->vframe_chunk_list, list) {
1020 if (p == chunk) {
1021 chunk_valid = 1;
1022 break;
1023 }
1024 }
1025 /* Two threads can race here; if the other one already removed the chunk, return. */
1026 if (chunk_valid == 0) {
1027 vdec_input_unlock(input, flags);
1028 pr_err("%s chunk is deleted,so return.\n", __func__);
1029 return;
1030 }
1031
1032 list_del(&chunk->list);
1033 input->have_frame_num--;
1034 ATRACE_COUNTER(MEM_NAME, input->have_frame_num);
1035 if (chunk->pts_valid) {
1036 input->last_comsumed_no_pts_cnt = 0;
1037 input->last_comsumed_pts_u64 = chunk->pts64;
1038 } else
1039 input->last_comsumed_no_pts_cnt++;
1040 block->rp += chunk->size;
1041 if (block->rp >= block->size)
1042 block->rp -= block->size;
1043 block->data_size -= chunk->size;
1044 block->chunk_count--;
1045 input->data_size -= chunk->size;
1046 input->total_rd_count += chunk->size;
1047 if (block->is_out_buf) {
1048 list_move_tail(&block->list,
1049 &input->vframe_block_free_list);
1050 } else if (block->chunk_count == 0 &&
1051 input->wr_block != block) { /* don't free the block in use for writing. */
1052 if (block->size < input->default_block_size) {
1053 vdec_input_del_block_locked(input, block);
1054 tofreeblock = block;
1055 } else {
1056 block->rp = 0;
1057 block->wp = 0;
1058 list_move_tail(&block->list,
1059 &input->vframe_block_free_list);
1060 }
1061 }
1062
1063 vdec_input_unlock(input, flags);
1064 if (tofreeblock)
1065 vframe_block_free_block(tofreeblock);
1066 kfree(chunk);
1067}
1068EXPORT_SYMBOL(vdec_input_release_chunk);
1069
1070unsigned long vdec_input_lock(struct vdec_input_s *input)
1071{
1072 unsigned long flags;
1073
1074 spin_lock_irqsave(&input->lock, flags);
1075
1076 return flags;
1077}
1078EXPORT_SYMBOL(vdec_input_lock);
1079
1080void vdec_input_unlock(struct vdec_input_s *input, unsigned long flags)
1081{
1082 spin_unlock_irqrestore(&input->lock, flags);
1083}
1084EXPORT_SYMBOL(vdec_input_unlock);
1085
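/*
 * Tear down an input: release every queued chunk, move all blocks to the
 * free list, free them and release the swap page.
 */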
1086void vdec_input_release(struct vdec_input_s *input)
1087{
1088 struct list_head *p, *tmp;
1089
1090 /* release chunk data */
1091 list_for_each_safe(p, tmp, &input->vframe_chunk_list) {
1092 struct vframe_chunk_s *chunk = list_entry(
1093 p, struct vframe_chunk_s, list);
1094 vdec_input_release_chunk(input, chunk);
1095 }
1096 list_for_each_safe(p, tmp, &input->vframe_block_list) {
1097 /* Should not normally happen; move any leftover blocks to the free list. */
1098 list_move_tail(p, &input->vframe_block_free_list);
1099 }
1100 /* release input blocks */
1101 list_for_each_safe(p, tmp, &input->vframe_block_free_list) {
1102 struct vframe_block_list_s *block = list_entry(
1103 p, struct vframe_block_list_s, list);
1104 vdec_input_del_block_locked(input, block);
1105 vframe_block_free_block(block);
1106 }
1107
1108 /* release swap pages */
1109 if (input->swap_page_phys) {
1110 if (vdec_secure(input->vdec))
1111 codec_mm_free_for_dma("SWAP", input->swap_page_phys);
1112 else
1113 __free_page(input->swap_page);
1114 input->swap_page = NULL;
1115 input->swap_page_phys = 0;
1116 }
1117 input->swap_valid = false;
1118}
1119EXPORT_SYMBOL(vdec_input_release);
1120
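/*
 * For secure streams, pop one block from the free list and return its DRM
 * handle so the protected buffer can be recycled by the caller.
 */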
1121u32 vdec_input_get_freed_handle(struct vdec_s *vdec)
1122{
1123 struct vframe_block_list_s *block;
1124 struct vdec_input_s *input = &vdec->input;
1125 unsigned long flags;
1126 u32 handle = 0;
1127
1128 if (!vdec_secure(vdec))
1129 return 0;
1130
1131 flags = vdec_input_lock(input);
1132 block = list_first_entry_or_null(&input->vframe_block_free_list,
1133 struct vframe_block_list_s, list);
1134
1135 if (block) {
1136 handle = block->handle;
1137 vdec_input_del_block_locked(input, block);
1138 kfree(block);
1139 }
1140 vdec_input_unlock(input, flags);
1141 return handle;
1142}
1143EXPORT_SYMBOL(vdec_input_get_freed_handle);
1144
1145
1146