summaryrefslogtreecommitdiff
path: root/drivers/frame_sink/encoder/h264/encoder.c (plain)
blob: 294c60005cf4308a9e4eb99dfcb9d779cd7069d6
1/*
2 * drivers/amlogic/amports/encoder.c
3 *
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16*/
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/timer.h>
23#include <linux/fs.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spinlock.h>
29#include <linux/ctype.h>
30#include <linux/amlogic/media/frame_sync/ptsserv.h>
31#include <linux/amlogic/media/utils/amstream.h>
32#include <linux/amlogic/media/canvas/canvas.h>
33#include <linux/amlogic/media/canvas/canvas_mgr.h>
34#include <linux/amlogic/media/codec_mm/codec_mm.h>
35
36#include <linux/amlogic/media/utils/vdec_reg.h>
37#include "../../../frame_provider/decoder/utils/vdec.h"
38#include <linux/delay.h>
39#include <linux/poll.h>
40#include <linux/of.h>
41#include <linux/of_fdt.h>
42#include <linux/dma-contiguous.h>
43#include <linux/kthread.h>
44#include <linux/sched/rt.h>
45#include <linux/amlogic/media/utils/amports_config.h>
46#include "encoder.h"
47#include "../../../frame_provider/decoder/utils/amvdec.h"
48#include <linux/amlogic/media/utils/amlog.h>
49#include "../../../stream_input/amports/amports_priv.h"
50#include "../../../frame_provider/decoder/utils/firmware.h"
51#include <linux/of_reserved_mem.h>
52
53
54#ifdef CONFIG_AM_JPEG_ENCODER
55#include "jpegenc.h"
56#endif
57
58#define ENCODE_NAME "encoder"
59#define AMVENC_CANVAS_INDEX 0xE4
60#define AMVENC_CANVAS_MAX_INDEX 0xEF
61
62#define MIN_SIZE amvenc_buffspec[0].min_buffsize
63#define DUMP_INFO_BYTES_PER_MB 80
64
65#define ADJUSTED_QP_FLAG 64
66
67static s32 avc_device_major;
68static struct device *amvenc_avc_dev;
69#define DRIVER_NAME "amvenc_avc"
70#define CLASS_NAME "amvenc_avc"
71#define DEVICE_NAME "amvenc_avc"
72
73static struct encode_manager_s encode_manager;
74
75#define MULTI_SLICE_MC
76#define H264_ENC_CBR
77/* #define MORE_MODULE_PARAM */
78
79#define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX
80
81#define UCODE_MODE_FULL 0
82
83/* #define ENABLE_IGNORE_FUNCTION */
84
85static u32 ie_me_mb_type;
86static u32 ie_me_mode;
87static u32 ie_pippeline_block = 3;
88static u32 ie_cur_ref_sel;
89/* static u32 avc_endian = 6; */
90static u32 clock_level = 5;
91
92static u32 encode_print_level = LOG_DEBUG;
93static u32 no_timeout;
94static int nr_mode = -1;
95static u32 qp_table_debug;
96
97#ifdef H264_ENC_SVC
98static u32 svc_enable = 0; /* Enable sac feature or not */
99static u32 svc_ref_conf = 0; /* Continuous no reference numbers */
100#endif
101
102static u32 me_mv_merge_ctl =
103 (0x1 << 31) | /* [31] me_merge_mv_en_16 */
104 (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
105 (0x1 << 29) | /* [29] me_merge_flex_en_16 */
106 (0x1 << 28) | /* [28] me_merge_sad_en_16 */
107 (0x1 << 27) | /* [27] me_merge_mv_en_8 */
108 (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
109 (0x1 << 25) | /* [25] me_merge_flex_en_8 */
110 (0x1 << 24) | /* [24] me_merge_sad_en_8 */
111 /* [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged */
112 (0x12 << 18) |
113 /* [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged */
114 (0x2b << 12) |
115 /* [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV */
116 (0x80 << 0);
117 /* ( 0x4 << 18) |
118 * // [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged
119 */
120 /* ( 0x3f << 12) |
121 * // [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged
122 */
123 /* ( 0xc0 << 0);
124 * // [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV
125 */
126
127static u32 me_mv_weight_01 = (0x40 << 24) | (0x30 << 16) | (0x20 << 8) | 0x30;
128static u32 me_mv_weight_23 = (0x40 << 8) | 0x30;
129static u32 me_sad_range_inc = 0x03030303;
130static u32 me_step0_close_mv = 0x003ffc21;
131static u32 me_f_skip_sad;
132static u32 me_f_skip_weight;
133static u32 me_sad_enough_01;/* 0x00018010; */
134static u32 me_sad_enough_23;/* 0x00000020; */
135
136/* [31:0] NUM_ROWS_PER_SLICE_P */
137/* [15:0] NUM_ROWS_PER_SLICE_I */
138static u32 fixed_slice_cfg;
139
140/* y tnr */
141static unsigned int y_tnr_mc_en = 1;
142static unsigned int y_tnr_txt_mode;
143static unsigned int y_tnr_mot_sad_margin = 1;
144static unsigned int y_tnr_mot_cortxt_rate = 1;
145static unsigned int y_tnr_mot_distxt_ofst = 5;
146static unsigned int y_tnr_mot_distxt_rate = 4;
147static unsigned int y_tnr_mot_dismot_ofst = 4;
148static unsigned int y_tnr_mot_frcsad_lock = 8;
149static unsigned int y_tnr_mot2alp_frc_gain = 10;
150static unsigned int y_tnr_mot2alp_nrm_gain = 216;
151static unsigned int y_tnr_mot2alp_dis_gain = 128;
152static unsigned int y_tnr_mot2alp_dis_ofst = 32;
153static unsigned int y_tnr_alpha_min = 32;
154static unsigned int y_tnr_alpha_max = 63;
155static unsigned int y_tnr_deghost_os;
156/* c tnr */
157static unsigned int c_tnr_mc_en = 1;
158static unsigned int c_tnr_txt_mode;
159static unsigned int c_tnr_mot_sad_margin = 1;
160static unsigned int c_tnr_mot_cortxt_rate = 1;
161static unsigned int c_tnr_mot_distxt_ofst = 5;
162static unsigned int c_tnr_mot_distxt_rate = 4;
163static unsigned int c_tnr_mot_dismot_ofst = 4;
164static unsigned int c_tnr_mot_frcsad_lock = 8;
165static unsigned int c_tnr_mot2alp_frc_gain = 10;
166static unsigned int c_tnr_mot2alp_nrm_gain = 216;
167static unsigned int c_tnr_mot2alp_dis_gain = 128;
168static unsigned int c_tnr_mot2alp_dis_ofst = 32;
169static unsigned int c_tnr_alpha_min = 32;
170static unsigned int c_tnr_alpha_max = 63;
171static unsigned int c_tnr_deghost_os;
172/* y snr */
173static unsigned int y_snr_err_norm = 1;
174static unsigned int y_snr_gau_bld_core = 1;
175static int y_snr_gau_bld_ofst = -1;
176static unsigned int y_snr_gau_bld_rate = 48;
177static unsigned int y_snr_gau_alp0_min;
178static unsigned int y_snr_gau_alp0_max = 63;
179static unsigned int y_bld_beta2alp_rate = 16;
180static unsigned int y_bld_beta_min;
181static unsigned int y_bld_beta_max = 63;
182/* c snr */
183static unsigned int c_snr_err_norm = 1;
184static unsigned int c_snr_gau_bld_core = 1;
185static int c_snr_gau_bld_ofst = -1;
186static unsigned int c_snr_gau_bld_rate = 48;
187static unsigned int c_snr_gau_alp0_min;
188static unsigned int c_snr_gau_alp0_max = 63;
189static unsigned int c_bld_beta2alp_rate = 16;
190static unsigned int c_bld_beta_min;
191static unsigned int c_bld_beta_max = 63;
192static unsigned int qp_mode;
193
194static DEFINE_SPINLOCK(lock);
195
196#define ADV_MV_LARGE_16x8 1
197#define ADV_MV_LARGE_8x16 1
198#define ADV_MV_LARGE_16x16 1
199
200/* me weight offset should not very small, it used by v1 me module. */
201/* the min real sad for me is 16 by hardware. */
202#define ME_WEIGHT_OFFSET 0x520
203#define I4MB_WEIGHT_OFFSET 0x655
204#define I16MB_WEIGHT_OFFSET 0x560
205
206#define ADV_MV_16x16_WEIGHT 0x080
207#define ADV_MV_16_8_WEIGHT 0x0e0
208#define ADV_MV_8x8_WEIGHT 0x240
209#define ADV_MV_4x4x4_WEIGHT 0x3000
210
211#define IE_SAD_SHIFT_I16 0x001
212#define IE_SAD_SHIFT_I4 0x001
213#define ME_SAD_SHIFT_INTER 0x001
214
215#define STEP_2_SKIP_SAD 0
216#define STEP_1_SKIP_SAD 0
217#define STEP_0_SKIP_SAD 0
218#define STEP_2_SKIP_WEIGHT 0
219#define STEP_1_SKIP_WEIGHT 0
220#define STEP_0_SKIP_WEIGHT 0
221
222#define ME_SAD_RANGE_0 0x1 /* 0x0 */
223#define ME_SAD_RANGE_1 0x0
224#define ME_SAD_RANGE_2 0x0
225#define ME_SAD_RANGE_3 0x0
226
227/* use 0 for v3, 0x18 for v2 */
228#define ME_MV_PRE_WEIGHT_0 0x18
229/* use 0 for v3, 0x18 for v2 */
230#define ME_MV_PRE_WEIGHT_1 0x18
231#define ME_MV_PRE_WEIGHT_2 0x0
232#define ME_MV_PRE_WEIGHT_3 0x0
233
234/* use 0 for v3, 0x18 for v2 */
235#define ME_MV_STEP_WEIGHT_0 0x18
236/* use 0 for v3, 0x18 for v2 */
237#define ME_MV_STEP_WEIGHT_1 0x18
238#define ME_MV_STEP_WEIGHT_2 0x0
239#define ME_MV_STEP_WEIGHT_3 0x0
240
241#define ME_SAD_ENOUGH_0_DATA 0x00
242#define ME_SAD_ENOUGH_1_DATA 0x04
243#define ME_SAD_ENOUGH_2_DATA 0x11
244#define ADV_MV_8x8_ENOUGH_DATA 0x20
245
246/* V4_COLOR_BLOCK_FIX */
247#define V3_FORCE_SKIP_SAD_0 0x10
248/* 4 Blocks */
249#define V3_FORCE_SKIP_SAD_1 0x60
250/* 16 Blocks + V3_SKIP_WEIGHT_2 */
251#define V3_FORCE_SKIP_SAD_2 0x250
252/* almost disable it -- use t_lac_coeff_2 output to F_ZERO is better */
253#define V3_ME_F_ZERO_SAD (ME_WEIGHT_OFFSET + 0x10)
254
255#define V3_IE_F_ZERO_SAD_I16 (I16MB_WEIGHT_OFFSET + 0x10)
256#define V3_IE_F_ZERO_SAD_I4 (I4MB_WEIGHT_OFFSET + 0x20)
257
258#define V3_SKIP_WEIGHT_0 0x10
259/* 4 Blocks 8 separate search sad can be very low */
260#define V3_SKIP_WEIGHT_1 0x8 /* (4 * ME_MV_STEP_WEIGHT_1 + 0x100) */
261#define V3_SKIP_WEIGHT_2 0x3
262
263#define V3_LEVEL_1_F_SKIP_MAX_SAD 0x0
264#define V3_LEVEL_1_SKIP_MAX_SAD 0x6
265
266#define I4_ipred_weight_most 0x18
267#define I4_ipred_weight_else 0x28
268
269#define C_ipred_weight_V 0x04
270#define C_ipred_weight_H 0x08
271#define C_ipred_weight_DC 0x0c
272
273#define I16_ipred_weight_V 0x04
274#define I16_ipred_weight_H 0x08
275#define I16_ipred_weight_DC 0x0c
276
277/* 0x00 same as disable */
278#define v3_left_small_max_ie_sad 0x00
279#define v3_left_small_max_me_sad 0x40
280
281#define v5_use_small_diff_cnt 0
282#define v5_simple_mb_inter_all_en 1
283#define v5_simple_mb_inter_8x8_en 1
284#define v5_simple_mb_inter_16_8_en 1
285#define v5_simple_mb_inter_16x16_en 1
286#define v5_simple_mb_intra_en 1
287#define v5_simple_mb_C_en 0
288#define v5_simple_mb_Y_en 1
289#define v5_small_diff_Y 0x10
290#define v5_small_diff_C 0x18
291/* shift 8-bits, 2, 1, 0, -1, -2, -3, -4 */
292#define v5_simple_dq_setting 0x43210fed
293#define v5_simple_me_weight_setting 0
294
295#ifdef H264_ENC_CBR
296#define CBR_TABLE_SIZE 0x800
297#define CBR_SHORT_SHIFT 12 /* same as disable */
298#define CBR_LONG_MB_NUM 2
299#define START_TABLE_ID 8
300#define CBR_LONG_THRESH 4
301#endif
302
/*
 * v3 ME MV-vs-SAD lookup table, 64 entries in four groups of 16.
 * Each entry packs an index in the upper half-word and a SAD-related
 * threshold in the lower half-word; groups correspond to motion-search
 * step0, step1, step2, and step2 for 4x4-8x8 blocks (see group comments).
 */
static u32 v3_mv_sad[64] = {
	/* For step0 */
	0x00000004,
	0x00010008,
	0x00020010,
	0x00030018,
	0x00040020,
	0x00050028,
	0x00060038,
	0x00070048,
	0x00080058,
	0x00090068,
	0x000a0080,
	0x000b0098,
	0x000c00b0,
	0x000d00c8,
	0x000e00e8,
	0x000f0110,
	/* For step1 */
	0x00100002,
	0x00110004,
	0x00120008,
	0x0013000c,
	0x00140010,
	0x00150014,
	0x0016001c,
	0x00170024,
	0x0018002c,
	0x00190034,
	0x001a0044,
	0x001b0054,
	0x001c0064,
	0x001d0074,
	0x001e0094,
	0x001f00b4,
	/* For step2 */
	0x00200006,
	0x0021000c,
	0x0022000c,
	0x00230018,
	0x00240018,
	0x00250018,
	0x00260018,
	0x00270030,
	0x00280030,
	0x00290030,
	0x002a0030,
	0x002b0030,
	0x002c0030,
	0x002d0030,
	0x002e0030,
	0x002f0050,
	/* For step2 4x4-8x8 */
	0x00300001,
	0x00310002,
	0x00320002,
	0x00330004,
	0x00340004,
	0x00350004,
	0x00360004,
	0x00370006,
	0x00380006,
	0x00390006,
	0x003a0006,
	0x003b0006,
	0x003c0006,
	0x003d0006,
	0x003e0006,
	0x003f0006
};
373
/*
 * Static DDR buffer layout for the encoder, level 0: up to 1920x1088.
 * All buf_start values are byte offsets from the work-queue's buf_start
 * base (see avc_buffspec_init); regions are laid out back to back and
 * the whole spec must fit in min_buffsize (0x1400000 = 20 MiB).
 */
static struct BuffInfo_s amvenc_buffspec[] = {
	{
		.lev_id = 0,
		.max_width = 1920,
		.max_height = 1088,
		.min_buffsize = 0x1400000,
		/* DCT (input macroblock) ring buffer */
		.dct = {
			.buf_start = 0,
			.buf_size = 0x800000, /* 1920x1088x4 */
		},
		/* reconstructed/reference luma, ping-pong pair 0/1 */
		.dec0_y = {
			.buf_start = 0x800000,
			.buf_size = 0x300000,
		},
		.dec1_y = {
			.buf_start = 0xb00000,
			.buf_size = 0x300000,
		},
		/* assist (firmware scratch) area */
		.assit = {
			.buf_start = 0xe10000,
			.buf_size = 0xc0000,
		},
		/* output bitstream ring buffer */
		.bitstream = {
			.buf_start = 0xf00000,
			.buf_size = 0x100000,
		},
		/* GE2D scaler intermediate buffer */
		.scale_buff = {
			.buf_start = 0x1000000,
			.buf_size = 0x300000,
		},
		/* per-MB dump info for CBR rate control */
		.dump_info = {
			.buf_start = 0x1300000,
			.buf_size = 0xa0000, /* (1920x1088/256)x80 */
		},
		/* CBR control table (see CBR_TABLE_SIZE) */
		.cbr_info = {
			.buf_start = 0x13b0000,
			.buf_size = 0x2000,
		}
	}
};
414
/* Firmware selector per SoC generation; used to index ucode_name[]. */
enum ucode_type_e {
	UCODE_GXL,
	UCODE_TXL,
	UCODE_G12A,
	UCODE_MAX
};

/*
 * Firmware blob names, indexed by enum ucode_type_e.
 * Keep entry order in sync with the enum above — select_ucode()
 * returns ucode_name[ucode] directly.
 */
const char *ucode_name[] = {
	"gxl_h264_enc",
	"txl_h264_enc_cavlc",
	"ga_h264_enc_cabac",
};
427
428static void dma_flush(u32 buf_start, u32 buf_size);
429static void cache_flush(u32 buf_start, u32 buf_size);
430static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr);
431static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg);
432
433static const char *select_ucode(u32 ucode_index)
434{
435 enum ucode_type_e ucode = UCODE_GXL;
436
437 switch (ucode_index) {
438 case UCODE_MODE_FULL:
439 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
440 ucode = UCODE_G12A;
441 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL)
442 ucode = UCODE_TXL;
443 else /* (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) */
444 ucode = UCODE_GXL;
445 break;
446 break;
447 default:
448 break;
449 }
450 return (const char *)ucode_name[ucode];
451}
452
453static void hcodec_prog_qtbl(struct encode_wq_s *wq)
454{
455 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
456 (0 << 23) | /* quant_table_addr */
457 (1 << 22)); /* quant_table_addr_update */
458
459 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
460 wq->quant_tbl_i4[0]);
461 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
462 wq->quant_tbl_i4[1]);
463 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
464 wq->quant_tbl_i4[2]);
465 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
466 wq->quant_tbl_i4[3]);
467 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
468 wq->quant_tbl_i4[4]);
469 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
470 wq->quant_tbl_i4[5]);
471 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
472 wq->quant_tbl_i4[6]);
473 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
474 wq->quant_tbl_i4[7]);
475
476 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
477 (8 << 23) | /* quant_table_addr */
478 (1 << 22)); /* quant_table_addr_update */
479
480 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
481 wq->quant_tbl_i16[0]);
482 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
483 wq->quant_tbl_i16[1]);
484 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
485 wq->quant_tbl_i16[2]);
486 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
487 wq->quant_tbl_i16[3]);
488 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
489 wq->quant_tbl_i16[4]);
490 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
491 wq->quant_tbl_i16[5]);
492 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
493 wq->quant_tbl_i16[6]);
494 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
495 wq->quant_tbl_i16[7]);
496
497 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
498 (16 << 23) | /* quant_table_addr */
499 (1 << 22)); /* quant_table_addr_update */
500
501 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
502 wq->quant_tbl_me[0]);
503 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
504 wq->quant_tbl_me[1]);
505 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
506 wq->quant_tbl_me[2]);
507 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
508 wq->quant_tbl_me[3]);
509 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
510 wq->quant_tbl_me[4]);
511 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
512 wq->quant_tbl_me[5]);
513 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
514 wq->quant_tbl_me[6]);
515 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
516 wq->quant_tbl_me[7]);
517}
518
519static void InitEncodeWeight(void)
520{
521 me_mv_merge_ctl =
522 (0x1 << 31) | /* [31] me_merge_mv_en_16 */
523 (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
524 (0x1 << 29) | /* [29] me_merge_flex_en_16 */
525 (0x1 << 28) | /* [28] me_merge_sad_en_16 */
526 (0x1 << 27) | /* [27] me_merge_mv_en_8 */
527 (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
528 (0x1 << 25) | /* [25] me_merge_flex_en_8 */
529 (0x1 << 24) | /* [24] me_merge_sad_en_8 */
530 (0x12 << 18) |
531 /* [23:18] me_merge_mv_diff_16 - MV diff
532 * <= n pixel can be merged
533 */
534 (0x2b << 12) |
535 /* [17:12] me_merge_mv_diff_8 - MV diff
536 * <= n pixel can be merged
537 */
538 (0x80 << 0);
539 /* [11:0] me_merge_min_sad - SAD
540 * >= 0x180 can be merged with other MV
541 */
542
543 me_mv_weight_01 = (ME_MV_STEP_WEIGHT_1 << 24) |
544 (ME_MV_PRE_WEIGHT_1 << 16) |
545 (ME_MV_STEP_WEIGHT_0 << 8) |
546 (ME_MV_PRE_WEIGHT_0 << 0);
547
548 me_mv_weight_23 = (ME_MV_STEP_WEIGHT_3 << 24) |
549 (ME_MV_PRE_WEIGHT_3 << 16) |
550 (ME_MV_STEP_WEIGHT_2 << 8) |
551 (ME_MV_PRE_WEIGHT_2 << 0);
552
553 me_sad_range_inc = (ME_SAD_RANGE_3 << 24) |
554 (ME_SAD_RANGE_2 << 16) |
555 (ME_SAD_RANGE_1 << 8) |
556 (ME_SAD_RANGE_0 << 0);
557
558 me_step0_close_mv = (0x100 << 10) |
559 /* me_step0_big_sad -- two MV sad
560 * diff bigger will use use 1
561 */
562 (2 << 5) | /* me_step0_close_mv_y */
563 (2 << 0); /* me_step0_close_mv_x */
564
565 me_f_skip_sad = (0x00 << 24) | /* force_skip_sad_3 */
566 (STEP_2_SKIP_SAD << 16) | /* force_skip_sad_2 */
567 (STEP_1_SKIP_SAD << 8) | /* force_skip_sad_1 */
568 (STEP_0_SKIP_SAD << 0); /* force_skip_sad_0 */
569
570 me_f_skip_weight = (0x00 << 24) | /* force_skip_weight_3 */
571 /* force_skip_weight_2 */
572 (STEP_2_SKIP_WEIGHT << 16) |
573 /* force_skip_weight_1 */
574 (STEP_1_SKIP_WEIGHT << 8) |
575 /* force_skip_weight_0 */
576 (STEP_0_SKIP_WEIGHT << 0);
577
578 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
579 me_f_skip_sad = 0;
580 me_f_skip_weight = 0;
581 me_mv_weight_01 = 0;
582 me_mv_weight_23 = 0;
583 }
584
585 me_sad_enough_01 = (ME_SAD_ENOUGH_1_DATA << 12) |
586 /* me_sad_enough_1 */
587 (ME_SAD_ENOUGH_0_DATA << 0) |
588 /* me_sad_enough_0 */
589 (0 << 12) | /* me_sad_enough_1 */
590 (0 << 0); /* me_sad_enough_0 */
591
592 me_sad_enough_23 = (ADV_MV_8x8_ENOUGH_DATA << 12) |
593 /* adv_mv_8x8_enough */
594 (ME_SAD_ENOUGH_2_DATA << 0) |
595 /* me_sad_enough_2 */
596 (0 << 12) | /* me_sad_enough_3 */
597 (0 << 0); /* me_sad_enough_2 */
598}
599
/*
 * Configure the VLC output (bitstream) ring buffer.
 * Write pointer, software read pointer and start pointer all begin at
 * BitstreamStart; the register write order follows the hardware init
 * sequence and must not be rearranged.
 */
static void avc_init_output_buffer(struct encode_wq_s *wq)
{
	/* memory control: enable + burst/threshold fields */
	WRITE_HREG(HCODEC_VLC_VB_MEM_CTL,
		((1 << 31) | (0x3f << 24) |
		(0x20 << 16) | (2 << 0)));
	WRITE_HREG(HCODEC_VLC_VB_START_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_WR_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_END_PTR,
		wq->mem.BitstreamEnd);
	/* pulse bit 0 (reset), then program control fields with bit 0 clear */
	WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1);
	WRITE_HREG(HCODEC_VLC_VB_CONTROL,
		((0 << 14) | (7 << 3) |
		(1 << 1) | (0 << 0)));
}
619
/*
 * Configure the QDCT input (macroblock/DCT) ring buffer.
 * Read and write pointers both start at the buffer base; the BUFF
 * register is cleared last to arm the ring.
 */
static void avc_init_input_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_QDCT_MB_START_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_END_PTR,
		wq->mem.dct_buff_end_addr);
	WRITE_HREG(HCODEC_QDCT_MB_WR_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_RD_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_BUFF, 0);
}
633
/*
 * Point the ANC0 reference picture at the given canvas triple and
 * clear the HCMD config register.
 * @canvas: packed canvas indexes (Y/U/V planes) for the reference frame.
 */
static void avc_init_reference_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_ANC0_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_VLC_HCMD_CONFIG, 0);
}
640
/* Program the firmware assist/scratch buffer base offset. */
static void avc_init_assit_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(MEM_OFFSET_REG, wq->mem.assit_buffer_offset);
}
645
/*
 * Point reconstruction and deblock read/write at the same canvas
 * triple (same as INI_CANVAS): deblocking runs in place on the
 * reconstructed frame.
 * @canvas: packed canvas indexes (Y/U/V) for the recon frame.
 */
static void avc_init_dblk_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_REC_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKR_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKW_CANVAS_ADDR, canvas);
}
653
/*
 * Program per-picture encoder state before starting a frame.
 * @idr: true for an IDR picture — frame_num and pic_order_cnt_lsb are
 *       forced to 0 in that case, per the H.264 IDR reset semantics
 *       encoded in the two conditional writes below.
 */
static void avc_init_encoder(struct encode_wq_s *wq, bool idr)
{
	/* reset the VLC byte counter and set VLC/interrupt config */
	WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0);
	WRITE_HREG(HCODEC_VLC_CONFIG, 0x07);
	WRITE_HREG(HCODEC_VLC_INT_CONTROL, 0);

	/* assist AMRISC interrupt routing (values per ucode convention) */
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14);

	WRITE_HREG(IDR_PIC_ID, wq->pic.idr_pic_id);
	WRITE_HREG(FRAME_NUMBER,
		(idr == true) ? 0 : wq->pic.frame_number);
	WRITE_HREG(PIC_ORDER_CNT_LSB,
		(idr == true) ? 0 : wq->pic.pic_order_cnt_lsb);

	WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB,
		wq->pic.log2_max_pic_order_cnt_lsb);
	WRITE_HREG(LOG2_MAX_FRAME_NUM,
		wq->pic.log2_max_frame_num);
	WRITE_HREG(ANC0_BUFFER_ID, 0);
	WRITE_HREG(QPPICTURE, wq->pic.init_qppicture);
}
677
/*
 * Configure the six encoder canvases: Y/UV/UV for the dec0 recon
 * buffer (canvases +0..+2) and Y/UV/UV for the dec1 reference buffer
 * (canvases +3..+5), linear, no wrap.
 * Width is rounded up to 32, height to 16; chroma planes are NV12-style
 * interleaved UV at half height, so the "V" canvas aliases the UV one.
 */
static void avc_canvas_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	canvas_config(ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(1 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(2 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);

	canvas_config(3 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(4 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(5 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
}
714
/*
 * Resolve the static bufspec offsets into absolute addresses for this
 * work queue: DCT ring, recon/reference chroma planes (placed right
 * after their luma planes), assist area, bitstream ring, scaler and
 * CBR dump buffers, and the packed canvas-index words used by the
 * deblock/reference setup functions.
 */
static void avc_buffspec_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;
	u32 mb_w = (wq->pic.encoder_width + 15) >> 4;
	u32 mb_h = (wq->pic.encoder_height + 15) >> 4;
	u32 mbs = mb_w * mb_h; /* total macroblocks in the picture */

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	wq->mem.dct_buff_start_addr = start_addr +
		wq->mem.bufspec.dct.buf_start;
	wq->mem.dct_buff_end_addr =
		wq->mem.dct_buff_start_addr +
		wq->mem.bufspec.dct.buf_size - 1;
	enc_pr(LOG_INFO, "dct_buff_start_addr is 0x%x, wq:%p.\n",
		wq->mem.dct_buff_start_addr, (void *)wq);

	/* chroma (UV) planes directly follow their luma planes */
	wq->mem.bufspec.dec0_uv.buf_start =
		wq->mem.bufspec.dec0_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec0_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.bufspec.dec1_uv.buf_start =
		wq->mem.bufspec.dec1_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec1_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.assit_buffer_offset = start_addr +
		wq->mem.bufspec.assit.buf_start;
	enc_pr(LOG_INFO, "assit_buffer_offset is 0x%x, wq: %p.\n",
		wq->mem.assit_buffer_offset, (void *)wq);
	/*output stream buffer config*/
	wq->mem.BitstreamStart = start_addr +
		wq->mem.bufspec.bitstream.buf_start;
	wq->mem.BitstreamEnd =
		wq->mem.BitstreamStart +
		wq->mem.bufspec.bitstream.buf_size - 1;
	enc_pr(LOG_INFO, "BitstreamStart is 0x%x, wq: %p.\n",
		wq->mem.BitstreamStart, (void *)wq);

	wq->mem.scaler_buff_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.scale_buff.buf_start;
	wq->mem.dump_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.dump_info.buf_start;
	enc_pr(LOG_INFO,
		"CBR: dump_info_ddr_start_addr:%x.\n",
		wq->mem.dump_info_ddr_start_addr);
	enc_pr(LOG_INFO, "CBR: buf_start :%d.\n",
		wq->mem.buf_start);
	enc_pr(LOG_INFO, "CBR: dump_info.buf_start :%d.\n",
		wq->mem.bufspec.dump_info.buf_start);
	/* round the per-MB dump area up to a whole page */
	wq->mem.dump_info_ddr_size =
		DUMP_INFO_BYTES_PER_MB * mbs;
	wq->mem.dump_info_ddr_size =
		(wq->mem.dump_info_ddr_size + PAGE_SIZE - 1)
		& ~(PAGE_SIZE - 1);
	wq->mem.cbr_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.cbr_info.buf_start;
	wq->mem.cbr_info_ddr_size =
		wq->mem.bufspec.cbr_info.buf_size;
	/* NOTE(review): codec_mm_vmap can fail; the result is not checked
	 * here — confirm callers tolerate a NULL cbr_info_ddr_virt_addr.
	 */
	wq->mem.cbr_info_ddr_virt_addr =
		codec_mm_vmap(wq->mem.cbr_info_ddr_start_addr,
			wq->mem.bufspec.cbr_info.buf_size);

	/* packed Y/U/V canvas indexes for recon (dblk) and reference */
	wq->mem.dblk_buf_canvas =
		((ENC_CANVAS_OFFSET + 2) << 16) |
		((ENC_CANVAS_OFFSET + 1) << 8) |
		(ENC_CANVAS_OFFSET);
	wq->mem.ref_buf_canvas =
		((ENC_CANVAS_OFFSET + 5) << 16) |
		((ENC_CANVAS_OFFSET + 4) << 8) |
		(ENC_CANVAS_OFFSET + 3);
}
788
/*
 * Program intra-estimation / motion-estimation mode registers and the
 * fixed slice configuration.
 * @quant: currently unused by this function (kept for the call sites).
 */
static void avc_init_ie_me_parameter(struct encode_wq_s *wq, u32 quant)
{
	ie_cur_ref_sel = 0;
	ie_pippeline_block = 12;
	/* currently disable half and sub pixel */
	ie_me_mode =
		(ie_pippeline_block & IE_PIPPELINE_BLOCK_MASK) <<
		IE_PIPPELINE_BLOCK_SHIFT;

	WRITE_HREG(IE_ME_MODE, ie_me_mode);
	WRITE_HREG(IE_REF_SEL, ie_cur_ref_sel);
	WRITE_HREG(IE_ME_MB_TYPE, ie_me_mb_type);
#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		/* NOTE(review): mb_per_slice starts from the MB-row count
		 * (height/16) and is scaled by rows_per_slice — the name
		 * suggests MBs per slice but no width term appears; confirm
		 * against the ucode's FIXED_SLICE_CFG expectations.
		 */
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
}
816
817/* for temp */
818#define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1)
819#define HCODEC_MFDIN_REG0D (HCODEC_MFDIN_REGB_AMPC + 0x2)
820#define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3)
821#define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4)
822#define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5)
823#define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6)
824#define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7)
825#define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8)
826#define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9)
827#define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa)
828#define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb)
829
830static void mfdin_basic(u32 input, u8 iformat,
831 u8 oformat, u32 picsize_x, u32 picsize_y,
832 u8 r2y_en, u8 nr, u8 ifmt_extra)
833{
834 u8 dsample_en; /* Downsample Enable */
835 u8 interp_en; /* Interpolation Enable */
836 u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */
837 u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */
838 /* mfdin_reg3_canv[25:24];
839 * // bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3
840 */
841 u8 canv_idx0_bppx;
842 /* mfdin_reg3_canv[27:26];
843 * // bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3
844 */
845 u8 canv_idx1_bppx;
846 /* mfdin_reg3_canv[29:28];
847 * // bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3
848 */
849 u8 canv_idx0_bppy;
850 /* mfdin_reg3_canv[31:30];
851 * // bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3
852 */
853 u8 canv_idx1_bppy;
854 u8 ifmt444, ifmt422, ifmt420, linear_bytes4p;
855 u8 nr_enable;
856 u8 cfg_y_snr_en;
857 u8 cfg_y_tnr_en;
858 u8 cfg_c_snr_en;
859 u8 cfg_c_tnr_en;
860 u32 linear_bytesperline;
861 s32 reg_offset;
862 bool linear_enable = false;
863 bool format_err = false;
864
865 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
866 if ((iformat == 7) && (ifmt_extra > 2))
867 format_err = true;
868 } else if (iformat == 7)
869 format_err = true;
870
871 if (format_err) {
872 enc_pr(LOG_ERROR,
873 "mfdin format err, iformat:%d, ifmt_extra:%d\n",
874 iformat, ifmt_extra);
875 return;
876 }
877 if (iformat != 7)
878 ifmt_extra = 0;
879
880 ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) ||
881 (iformat == 9) || (iformat == 12)) ? 1 : 0;
882 if (iformat == 7 && ifmt_extra == 1)
883 ifmt444 = 1;
884 ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0;
885 if (iformat == 7 && ifmt_extra != 1)
886 ifmt422 = 1;
887 ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) ||
888 (iformat == 11)) ? 1 : 0;
889 dsample_en = ((ifmt444 && (oformat != 2)) ||
890 (ifmt422 && (oformat == 0))) ? 1 : 0;
891 interp_en = ((ifmt422 && (oformat == 2)) ||
892 (ifmt420 && (oformat != 0))) ? 1 : 0;
893 y_size = (oformat != 0) ? 1 : 0;
894 if (iformat == 12)
895 y_size = 0;
896 r2y_mode = (r2y_en == 1) ? 1 : 0; /* Fixed to 1 (TODO) */
897 canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 2 : 1;
898 canv_idx1_bppx = (iformat == 4) ? 0 : 1;
899 canv_idx0_bppy = 1;
900 canv_idx1_bppy = (iformat == 5) ? 1 : 0;
901
902 if ((iformat == 8) || (iformat == 9) || (iformat == 12))
903 linear_bytes4p = 3;
904 else if (iformat == 10)
905 linear_bytes4p = 2;
906 else if (iformat == 11)
907 linear_bytes4p = 1;
908 else
909 linear_bytes4p = 0;
910 if (iformat == 12)
911 linear_bytesperline = picsize_x * 4;
912 else
913 linear_bytesperline = picsize_x * linear_bytes4p;
914
915 if (iformat < 8)
916 linear_enable = false;
917 else
918 linear_enable = true;
919
920 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXBB) {
921 reg_offset = -8;
922 /* nr_mode: 0:Disabled 1:SNR Only 2:TNR Only 3:3DNR */
923 nr_enable = (nr) ? 1 : 0;
924 cfg_y_snr_en = ((nr == 1) || (nr == 3)) ? 1 : 0;
925 cfg_y_tnr_en = ((nr == 2) || (nr == 3)) ? 1 : 0;
926 cfg_c_snr_en = cfg_y_snr_en;
927 /* cfg_c_tnr_en = cfg_y_tnr_en; */
928 cfg_c_tnr_en = 0;
929
930 /* NR For Y */
931 WRITE_HREG((HCODEC_MFDIN_REG0D + reg_offset),
932 ((cfg_y_snr_en << 0) |
933 (y_snr_err_norm << 1) |
934 (y_snr_gau_bld_core << 2) |
935 (((y_snr_gau_bld_ofst) & 0xff) << 6) |
936 (y_snr_gau_bld_rate << 14) |
937 (y_snr_gau_alp0_min << 20) |
938 (y_snr_gau_alp0_max << 26)));
939 WRITE_HREG((HCODEC_MFDIN_REG0E + reg_offset),
940 ((cfg_y_tnr_en << 0) |
941 (y_tnr_mc_en << 1) |
942 (y_tnr_txt_mode << 2) |
943 (y_tnr_mot_sad_margin << 3) |
944 (y_tnr_alpha_min << 7) |
945 (y_tnr_alpha_max << 13) |
946 (y_tnr_deghost_os << 19)));
947 WRITE_HREG((HCODEC_MFDIN_REG0F + reg_offset),
948 ((y_tnr_mot_cortxt_rate << 0) |
949 (y_tnr_mot_distxt_ofst << 8) |
950 (y_tnr_mot_distxt_rate << 4) |
951 (y_tnr_mot_dismot_ofst << 16) |
952 (y_tnr_mot_frcsad_lock << 24)));
953 WRITE_HREG((HCODEC_MFDIN_REG10 + reg_offset),
954 ((y_tnr_mot2alp_frc_gain << 0) |
955 (y_tnr_mot2alp_nrm_gain << 8) |
956 (y_tnr_mot2alp_dis_gain << 16) |
957 (y_tnr_mot2alp_dis_ofst << 24)));
958 WRITE_HREG((HCODEC_MFDIN_REG11 + reg_offset),
959 ((y_bld_beta2alp_rate << 0) |
960 (y_bld_beta_min << 8) |
961 (y_bld_beta_max << 14)));
962
963 /* NR For C */
964 WRITE_HREG((HCODEC_MFDIN_REG12 + reg_offset),
965 ((cfg_y_snr_en << 0) |
966 (c_snr_err_norm << 1) |
967 (c_snr_gau_bld_core << 2) |
968 (((c_snr_gau_bld_ofst) & 0xff) << 6) |
969 (c_snr_gau_bld_rate << 14) |
970 (c_snr_gau_alp0_min << 20) |
971 (c_snr_gau_alp0_max << 26)));
972
973 WRITE_HREG((HCODEC_MFDIN_REG13 + reg_offset),
974 ((cfg_c_tnr_en << 0) |
975 (c_tnr_mc_en << 1) |
976 (c_tnr_txt_mode << 2) |
977 (c_tnr_mot_sad_margin << 3) |
978 (c_tnr_alpha_min << 7) |
979 (c_tnr_alpha_max << 13) |
980 (c_tnr_deghost_os << 19)));
981 WRITE_HREG((HCODEC_MFDIN_REG14 + reg_offset),
982 ((c_tnr_mot_cortxt_rate << 0) |
983 (c_tnr_mot_distxt_ofst << 8) |
984 (c_tnr_mot_distxt_rate << 4) |
985 (c_tnr_mot_dismot_ofst << 16) |
986 (c_tnr_mot_frcsad_lock << 24)));
987 WRITE_HREG((HCODEC_MFDIN_REG15 + reg_offset),
988 ((c_tnr_mot2alp_frc_gain << 0) |
989 (c_tnr_mot2alp_nrm_gain << 8) |
990 (c_tnr_mot2alp_dis_gain << 16) |
991 (c_tnr_mot2alp_dis_ofst << 24)));
992
993 WRITE_HREG((HCODEC_MFDIN_REG16 + reg_offset),
994 ((c_bld_beta2alp_rate << 0) |
995 (c_bld_beta_min << 8) |
996 (c_bld_beta_max << 14)));
997
998 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
999 (iformat << 0) | (oformat << 4) |
1000 (dsample_en << 6) | (y_size << 8) |
1001 (interp_en << 9) | (r2y_en << 12) |
1002 (r2y_mode << 13) | (ifmt_extra << 16) |
1003 (nr_enable << 19));
1004 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1005 (picsize_x << 14) | (picsize_y << 0));
1006 } else {
1007 reg_offset = 0;
1008 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
1009 (iformat << 0) | (oformat << 4) |
1010 (dsample_en << 6) | (y_size << 8) |
1011 (interp_en << 9) | (r2y_en << 12) |
1012 (r2y_mode << 13));
1013 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1014 (picsize_x << 12) | (picsize_y << 0));
1015 }
1016
1017 if (linear_enable == false) {
1018 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1019 (input & 0xffffff) |
1020 (canv_idx1_bppy << 30) |
1021 (canv_idx0_bppy << 28) |
1022 (canv_idx1_bppx << 26) |
1023 (canv_idx0_bppx << 24));
1024 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1025 (0 << 16) | (0 << 0));
1026 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0);
1027 } else {
1028 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1029 (canv_idx1_bppy << 30) |
1030 (canv_idx0_bppy << 28) |
1031 (canv_idx1_bppx << 26) |
1032 (canv_idx0_bppx << 24));
1033 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1034 (linear_bytes4p << 16) | (linear_bytesperline << 0));
1035 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input);
1036 }
1037
1038 if (iformat == 12)
1039 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1040 (2 << 0) | (1 << 3) | (0 << 6) |
1041 (3 << 9) | (6 << 12) | (5 << 15) |
1042 (4 << 18) | (7 << 21));
1043 else
1044 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1045 (7 << 0) | (6 << 3) | (5 << 6) |
1046 (4 << 9) | (3 << 12) | (2 << 15) |
1047 (1 << 18) | (0 << 21));
1048}
1049
1050#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1051static int scale_frame(struct encode_wq_s *wq,
1052 struct encode_request_s *request,
1053 struct config_para_ex_s *ge2d_config,
1054 u32 src_addr, bool canvas)
1055{
1056 struct ge2d_context_s *context = encode_manager.context;
1057 int src_top, src_left, src_width, src_height;
1058 struct canvas_s cs0, cs1, cs2, cd;
1059 u32 src_canvas, dst_canvas;
1060 u32 src_canvas_w, dst_canvas_w;
1061 u32 src_h = request->src_h;
1062 u32 dst_w = ((wq->pic.encoder_width + 15) >> 4) << 4;
1063 u32 dst_h = ((wq->pic.encoder_height + 15) >> 4) << 4;
1064 int input_format = GE2D_FORMAT_M24_NV21;
1065
1066 src_top = request->crop_top;
1067 src_left = request->crop_left;
1068 src_width = request->src_w - src_left - request->crop_right;
1069 src_height = request->src_h - src_top - request->crop_bottom;
1070 pr_err("request->fmt=%d, %d %d, canvas=%d\n", request->fmt, FMT_NV21, FMT_BGR888, canvas);
1071
1072 if (canvas) {
1073 if ((request->fmt == FMT_NV21)
1074 || (request->fmt == FMT_NV12)) {
1075 src_canvas = src_addr & 0xffff;
1076 input_format = GE2D_FORMAT_M24_NV21;
1077 } else if (request->fmt == FMT_BGR888) {
1078 src_canvas = src_addr & 0xffffff;
1079 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1080 } else if (request->fmt == FMT_RGBA8888) {
1081 src_canvas = src_addr & 0xffffff;
1082 input_format = GE2D_FORMAT_S32_ABGR;
1083 } else {
1084 src_canvas = src_addr & 0xffffff;
1085 input_format = GE2D_FORMAT_M24_YUV420;
1086 }
1087 } else {
1088 if ((request->fmt == FMT_NV21)
1089 || (request->fmt == FMT_NV12)) {
1090 src_canvas_w =
1091 ((request->src_w + 31) >> 5) << 5;
1092 canvas_config(ENC_CANVAS_OFFSET + 9,
1093 src_addr,
1094 src_canvas_w, src_h,
1095 CANVAS_ADDR_NOWRAP,
1096 CANVAS_BLKMODE_LINEAR);
1097 canvas_config(ENC_CANVAS_OFFSET + 10,
1098 src_addr + src_canvas_w * src_h,
1099 src_canvas_w, src_h / 2,
1100 CANVAS_ADDR_NOWRAP,
1101 CANVAS_BLKMODE_LINEAR);
1102 src_canvas =
1103 ((ENC_CANVAS_OFFSET + 10) << 8)
1104 | (ENC_CANVAS_OFFSET + 9);
1105 input_format = GE2D_FORMAT_M24_NV21;
1106 } else if (request->fmt == FMT_BGR888) {
1107 src_canvas_w =
1108 ((request->src_w + 31) >> 5) << 5;
1109
1110 canvas_config(ENC_CANVAS_OFFSET + 9,
1111 src_addr,
1112 src_canvas_w * 3, src_h,
1113 CANVAS_ADDR_NOWRAP,
1114 CANVAS_BLKMODE_LINEAR);
1115 src_canvas = ENC_CANVAS_OFFSET + 9;
1116 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1117 } else if (request->fmt == FMT_RGBA8888) {
1118 src_canvas_w =
1119 ((request->src_w + 31) >> 5) << 5;
1120 canvas_config(
1121 ENC_CANVAS_OFFSET + 9,
1122 src_addr,
1123 src_canvas_w * 4,
1124 src_h,
1125 CANVAS_ADDR_NOWRAP,
1126 CANVAS_BLKMODE_LINEAR);
1127 src_canvas = ENC_CANVAS_OFFSET + 9;
1128 input_format = GE2D_FORMAT_S32_ABGR; //Opposite color after ge2d
1129 } else {
1130 src_canvas_w =
1131 ((request->src_w + 63) >> 6) << 6;
1132 canvas_config(ENC_CANVAS_OFFSET + 9,
1133 src_addr,
1134 src_canvas_w, src_h,
1135 CANVAS_ADDR_NOWRAP,
1136 CANVAS_BLKMODE_LINEAR);
1137 canvas_config(ENC_CANVAS_OFFSET + 10,
1138 src_addr + src_canvas_w * src_h,
1139 src_canvas_w / 2, src_h / 2,
1140 CANVAS_ADDR_NOWRAP,
1141 CANVAS_BLKMODE_LINEAR);
1142 canvas_config(ENC_CANVAS_OFFSET + 11,
1143 src_addr + src_canvas_w * src_h * 5 / 4,
1144 src_canvas_w / 2, src_h / 2,
1145 CANVAS_ADDR_NOWRAP,
1146 CANVAS_BLKMODE_LINEAR);
1147 src_canvas =
1148 ((ENC_CANVAS_OFFSET + 11) << 16) |
1149 ((ENC_CANVAS_OFFSET + 10) << 8) |
1150 (ENC_CANVAS_OFFSET + 9);
1151 input_format = GE2D_FORMAT_M24_YUV420;
1152 }
1153 }
1154
1155 dst_canvas_w = ((dst_w + 31) >> 5) << 5;
1156
1157 canvas_config(ENC_CANVAS_OFFSET + 6,
1158 wq->mem.scaler_buff_start_addr,
1159 dst_canvas_w, dst_h,
1160 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1161
1162 canvas_config(ENC_CANVAS_OFFSET + 7,
1163 wq->mem.scaler_buff_start_addr + dst_canvas_w * dst_h,
1164 dst_canvas_w, dst_h / 2,
1165 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1166
1167 dst_canvas = ((ENC_CANVAS_OFFSET + 7) << 8) |
1168 (ENC_CANVAS_OFFSET + 6);
1169
1170 ge2d_config->alu_const_color = 0;
1171 ge2d_config->bitmask_en = 0;
1172 ge2d_config->src1_gb_alpha = 0;
1173 ge2d_config->dst_xy_swap = 0;
1174 canvas_read(src_canvas & 0xff, &cs0);
1175 canvas_read((src_canvas >> 8) & 0xff, &cs1);
1176 canvas_read((src_canvas >> 16) & 0xff, &cs2);
1177 ge2d_config->src_planes[0].addr = cs0.addr;
1178 ge2d_config->src_planes[0].w = dst_w * 4;//cs0.width;
1179 ge2d_config->src_planes[0].h = dst_h;//cs0.height;
1180 ge2d_config->src_planes[1].addr = cs1.addr;
1181 ge2d_config->src_planes[1].w = cs1.width;
1182 ge2d_config->src_planes[1].h = cs1.height;
1183 ge2d_config->src_planes[2].addr = cs2.addr;
1184 ge2d_config->src_planes[2].w = cs2.width;
1185 ge2d_config->src_planes[2].h = cs2.height;
1186
1187 canvas_read(dst_canvas & 0xff, &cd);
1188
1189 ge2d_config->dst_planes[0].addr = cd.addr;
1190 ge2d_config->dst_planes[0].w = dst_w * 4;//cd.width;
1191 ge2d_config->dst_planes[0].h = dst_h;//cd.height;
1192 ge2d_config->src_key.key_enable = 0;
1193 ge2d_config->src_key.key_mask = 0;
1194 ge2d_config->src_key.key_mode = 0;
1195 ge2d_config->src_para.canvas_index = src_canvas;
1196 ge2d_config->src_para.mem_type = CANVAS_TYPE_INVALID;
1197 ge2d_config->src_para.format = input_format | GE2D_LITTLE_ENDIAN;
1198 ge2d_config->src_para.fill_color_en = 0;
1199 ge2d_config->src_para.fill_mode = 0;
1200 ge2d_config->src_para.x_rev = 0;
1201 ge2d_config->src_para.y_rev = 0;
1202 ge2d_config->src_para.color = 0xffffffff;
1203 ge2d_config->src_para.top = 0;
1204 ge2d_config->src_para.left = 0;
1205 ge2d_config->src_para.width = dst_w;//request->src_w;
1206 ge2d_config->src_para.height = dst_h;//request->src_h;
1207 ge2d_config->src2_para.mem_type = CANVAS_TYPE_INVALID;
1208 ge2d_config->dst_para.canvas_index = dst_canvas;
1209 ge2d_config->dst_para.mem_type = CANVAS_TYPE_INVALID;
1210 ge2d_config->dst_para.format =
1211 GE2D_FORMAT_M24_NV21 | GE2D_LITTLE_ENDIAN;
1212
1213 if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720) {
1214 ge2d_config->dst_para.format |= GE2D_FORMAT_BT_STANDARD;
1215 }
1216
1217 ge2d_config->dst_para.fill_color_en = 0;
1218 ge2d_config->dst_para.fill_mode = 0;
1219 ge2d_config->dst_para.x_rev = 0;
1220 ge2d_config->dst_para.y_rev = 0;
1221 ge2d_config->dst_para.color = 0;
1222 ge2d_config->dst_para.top = 0;
1223 ge2d_config->dst_para.left = 0;
1224 ge2d_config->dst_para.width = dst_w;
1225 ge2d_config->dst_para.height = dst_h;
1226 ge2d_config->dst_para.x_rev = 0;
1227 ge2d_config->dst_para.y_rev = 0;
1228
1229
1230 if (ge2d_context_config_ex(context, ge2d_config) < 0) {
1231 pr_err("++ge2d configing error.\n");
1232 return -1;
1233 }
1234 stretchblt_noalpha(context, src_left, src_top, src_width, src_height,
1235 0, 0, wq->pic.encoder_width, wq->pic.encoder_height);
1236 return dst_canvas_w*dst_h * 3 / 2;
1237}
1238#endif
1239
1240static s32 set_input_format(struct encode_wq_s *wq,
1241 struct encode_request_s *request)
1242{
1243 s32 ret = 0;
1244 u8 iformat = MAX_FRAME_FMT, oformat = MAX_FRAME_FMT, r2y_en = 0;
1245 u32 picsize_x, picsize_y, src_addr;
1246 u32 canvas_w = 0;
1247 u32 input = request->src;
1248 u32 input_y = 0;
1249 u32 input_u = 0;
1250 u32 input_v = 0;
1251 u8 ifmt_extra = 0;
1252
1253 if ((request->fmt == FMT_RGB565) || (request->fmt >= MAX_FRAME_FMT))
1254 return -1;
1255
1256 picsize_x = ((wq->pic.encoder_width + 15) >> 4) << 4;
1257 picsize_y = ((wq->pic.encoder_height + 15) >> 4) << 4;
1258 oformat = 0;
1259
1260 if ((request->type == LOCAL_BUFF)
1261 || (request->type == PHYSICAL_BUFF)
1262 || (request->type == DMA_BUFF)) {
1263 if ((request->type == LOCAL_BUFF) &&
1264 (request->flush_flag & AMVENC_FLUSH_FLAG_INPUT))
1265 dma_flush(wq->mem.dct_buff_start_addr,
1266 request->framesize);
1267 if (request->type == LOCAL_BUFF) {
1268 input = wq->mem.dct_buff_start_addr;
1269 src_addr =
1270 wq->mem.dct_buff_start_addr;
1271 } else if (request->type == DMA_BUFF) {
1272 if (request->plane_num == 3) {
1273 input_y = (unsigned long)request->dma_cfg[0].paddr;
1274 input_u = (unsigned long)request->dma_cfg[1].paddr;
1275 input_v = (unsigned long)request->dma_cfg[2].paddr;
1276 } else if (request->plane_num == 2) {
1277 input_y = (unsigned long)request->dma_cfg[0].paddr;
1278 input_u = (unsigned long)request->dma_cfg[1].paddr;
1279 input_v = input_u;
1280 } else if (request->plane_num == 1) {
1281 input_y = (unsigned long)request->dma_cfg[0].paddr;
1282 if (request->fmt == FMT_NV21
1283 || request->fmt == FMT_NV12) {
1284 input_u = input_y + picsize_x * picsize_y;
1285 input_v = input_u;
1286 }
1287 if (request->fmt == FMT_YUV420) {
1288 input_u = input_y + picsize_x * picsize_y;
1289 input_v = input_u + picsize_x * picsize_y / 4;
1290 }
1291 }
1292 src_addr = input_y;
1293 picsize_y = wq->pic.encoder_height;
1294 enc_pr(LOG_INFO, "dma addr[0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx]\n",
1295 (unsigned long)request->dma_cfg[0].vaddr,
1296 (unsigned long)request->dma_cfg[0].paddr,
1297 (unsigned long)request->dma_cfg[1].vaddr,
1298 (unsigned long)request->dma_cfg[1].paddr,
1299 (unsigned long)request->dma_cfg[2].vaddr,
1300 (unsigned long)request->dma_cfg[2].paddr);
1301 } else {
1302 src_addr = input;
1303 picsize_y = wq->pic.encoder_height;
1304 }
1305 if (request->scale_enable) {
1306#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1307 struct config_para_ex_s ge2d_config;
1308
1309 memset(&ge2d_config, 0,
1310 sizeof(struct config_para_ex_s));
1311 scale_frame(
1312 wq, request,
1313 &ge2d_config,
1314 src_addr,
1315 false);
1316 iformat = 2;
1317 r2y_en = 0;
1318 input = ((ENC_CANVAS_OFFSET + 7) << 8) |
1319 (ENC_CANVAS_OFFSET + 6);
1320 ret = 0;
1321 goto MFDIN;
1322#else
1323 enc_pr(LOG_ERROR,
1324 "Warning: need enable ge2d for scale frame!\n");
1325 return -1;
1326#endif
1327 }
1328 if ((request->fmt <= FMT_YUV444_PLANE) ||
1329 (request->fmt >= FMT_YUV422_12BIT))
1330 r2y_en = 0;
1331 else
1332 r2y_en = 1;
1333
1334 if (request->fmt >= FMT_YUV422_12BIT) {
1335 iformat = 7;
1336 ifmt_extra = request->fmt - FMT_YUV422_12BIT;
1337 if (request->fmt == FMT_YUV422_12BIT)
1338 canvas_w = picsize_x * 24 / 8;
1339 else if (request->fmt == FMT_YUV444_10BIT)
1340 canvas_w = picsize_x * 32 / 8;
1341 else
1342 canvas_w = (picsize_x * 20 + 7) / 8;
1343 canvas_w = ((canvas_w + 31) >> 5) << 5;
1344 canvas_config(ENC_CANVAS_OFFSET + 6,
1345 input,
1346 canvas_w, picsize_y,
1347 CANVAS_ADDR_NOWRAP,
1348 CANVAS_BLKMODE_LINEAR);
1349 input = ENC_CANVAS_OFFSET + 6;
1350 input = input & 0xff;
1351 } else if (request->fmt == FMT_YUV422_SINGLE)
1352 iformat = 10;
1353 else if ((request->fmt == FMT_YUV444_SINGLE)
1354 || (request->fmt == FMT_RGB888)) {
1355 iformat = 1;
1356 if (request->fmt == FMT_RGB888)
1357 r2y_en = 1;
1358 canvas_w = picsize_x * 3;
1359 canvas_w = ((canvas_w + 31) >> 5) << 5;
1360 canvas_config(ENC_CANVAS_OFFSET + 6,
1361 input,
1362 canvas_w, picsize_y,
1363 CANVAS_ADDR_NOWRAP,
1364 CANVAS_BLKMODE_LINEAR);
1365 input = ENC_CANVAS_OFFSET + 6;
1366 } else if ((request->fmt == FMT_NV21)
1367 || (request->fmt == FMT_NV12)) {
1368 canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
1369 iformat = (request->fmt == FMT_NV21) ? 2 : 3;
1370 if (request->type == DMA_BUFF) {
1371 canvas_config(ENC_CANVAS_OFFSET + 6,
1372 input_y,
1373 canvas_w, picsize_y,
1374 CANVAS_ADDR_NOWRAP,
1375 CANVAS_BLKMODE_LINEAR);
1376 canvas_config(ENC_CANVAS_OFFSET + 7,
1377 input_u,
1378 canvas_w, picsize_y / 2,
1379 CANVAS_ADDR_NOWRAP,
1380 CANVAS_BLKMODE_LINEAR);
1381 } else {
1382 canvas_config(ENC_CANVAS_OFFSET + 6,
1383 input,
1384 canvas_w, picsize_y,
1385 CANVAS_ADDR_NOWRAP,
1386 CANVAS_BLKMODE_LINEAR);
1387 canvas_config(ENC_CANVAS_OFFSET + 7,
1388 input + canvas_w * picsize_y,
1389 canvas_w, picsize_y / 2,
1390 CANVAS_ADDR_NOWRAP,
1391 CANVAS_BLKMODE_LINEAR);
1392 }
1393 input = ((ENC_CANVAS_OFFSET + 7) << 8) |
1394 (ENC_CANVAS_OFFSET + 6);
1395 } else if (request->fmt == FMT_YUV420) {
1396 iformat = 4;
1397 canvas_w = ((wq->pic.encoder_width + 63) >> 6) << 6;
1398 if (request->type == DMA_BUFF) {
1399 canvas_config(ENC_CANVAS_OFFSET + 6,
1400 input_y,
1401 canvas_w, picsize_y,
1402 CANVAS_ADDR_NOWRAP,
1403 CANVAS_BLKMODE_LINEAR);
1404 canvas_config(ENC_CANVAS_OFFSET + 7,
1405 input_u,
1406 canvas_w / 2, picsize_y / 2,
1407 CANVAS_ADDR_NOWRAP,
1408 CANVAS_BLKMODE_LINEAR);
1409 canvas_config(ENC_CANVAS_OFFSET + 8,
1410 input_v,
1411 canvas_w / 2, picsize_y / 2,
1412 CANVAS_ADDR_NOWRAP,
1413 CANVAS_BLKMODE_LINEAR);
1414 } else {
1415 canvas_config(ENC_CANVAS_OFFSET + 6,
1416 input,
1417 canvas_w, picsize_y,
1418 CANVAS_ADDR_NOWRAP,
1419 CANVAS_BLKMODE_LINEAR);
1420 canvas_config(ENC_CANVAS_OFFSET + 7,
1421 input + canvas_w * picsize_y,
1422 canvas_w / 2, picsize_y / 2,
1423 CANVAS_ADDR_NOWRAP,
1424 CANVAS_BLKMODE_LINEAR);
1425 canvas_config(ENC_CANVAS_OFFSET + 8,
1426 input + canvas_w * picsize_y * 5 / 4,
1427 canvas_w / 2, picsize_y / 2,
1428 CANVAS_ADDR_NOWRAP,
1429 CANVAS_BLKMODE_LINEAR);
1430
1431 }
1432 input = ((ENC_CANVAS_OFFSET + 8) << 16) |
1433 ((ENC_CANVAS_OFFSET + 7) << 8) |
1434 (ENC_CANVAS_OFFSET + 6);
1435 } else if ((request->fmt == FMT_YUV444_PLANE)
1436 || (request->fmt == FMT_RGB888_PLANE)) {
1437 if (request->fmt == FMT_RGB888_PLANE)
1438 r2y_en = 1;
1439 iformat = 5;
1440 canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
1441 canvas_config(ENC_CANVAS_OFFSET + 6,
1442 input,
1443 canvas_w, picsize_y,
1444 CANVAS_ADDR_NOWRAP,
1445 CANVAS_BLKMODE_LINEAR);
1446 canvas_config(ENC_CANVAS_OFFSET + 7,
1447 input + canvas_w * picsize_y,
1448 canvas_w, picsize_y,
1449 CANVAS_ADDR_NOWRAP,
1450 CANVAS_BLKMODE_LINEAR);
1451 canvas_config(ENC_CANVAS_OFFSET + 8,
1452 input + canvas_w * picsize_y * 2,
1453 canvas_w, picsize_y,
1454 CANVAS_ADDR_NOWRAP,
1455 CANVAS_BLKMODE_LINEAR);
1456 input = ((ENC_CANVAS_OFFSET + 8) << 16) |
1457 ((ENC_CANVAS_OFFSET + 7) << 8) |
1458 (ENC_CANVAS_OFFSET + 6);
1459 } else if (request->fmt == FMT_RGBA8888) {
1460 r2y_en = 1;
1461 iformat = 12;
1462 }
1463 ret = 0;
1464 } else if (request->type == CANVAS_BUFF) {
1465 r2y_en = 0;
1466 if (request->scale_enable) {
1467#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1468 struct config_para_ex_s ge2d_config;
1469 memset(&ge2d_config, 0,
1470 sizeof(struct config_para_ex_s));
1471 scale_frame(
1472 wq, request,
1473 &ge2d_config,
1474 input, true);
1475 iformat = 2;
1476 r2y_en = 0;
1477 input = ((ENC_CANVAS_OFFSET + 7) << 8) |
1478 (ENC_CANVAS_OFFSET + 6);
1479 ret = 0;
1480 goto MFDIN;
1481#else
1482 enc_pr(LOG_ERROR,
1483 "Warning: need enable ge2d for scale frame!\n");
1484 return -1;
1485#endif
1486 }
1487 if (request->fmt == FMT_YUV422_SINGLE) {
1488 iformat = 0;
1489 input = input & 0xff;
1490 } else if (request->fmt == FMT_YUV444_SINGLE) {
1491 iformat = 1;
1492 input = input & 0xff;
1493 } else if ((request->fmt == FMT_NV21)
1494 || (request->fmt == FMT_NV12)) {
1495 iformat = (request->fmt == FMT_NV21) ? 2 : 3;
1496 input = input & 0xffff;
1497 } else if (request->fmt == FMT_YUV420) {
1498 iformat = 4;
1499 input = input & 0xffffff;
1500 } else if ((request->fmt == FMT_YUV444_PLANE)
1501 || (request->fmt == FMT_RGB888_PLANE)) {
1502 if (request->fmt == FMT_RGB888_PLANE)
1503 r2y_en = 1;
1504 iformat = 5;
1505 input = input & 0xffffff;
1506 } else if ((request->fmt == FMT_YUV422_12BIT)
1507 || (request->fmt == FMT_YUV444_10BIT)
1508 || (request->fmt == FMT_YUV422_10BIT)) {
1509 iformat = 7;
1510 ifmt_extra = request->fmt - FMT_YUV422_12BIT;
1511 input = input & 0xff;
1512 } else
1513 ret = -1;
1514 }
1515#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1516MFDIN:
1517#endif
1518 if (ret == 0)
1519 mfdin_basic(input, iformat, oformat,
1520 picsize_x, picsize_y, r2y_en,
1521 request->nr_mode, ifmt_extra);
1522 return ret;
1523}
1524
1525#ifdef H264_ENC_CBR
1526static void ConvertTable2Risc(void *table, u32 len)
1527{
1528 u32 i, j;
1529 u16 temp;
1530 u16 *tbl = (u16 *)table;
1531
1532 if ((len < 8) || (len % 8) || (!table)) {
1533 enc_pr(LOG_ERROR, "ConvertTable2Risc tbl %p, len %d error\n",
1534 table, len);
1535 return;
1536 }
1537 for (i = 0; i < len / 8; i++) {
1538 j = i << 2;
1539 temp = tbl[j];
1540 tbl[j] = tbl[j + 3];
1541 tbl[j + 3] = temp;
1542
1543 temp = tbl[j + 1];
1544 tbl[j + 1] = tbl[j + 2];
1545 tbl[j + 2] = temp;
1546 }
1547
1548}
1549#endif
1550
1551static void avc_prot_init(struct encode_wq_s *wq,
1552 struct encode_request_s *request, u32 quant, bool IDR)
1553{
1554 u32 data32;
1555 u32 pic_width, pic_height;
1556 u32 pic_mb_nr;
1557 u32 pic_mbx, pic_mby;
1558 u32 i_pic_qp, p_pic_qp;
1559 u32 i_pic_qp_c, p_pic_qp_c;
1560 u32 pic_width_in_mb;
1561 u32 slice_qp;
1562
1563 pic_width = wq->pic.encoder_width;
1564 pic_height = wq->pic.encoder_height;
1565 pic_mb_nr = 0;
1566 pic_mbx = 0;
1567 pic_mby = 0;
1568 i_pic_qp = quant;
1569 p_pic_qp = quant;
1570
1571 pic_width_in_mb = (pic_width + 15) / 16;
1572 WRITE_HREG(HCODEC_HDEC_MC_OMEM_AUTO,
1573 (1 << 31) | /* use_omem_mb_xy */
1574 ((pic_width_in_mb - 1) << 16)); /* omem_max_mb_x */
1575
1576 WRITE_HREG(HCODEC_VLC_ADV_CONFIG,
1577 /* early_mix_mc_hcmd -- will enable in P Picture */
1578 (0 << 10) |
1579 (1 << 9) | /* update_top_left_mix */
1580 (1 << 8) | /* p_top_left_mix */
1581 /* mv_cal_mixed_type -- will enable in P Picture */
1582 (0 << 7) |
1583 /* mc_hcmd_mixed_type -- will enable in P Picture */
1584 (0 << 6) |
1585 (1 << 5) | /* use_separate_int_control */
1586 (1 << 4) | /* hcmd_intra_use_q_info */
1587 (1 << 3) | /* hcmd_left_use_prev_info */
1588 (1 << 2) | /* hcmd_use_q_info */
1589 (1 << 1) | /* use_q_delta_quant */
1590 /* detect_I16_from_I4 use qdct detected mb_type */
1591 (0 << 0));
1592
1593 WRITE_HREG(HCODEC_QDCT_ADV_CONFIG,
1594 (1 << 29) | /* mb_info_latch_no_I16_pred_mode */
1595 (1 << 28) | /* ie_dma_mbxy_use_i_pred */
1596 (1 << 27) | /* ie_dma_read_write_use_ip_idx */
1597 (1 << 26) | /* ie_start_use_top_dma_count */
1598 (1 << 25) | /* i_pred_top_dma_rd_mbbot */
1599 (1 << 24) | /* i_pred_top_dma_wr_disable */
1600 /* i_pred_mix -- will enable in P Picture */
1601 (0 << 23) |
1602 (1 << 22) | /* me_ab_rd_when_intra_in_p */
1603 (1 << 21) | /* force_mb_skip_run_when_intra */
1604 /* mc_out_mixed_type -- will enable in P Picture */
1605 (0 << 20) |
1606 (1 << 19) | /* ie_start_when_quant_not_full */
1607 (1 << 18) | /* mb_info_state_mix */
1608 /* mb_type_use_mix_result -- will enable in P Picture */
1609 (0 << 17) |
1610 /* me_cb_ie_read_enable -- will enable in P Picture */
1611 (0 << 16) |
1612 /* ie_cur_data_from_me -- will enable in P Picture */
1613 (0 << 15) |
1614 (1 << 14) | /* rem_per_use_table */
1615 (0 << 13) | /* q_latch_int_enable */
1616 (1 << 12) | /* q_use_table */
1617 (0 << 11) | /* q_start_wait */
1618 (1 << 10) | /* LUMA_16_LEFT_use_cur */
1619 (1 << 9) | /* DC_16_LEFT_SUM_use_cur */
1620 (1 << 8) | /* c_ref_ie_sel_cur */
1621 (0 << 7) | /* c_ipred_perfect_mode */
1622 (1 << 6) | /* ref_ie_ul_sel */
1623 (1 << 5) | /* mb_type_use_ie_result */
1624 (1 << 4) | /* detect_I16_from_I4 */
1625 (1 << 3) | /* ie_not_wait_ref_busy */
1626 (1 << 2) | /* ie_I16_enable */
1627 (3 << 0)); /* ie_done_sel // fastest when waiting */
1628
1629 if (request != NULL) {
1630 WRITE_HREG(HCODEC_IE_WEIGHT,
1631 (request->i16_weight << 16) |
1632 (request->i4_weight << 0));
1633 WRITE_HREG(HCODEC_ME_WEIGHT,
1634 (request->me_weight << 0));
1635 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1636 /* ie_sad_offset_I16 */
1637 (request->i16_weight << 16) |
1638 /* ie_sad_offset_I4 */
1639 (request->i4_weight << 0));
1640 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1641 /* ie_sad_shift_I16 */
1642 (IE_SAD_SHIFT_I16 << 24) |
1643 /* ie_sad_shift_I4 */
1644 (IE_SAD_SHIFT_I4 << 20) |
1645 /* me_sad_shift_INTER */
1646 (ME_SAD_SHIFT_INTER << 16) |
1647 /* me_sad_offset_INTER */
1648 (request->me_weight << 0));
1649 wq->me_weight = request->me_weight;
1650 wq->i4_weight = request->i4_weight;
1651 wq->i16_weight = request->i16_weight;
1652 } else {
1653 WRITE_HREG(HCODEC_IE_WEIGHT,
1654 (I16MB_WEIGHT_OFFSET << 16) |
1655 (I4MB_WEIGHT_OFFSET << 0));
1656 WRITE_HREG(HCODEC_ME_WEIGHT,
1657 (ME_WEIGHT_OFFSET << 0));
1658 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1659 /* ie_sad_offset_I16 */
1660 (I16MB_WEIGHT_OFFSET << 16) |
1661 /* ie_sad_offset_I4 */
1662 (I4MB_WEIGHT_OFFSET << 0));
1663 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1664 /* ie_sad_shift_I16 */
1665 (IE_SAD_SHIFT_I16 << 24) |
1666 /* ie_sad_shift_I4 */
1667 (IE_SAD_SHIFT_I4 << 20) |
1668 /* me_sad_shift_INTER */
1669 (ME_SAD_SHIFT_INTER << 16) |
1670 /* me_sad_offset_INTER */
1671 (ME_WEIGHT_OFFSET << 0));
1672 }
1673
1674 WRITE_HREG(HCODEC_ADV_MV_CTL0,
1675 (ADV_MV_LARGE_16x8 << 31) |
1676 (ADV_MV_LARGE_8x16 << 30) |
1677 (ADV_MV_8x8_WEIGHT << 16) | /* adv_mv_8x8_weight */
1678 /* adv_mv_4x4x4_weight should be set bigger */
1679 (ADV_MV_4x4x4_WEIGHT << 0));
1680 WRITE_HREG(HCODEC_ADV_MV_CTL1,
1681 /* adv_mv_16x16_weight */
1682 (ADV_MV_16x16_WEIGHT << 16) |
1683 (ADV_MV_LARGE_16x16 << 15) |
1684 (ADV_MV_16_8_WEIGHT << 0)); /* adv_mv_16_8_weight */
1685
1686 hcodec_prog_qtbl(wq);
1687 if (IDR) {
1688 i_pic_qp =
1689 wq->quant_tbl_i4[0] & 0xff;
1690 i_pic_qp +=
1691 wq->quant_tbl_i16[0] & 0xff;
1692 i_pic_qp /= 2;
1693 p_pic_qp = i_pic_qp;
1694 } else {
1695 i_pic_qp =
1696 wq->quant_tbl_i4[0] & 0xff;
1697 i_pic_qp +=
1698 wq->quant_tbl_i16[0] & 0xff;
1699 p_pic_qp = wq->quant_tbl_me[0] & 0xff;
1700 slice_qp = (i_pic_qp + p_pic_qp) / 3;
1701 i_pic_qp = slice_qp;
1702 p_pic_qp = i_pic_qp;
1703 }
1704#ifdef H264_ENC_CBR
1705 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
1706 data32 = READ_HREG(HCODEC_SAD_CONTROL_1);
1707 data32 = data32 & 0xffff; /* remove sad shift */
1708 WRITE_HREG(HCODEC_SAD_CONTROL_1, data32);
1709 WRITE_HREG(H264_ENC_CBR_TABLE_ADDR,
1710 wq->mem.cbr_info_ddr_start_addr);
1711 WRITE_HREG(H264_ENC_CBR_MB_SIZE_ADDR,
1712 wq->mem.cbr_info_ddr_start_addr
1713 + CBR_TABLE_SIZE);
1714 WRITE_HREG(H264_ENC_CBR_CTL,
1715 (wq->cbr_info.start_tbl_id << 28) |
1716 (wq->cbr_info.short_shift << 24) |
1717 (wq->cbr_info.long_mb_num << 16) |
1718 (wq->cbr_info.long_th << 0));
1719 WRITE_HREG(H264_ENC_CBR_REGION_SIZE,
1720 (wq->cbr_info.block_w << 16) |
1721 (wq->cbr_info.block_h << 0));
1722 }
1723#endif
1724 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_0,
1725 (0 << 19) | /* vlc_delta_quant_1 */
1726 (i_pic_qp << 13) | /* vlc_quant_1 */
1727 (0 << 6) | /* vlc_delta_quant_0 */
1728 (i_pic_qp << 0)); /* vlc_quant_0 */
1729 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_1,
1730 (14 << 6) | /* vlc_max_delta_q_neg */
1731 (13 << 0)); /* vlc_max_delta_q_pos */
1732 WRITE_HREG(HCODEC_VLC_PIC_SIZE,
1733 pic_width | (pic_height << 16));
1734 WRITE_HREG(HCODEC_VLC_PIC_POSITION,
1735 (pic_mb_nr << 16) |
1736 (pic_mby << 8) |
1737 (pic_mbx << 0));
1738
1739 /* synopsys parallel_case full_case */
1740 switch (i_pic_qp) {
1741 case 0:
1742 i_pic_qp_c = 0;
1743 break;
1744 case 1:
1745 i_pic_qp_c = 1;
1746 break;
1747 case 2:
1748 i_pic_qp_c = 2;
1749 break;
1750 case 3:
1751 i_pic_qp_c = 3;
1752 break;
1753 case 4:
1754 i_pic_qp_c = 4;
1755 break;
1756 case 5:
1757 i_pic_qp_c = 5;
1758 break;
1759 case 6:
1760 i_pic_qp_c = 6;
1761 break;
1762 case 7:
1763 i_pic_qp_c = 7;
1764 break;
1765 case 8:
1766 i_pic_qp_c = 8;
1767 break;
1768 case 9:
1769 i_pic_qp_c = 9;
1770 break;
1771 case 10:
1772 i_pic_qp_c = 10;
1773 break;
1774 case 11:
1775 i_pic_qp_c = 11;
1776 break;
1777 case 12:
1778 i_pic_qp_c = 12;
1779 break;
1780 case 13:
1781 i_pic_qp_c = 13;
1782 break;
1783 case 14:
1784 i_pic_qp_c = 14;
1785 break;
1786 case 15:
1787 i_pic_qp_c = 15;
1788 break;
1789 case 16:
1790 i_pic_qp_c = 16;
1791 break;
1792 case 17:
1793 i_pic_qp_c = 17;
1794 break;
1795 case 18:
1796 i_pic_qp_c = 18;
1797 break;
1798 case 19:
1799 i_pic_qp_c = 19;
1800 break;
1801 case 20:
1802 i_pic_qp_c = 20;
1803 break;
1804 case 21:
1805 i_pic_qp_c = 21;
1806 break;
1807 case 22:
1808 i_pic_qp_c = 22;
1809 break;
1810 case 23:
1811 i_pic_qp_c = 23;
1812 break;
1813 case 24:
1814 i_pic_qp_c = 24;
1815 break;
1816 case 25:
1817 i_pic_qp_c = 25;
1818 break;
1819 case 26:
1820 i_pic_qp_c = 26;
1821 break;
1822 case 27:
1823 i_pic_qp_c = 27;
1824 break;
1825 case 28:
1826 i_pic_qp_c = 28;
1827 break;
1828 case 29:
1829 i_pic_qp_c = 29;
1830 break;
1831 case 30:
1832 i_pic_qp_c = 29;
1833 break;
1834 case 31:
1835 i_pic_qp_c = 30;
1836 break;
1837 case 32:
1838 i_pic_qp_c = 31;
1839 break;
1840 case 33:
1841 i_pic_qp_c = 32;
1842 break;
1843 case 34:
1844 i_pic_qp_c = 32;
1845 break;
1846 case 35:
1847 i_pic_qp_c = 33;
1848 break;
1849 case 36:
1850 i_pic_qp_c = 34;
1851 break;
1852 case 37:
1853 i_pic_qp_c = 34;
1854 break;
1855 case 38:
1856 i_pic_qp_c = 35;
1857 break;
1858 case 39:
1859 i_pic_qp_c = 35;
1860 break;
1861 case 40:
1862 i_pic_qp_c = 36;
1863 break;
1864 case 41:
1865 i_pic_qp_c = 36;
1866 break;
1867 case 42:
1868 i_pic_qp_c = 37;
1869 break;
1870 case 43:
1871 i_pic_qp_c = 37;
1872 break;
1873 case 44:
1874 i_pic_qp_c = 37;
1875 break;
1876 case 45:
1877 i_pic_qp_c = 38;
1878 break;
1879 case 46:
1880 i_pic_qp_c = 38;
1881 break;
1882 case 47:
1883 i_pic_qp_c = 38;
1884 break;
1885 case 48:
1886 i_pic_qp_c = 39;
1887 break;
1888 case 49:
1889 i_pic_qp_c = 39;
1890 break;
1891 case 50:
1892 i_pic_qp_c = 39;
1893 break;
1894 default:
1895 i_pic_qp_c = 39;
1896 break;
1897 }
1898
1899 /* synopsys parallel_case full_case */
1900 switch (p_pic_qp) {
1901 case 0:
1902 p_pic_qp_c = 0;
1903 break;
1904 case 1:
1905 p_pic_qp_c = 1;
1906 break;
1907 case 2:
1908 p_pic_qp_c = 2;
1909 break;
1910 case 3:
1911 p_pic_qp_c = 3;
1912 break;
1913 case 4:
1914 p_pic_qp_c = 4;
1915 break;
1916 case 5:
1917 p_pic_qp_c = 5;
1918 break;
1919 case 6:
1920 p_pic_qp_c = 6;
1921 break;
1922 case 7:
1923 p_pic_qp_c = 7;
1924 break;
1925 case 8:
1926 p_pic_qp_c = 8;
1927 break;
1928 case 9:
1929 p_pic_qp_c = 9;
1930 break;
1931 case 10:
1932 p_pic_qp_c = 10;
1933 break;
1934 case 11:
1935 p_pic_qp_c = 11;
1936 break;
1937 case 12:
1938 p_pic_qp_c = 12;
1939 break;
1940 case 13:
1941 p_pic_qp_c = 13;
1942 break;
1943 case 14:
1944 p_pic_qp_c = 14;
1945 break;
1946 case 15:
1947 p_pic_qp_c = 15;
1948 break;
1949 case 16:
1950 p_pic_qp_c = 16;
1951 break;
1952 case 17:
1953 p_pic_qp_c = 17;
1954 break;
1955 case 18:
1956 p_pic_qp_c = 18;
1957 break;
1958 case 19:
1959 p_pic_qp_c = 19;
1960 break;
1961 case 20:
1962 p_pic_qp_c = 20;
1963 break;
1964 case 21:
1965 p_pic_qp_c = 21;
1966 break;
1967 case 22:
1968 p_pic_qp_c = 22;
1969 break;
1970 case 23:
1971 p_pic_qp_c = 23;
1972 break;
1973 case 24:
1974 p_pic_qp_c = 24;
1975 break;
1976 case 25:
1977 p_pic_qp_c = 25;
1978 break;
1979 case 26:
1980 p_pic_qp_c = 26;
1981 break;
1982 case 27:
1983 p_pic_qp_c = 27;
1984 break;
1985 case 28:
1986 p_pic_qp_c = 28;
1987 break;
1988 case 29:
1989 p_pic_qp_c = 29;
1990 break;
1991 case 30:
1992 p_pic_qp_c = 29;
1993 break;
1994 case 31:
1995 p_pic_qp_c = 30;
1996 break;
1997 case 32:
1998 p_pic_qp_c = 31;
1999 break;
2000 case 33:
2001 p_pic_qp_c = 32;
2002 break;
2003 case 34:
2004 p_pic_qp_c = 32;
2005 break;
2006 case 35:
2007 p_pic_qp_c = 33;
2008 break;
2009 case 36:
2010 p_pic_qp_c = 34;
2011 break;
2012 case 37:
2013 p_pic_qp_c = 34;
2014 break;
2015 case 38:
2016 p_pic_qp_c = 35;
2017 break;
2018 case 39:
2019 p_pic_qp_c = 35;
2020 break;
2021 case 40:
2022 p_pic_qp_c = 36;
2023 break;
2024 case 41:
2025 p_pic_qp_c = 36;
2026 break;
2027 case 42:
2028 p_pic_qp_c = 37;
2029 break;
2030 case 43:
2031 p_pic_qp_c = 37;
2032 break;
2033 case 44:
2034 p_pic_qp_c = 37;
2035 break;
2036 case 45:
2037 p_pic_qp_c = 38;
2038 break;
2039 case 46:
2040 p_pic_qp_c = 38;
2041 break;
2042 case 47:
2043 p_pic_qp_c = 38;
2044 break;
2045 case 48:
2046 p_pic_qp_c = 39;
2047 break;
2048 case 49:
2049 p_pic_qp_c = 39;
2050 break;
2051 case 50:
2052 p_pic_qp_c = 39;
2053 break;
2054 default:
2055 p_pic_qp_c = 39;
2056 break;
2057 }
2058 WRITE_HREG(HCODEC_QDCT_Q_QUANT_I,
2059 (i_pic_qp_c << 22) |
2060 (i_pic_qp << 16) |
2061 ((i_pic_qp_c % 6) << 12) |
2062 ((i_pic_qp_c / 6) << 8) |
2063 ((i_pic_qp % 6) << 4) |
2064 ((i_pic_qp / 6) << 0));
2065
2066 WRITE_HREG(HCODEC_QDCT_Q_QUANT_P,
2067 (p_pic_qp_c << 22) |
2068 (p_pic_qp << 16) |
2069 ((p_pic_qp_c % 6) << 12) |
2070 ((p_pic_qp_c / 6) << 8) |
2071 ((p_pic_qp % 6) << 4) |
2072 ((p_pic_qp / 6) << 0));
2073
2074#ifdef ENABLE_IGNORE_FUNCTION
2075 WRITE_HREG(HCODEC_IGNORE_CONFIG,
2076 (1 << 31) | /* ignore_lac_coeff_en */
2077 (1 << 26) | /* ignore_lac_coeff_else (<1) */
2078 (1 << 21) | /* ignore_lac_coeff_2 (<1) */
2079 (2 << 16) | /* ignore_lac_coeff_1 (<2) */
2080 (1 << 15) | /* ignore_cac_coeff_en */
2081 (1 << 10) | /* ignore_cac_coeff_else (<1) */
2082 (1 << 5) | /* ignore_cac_coeff_2 (<1) */
2083 (3 << 0)); /* ignore_cac_coeff_1 (<2) */
2084
2085 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB)
2086 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2087 (1 << 31) | /* ignore_t_lac_coeff_en */
2088 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2089 (2 << 21) | /* ignore_t_lac_coeff_2 (<2) */
2090 (6 << 16) | /* ignore_t_lac_coeff_1 (<6) */
2091 (1<<15) | /* ignore_cdc_coeff_en */
2092 (0<<14) | /* ignore_t_lac_coeff_else_le_3 */
2093 (1<<13) | /* ignore_t_lac_coeff_else_le_4 */
2094 (1<<12) | /* ignore_cdc_only_when_empty_cac_inter */
2095 (1<<11) | /* ignore_cdc_only_when_one_empty_inter */
2096 /* ignore_cdc_range_max_inter 0-0, 1-1, 2-2, 3-3 */
2097 (2<<9) |
2098 /* ignore_cdc_abs_max_inter 0-1, 1-2, 2-3, 3-4 */
2099 (0<<7) |
2100 /* ignore_cdc_only_when_empty_cac_intra */
2101 (1<<5) |
2102 /* ignore_cdc_only_when_one_empty_intra */
2103 (1<<4) |
2104 /* ignore_cdc_range_max_intra 0-0, 1-1, 2-2, 3-3 */
2105 (1<<2) |
2106 /* ignore_cdc_abs_max_intra 0-1, 1-2, 2-3, 3-4 */
2107 (0<<0));
2108 else
2109 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2110 (1 << 31) | /* ignore_t_lac_coeff_en */
2111 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2112 (1 << 21) | /* ignore_t_lac_coeff_2 (<1) */
2113 (5 << 16) | /* ignore_t_lac_coeff_1 (<5) */
2114 (0 << 0));
2115#else
2116 WRITE_HREG(HCODEC_IGNORE_CONFIG, 0);
2117 WRITE_HREG(HCODEC_IGNORE_CONFIG_2, 0);
2118#endif
2119
2120 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2121 (1 << 9) | /* mb_info_soft_reset */
2122 (1 << 0)); /* mb read buffer soft reset */
2123
2124 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2125 (1 << 28) | /* ignore_t_p8x8 */
2126 (0 << 27) | /* zero_mc_out_null_non_skipped_mb */
2127 (0 << 26) | /* no_mc_out_null_non_skipped_mb */
2128 (0 << 25) | /* mc_out_even_skipped_mb */
2129 (0 << 24) | /* mc_out_wait_cbp_ready */
2130 (0 << 23) | /* mc_out_wait_mb_type_ready */
2131 (1 << 29) | /* ie_start_int_enable */
2132 (1 << 19) | /* i_pred_enable */
2133 (1 << 20) | /* ie_sub_enable */
2134 (1 << 18) | /* iq_enable */
2135 (1 << 17) | /* idct_enable */
2136 (1 << 14) | /* mb_pause_enable */
2137 (1 << 13) | /* q_enable */
2138 (1 << 12) | /* dct_enable */
2139 (1 << 10) | /* mb_info_en */
2140 (0 << 3) | /* endian */
2141 (0 << 1) | /* mb_read_en */
2142 (0 << 0)); /* soft reset */
2143
2144 WRITE_HREG(HCODEC_SAD_CONTROL,
2145 (0 << 3) | /* ie_result_buff_enable */
2146 (1 << 2) | /* ie_result_buff_soft_reset */
2147 (0 << 1) | /* sad_enable */
2148 (1 << 0)); /* sad soft reset */
2149 WRITE_HREG(HCODEC_IE_RESULT_BUFFER, 0);
2150
2151 WRITE_HREG(HCODEC_SAD_CONTROL,
2152 (1 << 3) | /* ie_result_buff_enable */
2153 (0 << 2) | /* ie_result_buff_soft_reset */
2154 (1 << 1) | /* sad_enable */
2155 (0 << 0)); /* sad soft reset */
2156
2157 WRITE_HREG(HCODEC_IE_CONTROL,
2158 (1 << 30) | /* active_ul_block */
2159 (0 << 1) | /* ie_enable */
2160 (1 << 0)); /* ie soft reset */
2161
2162 WRITE_HREG(HCODEC_IE_CONTROL,
2163 (1 << 30) | /* active_ul_block */
2164 (0 << 1) | /* ie_enable */
2165 (0 << 0)); /* ie soft reset */
2166
2167 WRITE_HREG(HCODEC_ME_SKIP_LINE,
2168 (8 << 24) | /* step_3_skip_line */
2169 (8 << 18) | /* step_2_skip_line */
2170 (2 << 12) | /* step_1_skip_line */
2171 (0 << 6) | /* step_0_skip_line */
2172 (0 << 0));
2173
2174 WRITE_HREG(HCODEC_ME_MV_MERGE_CTL, me_mv_merge_ctl);
2175 WRITE_HREG(HCODEC_ME_STEP0_CLOSE_MV, me_step0_close_mv);
2176 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_01, me_sad_enough_01);
2177 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_23, me_sad_enough_23);
2178 WRITE_HREG(HCODEC_ME_F_SKIP_SAD, me_f_skip_sad);
2179 WRITE_HREG(HCODEC_ME_F_SKIP_WEIGHT, me_f_skip_weight);
2180 WRITE_HREG(HCODEC_ME_MV_WEIGHT_01, me_mv_weight_01);
2181 WRITE_HREG(HCODEC_ME_MV_WEIGHT_23, me_mv_weight_23);
2182 WRITE_HREG(HCODEC_ME_SAD_RANGE_INC, me_sad_range_inc);
2183
2184 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
2185 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, 0);
2186 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL,
2187 (v5_use_small_diff_cnt << 7) |
2188 (v5_simple_mb_inter_all_en << 6) |
2189 (v5_simple_mb_inter_8x8_en << 5) |
2190 (v5_simple_mb_inter_16_8_en << 4) |
2191 (v5_simple_mb_inter_16x16_en << 3) |
2192 (v5_simple_mb_intra_en << 2) |
2193 (v5_simple_mb_C_en << 1) |
2194 (v5_simple_mb_Y_en << 0));
2195 WRITE_HREG(HCODEC_V5_MB_DIFF_SUM, 0);
2196 WRITE_HREG(HCODEC_V5_SMALL_DIFF_CNT,
2197 (v5_small_diff_C<<16) |
2198 (v5_small_diff_Y<<0));
2199 if (qp_mode == 1) {
2200 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2201 0);
2202 } else {
2203 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2204 v5_simple_dq_setting);
2205 }
2206 WRITE_HREG(HCODEC_V5_SIMPLE_MB_ME_WEIGHT,
2207 v5_simple_me_weight_setting);
2208 /* txlx can remove it */
2209 WRITE_HREG(HCODEC_QDCT_CONFIG, 1 << 0);
2210 }
2211
2212 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
2213 WRITE_HREG(HCODEC_V4_FORCE_SKIP_CFG,
2214 (i_pic_qp << 26) | /* v4_force_q_r_intra */
2215 (i_pic_qp << 20) | /* v4_force_q_r_inter */
2216 (0 << 19) | /* v4_force_q_y_enable */
2217 (5 << 16) | /* v4_force_qr_y */
2218 (6 << 12) | /* v4_force_qp_y */
2219 (0 << 0)); /* v4_force_skip_sad */
2220
2221 /* V3 Force skip */
2222 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2223 (1 << 31) | /* v3_skip_enable */
2224 (0 << 30) | /* v3_step_1_weight_enable */
2225 (1 << 28) | /* v3_mv_sad_weight_enable */
2226 (1 << 27) | /* v3_ipred_type_enable */
2227 (V3_FORCE_SKIP_SAD_1 << 12) |
2228 (V3_FORCE_SKIP_SAD_0 << 0));
2229 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2230 (V3_SKIP_WEIGHT_1 << 16) |
2231 (V3_SKIP_WEIGHT_0 << 0));
2232 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2233 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2234 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2235 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2236 (V3_FORCE_SKIP_SAD_2 << 16) |
2237 (V3_SKIP_WEIGHT_2 << 0));
2238 if (request != NULL) {
2239 unsigned int off1, off2;
2240
2241 off1 = V3_IE_F_ZERO_SAD_I4 - I4MB_WEIGHT_OFFSET;
2242 off2 = V3_IE_F_ZERO_SAD_I16
2243 - I16MB_WEIGHT_OFFSET;
2244 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2245 ((request->i16_weight + off2) << 16) |
2246 ((request->i4_weight + off1) << 0));
2247 off1 = V3_ME_F_ZERO_SAD - ME_WEIGHT_OFFSET;
2248 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2249 (0 << 25) |
2250 /* v3_no_ver_when_top_zero_en */
2251 (0 << 24) |
2252 /* v3_no_hor_when_left_zero_en */
2253 (3 << 16) | /* type_hor break */
2254 ((request->me_weight + off1) << 0));
2255 } else {
2256 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2257 (V3_IE_F_ZERO_SAD_I16 << 16) |
2258 (V3_IE_F_ZERO_SAD_I4 << 0));
2259 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2260 (0 << 25) |
2261 /* v3_no_ver_when_top_zero_en */
2262 (0 << 24) |
2263 /* v3_no_hor_when_left_zero_en */
2264 (3 << 16) | /* type_hor break */
2265 (V3_ME_F_ZERO_SAD << 0));
2266 }
2267 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2268 /* V3 Force skip */
2269 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2270 (1 << 31) | /* v3_skip_enable */
2271 (0 << 30) | /* v3_step_1_weight_enable */
2272 (1 << 28) | /* v3_mv_sad_weight_enable */
2273 (1 << 27) | /* v3_ipred_type_enable */
2274 (0 << 12) | /* V3_FORCE_SKIP_SAD_1 */
2275 (0 << 0)); /* V3_FORCE_SKIP_SAD_0 */
2276 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2277 (V3_SKIP_WEIGHT_1 << 16) |
2278 (V3_SKIP_WEIGHT_0 << 0));
2279 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2280 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2281 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2282 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2283 (0 << 16) | /* V3_FORCE_SKIP_SAD_2 */
2284 (V3_SKIP_WEIGHT_2 << 0));
2285 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2286 (0 << 16) | /* V3_IE_F_ZERO_SAD_I16 */
2287 (0 << 0)); /* V3_IE_F_ZERO_SAD_I4 */
2288 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2289 (0 << 25) | /* v3_no_ver_when_top_zero_en */
2290 (0 << 24) | /* v3_no_hor_when_left_zero_en */
2291 (3 << 16) | /* type_hor break */
2292 (0 << 0)); /* V3_ME_F_ZERO_SAD */
2293 }
2294 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2295 int i;
2296 /* MV SAD Table */
2297 for (i = 0; i < 64; i++)
2298 WRITE_HREG(HCODEC_V3_MV_SAD_TABLE,
2299 v3_mv_sad[i]);
2300
2301 /* IE PRED SAD Table*/
2302 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_0,
2303 (C_ipred_weight_H << 24) |
2304 (C_ipred_weight_V << 16) |
2305 (I4_ipred_weight_else << 8) |
2306 (I4_ipred_weight_most << 0));
2307 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_1,
2308 (I16_ipred_weight_DC << 24) |
2309 (I16_ipred_weight_H << 16) |
2310 (I16_ipred_weight_V << 8) |
2311 (C_ipred_weight_DC << 0));
2312 WRITE_HREG(HCODEC_V3_LEFT_SMALL_MAX_SAD,
2313 (v3_left_small_max_me_sad << 16) |
2314 (v3_left_small_max_ie_sad << 0));
2315 }
2316 WRITE_HREG(HCODEC_IE_DATA_FEED_BUFF_INFO, 0);
2317 WRITE_HREG(HCODEC_CURR_CANVAS_CTRL, 0);
2318 data32 = READ_HREG(HCODEC_VLC_CONFIG);
2319 data32 = data32 | (1 << 0); /* set pop_coeff_even_all_zero */
2320 WRITE_HREG(HCODEC_VLC_CONFIG, data32);
2321
2322 WRITE_HREG(INFO_DUMP_START_ADDR,
2323 wq->mem.dump_info_ddr_start_addr);
2324
2325 /* clear mailbox interrupt */
2326 WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);
2327
2328 /* enable mailbox interrupt */
2329 WRITE_HREG(HCODEC_IRQ_MBOX_MASK, 1);
2330}
2331
/*
 * Hard-reset the HCODEC encoder sub-blocks via DOS_SW_RESET1.
 * The triple dummy reads before and after each write are deliberate —
 * presumably a register-bus settling delay (results discarded); keep
 * them in place.  Bit meanings are hardware-specific; this set matches
 * amvenc_stop() minus bits 11/12 (the CPU start bits).
 */
void amvenc_reset(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	/* pulse reset: assert selected sub-block reset bits, then clear */
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));
	WRITE_VREG(DOS_SW_RESET1, 0);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2347
/*
 * Release the HCODEC microcontroller from reset and start microcode
 * execution.  Bits 11/12 of DOS_SW_RESET1 are pulsed (the MC/CPU reset
 * bits), then writing 1 to HCODEC_MPSR kicks off the loaded ucode.
 * The dummy reads around the reset pulse are a settling delay —
 * presumably required by the register bus; do not remove.
 */
void amvenc_start(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11));
	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	/* start ucode execution */
	WRITE_HREG(HCODEC_MPSR, 0x0001);
}
2363
/*
 * Stop the HCODEC microcontroller and reset the encoder pipeline.
 * Clears MPSR/CPSR to halt ucode, busy-waits (bounded to 1 s via
 * jiffies) for any in-flight IMEM DMA to finish, then pulses the full
 * reset bit set (CPU bits 11/12 plus the sub-block bits used by
 * amvenc_reset()).  On timeout the loop simply falls through to the
 * reset — the DMA is abandoned rather than retried.
 */
void amvenc_stop(void)
{
	ulong timeout = jiffies + HZ;

	WRITE_HREG(HCODEC_MPSR, 0);
	WRITE_HREG(HCODEC_CPSR, 0);
	/* wait for IMEM DMA busy bit (0x8000) to clear, max ~1 s */
	while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
		if (time_after(jiffies, timeout))
			break;
	}
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11) |
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));

	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2391
2392static void __iomem *mc_addr;
2393static u32 mc_addr_map;
2394#define MC_SIZE (4096 * 8)
2395s32 amvenc_loadmc(const char *p, struct encode_wq_s *wq)
2396{
2397 ulong timeout;
2398 s32 ret = 0;
2399
2400 /* use static mempry*/
2401 if (mc_addr == NULL) {
2402 mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
2403 if (!mc_addr) {
2404 enc_pr(LOG_ERROR, "avc loadmc iomap mc addr error.\n");
2405 return -ENOMEM;
2406 }
2407 }
2408
2409 enc_pr(LOG_ALL, "avc encode ucode name is %s\n", p);
2410 ret = get_data_from_name(p, (u8 *)mc_addr);
2411 if (ret < 0) {
2412 enc_pr(LOG_ERROR,
2413 "avc microcode fail ret=%d, name: %s, wq:%p.\n",
2414 ret, p, (void *)wq);
2415 }
2416
2417 mc_addr_map = dma_map_single(
2418 &encode_manager.this_pdev->dev,
2419 mc_addr, MC_SIZE, DMA_TO_DEVICE);
2420
2421 /* mc_addr_map = wq->mem.assit_buffer_offset; */
2422 /* mc_addr = ioremap_wc(mc_addr_map, MC_SIZE); */
2423 /* memcpy(mc_addr, p, MC_SIZE); */
2424 enc_pr(LOG_ALL, "address 0 is 0x%x\n", *((u32 *)mc_addr));
2425 enc_pr(LOG_ALL, "address 1 is 0x%x\n", *((u32 *)mc_addr + 1));
2426 enc_pr(LOG_ALL, "address 2 is 0x%x\n", *((u32 *)mc_addr + 2));
2427 enc_pr(LOG_ALL, "address 3 is 0x%x\n", *((u32 *)mc_addr + 3));
2428 WRITE_HREG(HCODEC_MPSR, 0);
2429 WRITE_HREG(HCODEC_CPSR, 0);
2430
2431 /* Read CBUS register for timing */
2432 timeout = READ_HREG(HCODEC_MPSR);
2433 timeout = READ_HREG(HCODEC_MPSR);
2434
2435 timeout = jiffies + HZ;
2436
2437 WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map);
2438 WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000);
2439 WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
2440
2441 while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
2442 if (time_before(jiffies, timeout))
2443 schedule();
2444 else {
2445 enc_pr(LOG_ERROR, "hcodec load mc error\n");
2446 ret = -EBUSY;
2447 break;
2448 }
2449 }
2450 dma_unmap_single(
2451 &encode_manager.this_pdev->dev,
2452 mc_addr_map, MC_SIZE, DMA_TO_DEVICE);
2453 return ret;
2454}
2455
/*
 * Minimal four-word HCODEC ucode executed by amvenc_dos_top_reg_fix()
 * to perform the single out-of-DOS CBUS access required by the HW
 * workaround when no encoder ucode is running.
 */
const u32 fix_mc[] __aligned(8) = {
	0x0809c05a, 0x06696000, 0x0c780000, 0x00000000
};
2459
2460
2461/*
2462 * DOS top level register access fix.
2463 * When hcodec is running, a protocol register HCODEC_CCPU_INTR_MSK
2464 * is set to make hcodec access one CBUS out of DOS domain once
2465 * to work around a HW bug for 4k2k dual decoder implementation.
2466 * If hcodec is not running, then a ucode is loaded and executed
2467 * instead.
2468 */
void amvenc_dos_top_reg_fix(void)
{
	bool hcodec_on;
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	hcodec_on = vdec_on(VDEC_HCODEC);

	/* hcodec is powered and its ucode is running: setting the
	 * protocol register is sufficient, nothing else to do */
	if ((hcodec_on) && (READ_VREG(HCODEC_MPSR) & 1)) {
		WRITE_HREG(HCODEC_CCPU_INTR_MSK, 1);
		spin_unlock_irqrestore(&lock, flags);
		return;
	}

	/* otherwise power hcodec on (if it wasn't) and briefly run the
	 * fix-up ucode instead */
	if (!hcodec_on)
		vdec_poweron(VDEC_HCODEC);

	amhcodec_loadmc(fix_mc);

	amhcodec_start();

	/*
	 * NOTE(review): 1 ms busy delay while holding a spinlock with
	 * IRQs disabled is long for atomic context — presumably the
	 * minimum time the fix-up ucode needs to run; confirm.
	 */
	udelay(1000);

	amhcodec_stop();

	/* restore the original power state */
	if (!hcodec_on)
		vdec_poweroff(VDEC_HCODEC);

	spin_unlock_irqrestore(&lock, flags);
}
2500
2501bool amvenc_avc_on(void)
2502{
2503 bool hcodec_on;
2504 ulong flags;
2505
2506 spin_lock_irqsave(&lock, flags);
2507
2508 hcodec_on = vdec_on(VDEC_HCODEC);
2509 hcodec_on &= (encode_manager.wq_count > 0);
2510
2511 spin_unlock_irqrestore(&lock, flags);
2512 return hcodec_on;
2513}
2514
2515static s32 avc_poweron(u32 clock)
2516{
2517 ulong flags;
2518 u32 data32;
2519
2520 data32 = 0;
2521
2522 amports_switch_gate("vdec", 1);
2523
2524 spin_lock_irqsave(&lock, flags);
2525
2526 WRITE_AOREG(AO_RTI_PWR_CNTL_REG0,
2527 (READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18)));
2528 udelay(10);
2529 /* Powerup HCODEC */
2530 /* [1:0] HCODEC */
2531 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2532 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2533 ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
2534 get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
2535 ? ~0x1 : ~0x3));
2536
2537 udelay(10);
2538
2539 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2540 WRITE_VREG(DOS_SW_RESET1, 0);
2541
2542 /* Enable Dos internal clock gating */
2543 hvdec_clock_enable(clock);
2544
2545 /* Powerup HCODEC memories */
2546 WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0);
2547
2548 /* Remove HCODEC ISO */
2549 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2550 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2551 ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
2552 get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
2553 ? ~0x1 : ~0x30));
2554
2555 udelay(10);
2556 /* Disable auto-clock gate */
2557 WRITE_VREG(DOS_GEN_CTRL0,
2558 (READ_VREG(DOS_GEN_CTRL0) | 0x1));
2559 WRITE_VREG(DOS_GEN_CTRL0,
2560 (READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE));
2561
2562 spin_unlock_irqrestore(&lock, flags);
2563
2564 mdelay(10);
2565 return 0;
2566}
2567
/*
 * Power down the HCODEC block — exact reverse order of avc_poweron():
 * isolate, power down memories, gate the clock, assert power-sleep,
 * then release the DOS clk81 gate.  SM1/TM2+ use the single-bit AO
 * register layout, older SoCs the wider masks.  Always returns 0.
 */
static s32 avc_poweroff(void)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	/* enable HCODEC isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x30));

	/* power off HCODEC memories */
	WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);

	/* disable HCODEC clock */
	hvdec_clock_disable();

	/* HCODEC power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x3));

	spin_unlock_irqrestore(&lock, flags);

	/* release DOS clk81 clock gating */
	amports_switch_gate("vdec", 0);
	return 0;
}
2600
/*
 * Stop the encoder, reset it, and reload the microcode selected by the
 * manager's current ucode_index.  Used when a request arrives with a
 * different ucode mode than the one currently loaded.
 * Returns 0 on success, -EBUSY if the ucode load fails.
 */
static s32 reload_mc(struct encode_wq_s *wq)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	amvenc_stop();

	/* full DOS reset pulse before reloading */
	WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
	WRITE_VREG(DOS_SW_RESET1, 0);

	udelay(10);

	/* same MMC control value as amvenc_avc_start() uses */
	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);
	enc_pr(LOG_INFO, "reload microcode\n");

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;
	return 0;
}
2619
2620static void encode_isr_tasklet(ulong data)
2621{
2622 struct encode_manager_s *manager = (struct encode_manager_s *)data;
2623
2624 enc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status);
2625 if (((manager->encode_hw_status == ENCODER_IDR_DONE)
2626 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2627 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2628 || (manager->encode_hw_status == ENCODER_PICTURE_DONE))
2629 && (manager->process_irq)) {
2630 wake_up_interruptible(&manager->event.hw_complete);
2631 }
2632}
2633
2634/* irq function */
/*
 * Mailbox interrupt handler.  Acks the interrupt, latches the ucode's
 * status register into the manager, and — on any of the four "done"
 * states — schedules the tasklet to wake the worker.  process_irq
 * guards against scheduling the tasklet twice for one completion;
 * every done state except SEQUENCE_DONE also marks the HW for reset
 * before the next frame.
 */
static irqreturn_t enc_isr(s32 irq_number, void *para)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)para;

	/* ack the mailbox interrupt first */
	WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);

	manager->encode_hw_status = READ_HREG(ENCODER_STATUS);
	if ((manager->encode_hw_status == ENCODER_IDR_DONE)
		|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
		|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
		|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		enc_pr(LOG_ALL, "encoder stage is %d\n",
			manager->encode_hw_status);
	}

	if (((manager->encode_hw_status == ENCODER_IDR_DONE)
		|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
		|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
		|| (manager->encode_hw_status == ENCODER_PICTURE_DONE))
		&& (!manager->process_irq)) {
		manager->process_irq = true;
		/* SEQUENCE_DONE continues without a full HW reset */
		if (manager->encode_hw_status != ENCODER_SEQUENCE_DONE)
			manager->need_reset = true;
		tasklet_schedule(&manager->encode_tasklet);
	}
	return IRQ_HANDLED;
}
2662
/*
 * Unpack a userspace command buffer (cmd_info, already copied into
 * kernel space by the ioctl handler) into wq->request.
 *
 * cmd_info[0] is the command.  For ENCODER_SEQUENCE only a handful of
 * scalar fields follow; for ENCODER_IDR / ENCODER_NON_IDR the layout
 * is: 17 scalar words, then the three 32-byte quant tables plus three
 * weight words (when quant == ADJUSTED_QP_FLAG), then CBR parameters,
 * then (for DMA_BUFF sources) the per-plane dmabuf fds.  The offsets
 * are an ABI with userspace — do not reorder.
 *
 * Returns 0 on success, -1 on bad command / failed dmabuf import.
 */
static s32 convert_request(struct encode_wq_s *wq, u32 *cmd_info)
{
	int i = 0;
	u8 *ptr;
	u32 data_offset;
	u32 cmd = cmd_info[0];
	unsigned long paddr = 0;
	struct enc_dma_cfg *cfg = NULL;
	s32 ret = 0;
	struct platform_device *pdev;

	if (!wq)
		return -1;
	memset(&wq->request, 0, sizeof(struct encode_request_s));
	/* default weights; ADJUSTED_QP_FLAG path subtracts deltas below */
	wq->request.me_weight = ME_WEIGHT_OFFSET;
	wq->request.i4_weight = I4MB_WEIGHT_OFFSET;
	wq->request.i16_weight = I16MB_WEIGHT_OFFSET;

	if (cmd == ENCODER_SEQUENCE) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.quant = cmd_info[2];
		wq->request.flush_flag = cmd_info[3];
		wq->request.timeout = cmd_info[4];
		/* userspace timeout deliberately overridden */
		wq->request.timeout = 5000; /* 5000 ms */
	} else if ((cmd == ENCODER_IDR) || (cmd == ENCODER_NON_IDR)) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.type = cmd_info[2];
		wq->request.fmt = cmd_info[3];
		wq->request.src = cmd_info[4];
		wq->request.framesize = cmd_info[5];
		wq->request.quant = cmd_info[6];
		wq->request.flush_flag = cmd_info[7];
		wq->request.timeout = cmd_info[8];
		wq->request.crop_top = cmd_info[9];
		wq->request.crop_bottom = cmd_info[10];
		wq->request.crop_left = cmd_info[11];
		wq->request.crop_right = cmd_info[12];
		wq->request.src_w = cmd_info[13];
		wq->request.src_h = cmd_info[14];
		wq->request.scale_enable = cmd_info[15];

		enc_pr(LOG_INFO, "hwenc: wq->pic.encoder_width %d, ",
			wq->pic.encoder_width);
		enc_pr(LOG_INFO, "wq->pic.encoder_height:%d, request fmt=%d\n",
			wq->pic.encoder_height, wq->request.fmt);

		/* force the scaler on for >=720p RGBA input,
		 * overriding whatever userspace asked for */
		if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720 && wq->request.fmt == FMT_RGBA8888) {
			wq->request.scale_enable = 1;
			wq->request.src_w = wq->pic.encoder_width;
			wq->request.src_h = wq->pic.encoder_height;
			pr_err("hwenc: force wq->request.scale_enable=%d\n", wq->request.scale_enable);
		}

		/* module parameter nr_mode overrides the request */
		wq->request.nr_mode =
			(nr_mode > 0) ? nr_mode : cmd_info[16];
		/* noise reduction is disabled on IDR frames */
		if (cmd == ENCODER_IDR)
			wq->request.nr_mode = 0;

		/* offset of the 3 weight words that follow the tables */
		data_offset = 17 +
			(sizeof(wq->quant_tbl_i4)
			+ sizeof(wq->quant_tbl_i16)
			+ sizeof(wq->quant_tbl_me)) / 4;

		if (wq->request.quant == ADJUSTED_QP_FLAG) {
			/* per-QP tables supplied by userspace */
			ptr = (u8 *) &cmd_info[17];
			memcpy(wq->quant_tbl_i4, ptr,
				sizeof(wq->quant_tbl_i4));
			ptr += sizeof(wq->quant_tbl_i4);
			memcpy(wq->quant_tbl_i16, ptr,
				sizeof(wq->quant_tbl_i16));
			ptr += sizeof(wq->quant_tbl_i16);
			memcpy(wq->quant_tbl_me, ptr,
				sizeof(wq->quant_tbl_me));
			/* userspace words are deltas from the defaults */
			wq->request.i4_weight -=
				cmd_info[data_offset++];
			wq->request.i16_weight -=
				cmd_info[data_offset++];
			wq->request.me_weight -=
				cmd_info[data_offset++];
			if (qp_table_debug) {
				u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]);

				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_i16[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_me[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");
			}
		} else {
			/* flat QP: fill all three tables with it */
			memset(wq->quant_tbl_me, wq->request.quant,
				sizeof(wq->quant_tbl_me));
			memset(wq->quant_tbl_i4, wq->request.quant,
				sizeof(wq->quant_tbl_i4));
			memset(wq->quant_tbl_i16, wq->request.quant,
				sizeof(wq->quant_tbl_i16));
			data_offset += 3;
		}
#ifdef H264_ENC_CBR
		wq->cbr_info.block_w = cmd_info[data_offset++];
		wq->cbr_info.block_h = cmd_info[data_offset++];
		wq->cbr_info.long_th = cmd_info[data_offset++];
		wq->cbr_info.start_tbl_id = cmd_info[data_offset++];
		wq->cbr_info.short_shift = CBR_SHORT_SHIFT;
		wq->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
#endif
		/* recompute absolutely: 17 scalars + tables + 3 weight
		 * words + 4 CBR words (the +7 presumably covers CBR and
		 * padding even when H264_ENC_CBR is off — confirm) */
		data_offset = 17 +
			(sizeof(wq->quant_tbl_i4)
			+ sizeof(wq->quant_tbl_i16)
			+ sizeof(wq->quant_tbl_me)) / 4 + 7;

		if (wq->request.type == DMA_BUFF) {
			wq->request.plane_num = cmd_info[data_offset++];
			enc_pr(LOG_INFO, "wq->request.plane_num %d\n",
				wq->request.plane_num);
			if (wq->request.fmt == FMT_NV12 ||
				wq->request.fmt == FMT_NV21 ||
				wq->request.fmt == FMT_YUV420) {
				for (i = 0; i < wq->request.plane_num; i++) {
					cfg = &wq->request.dma_cfg[i];
					cfg->dir = DMA_TO_DEVICE;
					cfg->fd = cmd_info[data_offset++];
					pdev = encode_manager.this_pdev;
					cfg->dev = &(pdev->dev);

					ret = enc_dma_buf_get_phys(cfg, &paddr);
					if (ret < 0) {
						enc_pr(LOG_ERROR,
							"import fd %d failed\n",
							cfg->fd);
						cfg->paddr = NULL;
						cfg->vaddr = NULL;
						/* NOTE(review): planes
						 * imported before this one
						 * are not released here —
						 * possible dmabuf leak;
						 * verify against caller's
						 * cleanup path */
						return -1;
					}
					cfg->paddr = (void *)paddr;
					enc_pr(LOG_INFO, "vaddr %p\n",
						cfg->vaddr);
				}
			} else {
				enc_pr(LOG_ERROR, "error fmt = %d\n",
					wq->request.fmt);
			}
		}

	} else {
		enc_pr(LOG_ERROR, "error cmd = %d, wq: %p.\n",
			cmd, (void *)wq);
		return -1;
	}
	wq->request.parent = wq;
	return 0;
}
2830
/*
 * Kick one encode command (SEQUENCE / PICTURE / IDR / NON_IDR) on the
 * hardware.  Reloads microcode if the request's ucode mode differs
 * from the currently loaded one, re-initializes the full HW pipeline
 * when a reset is pending, programs per-frame buffers and parameters,
 * then hands the command to the ucode via ENCODER_STATUS.
 */
void amvenc_avc_start_cmd(struct encode_wq_s *wq,
	struct encode_request_s *request)
{
	u32 reload_flag = 0;

	/* switch microcode if this request uses a different ucode mode */
	if (request->ucode_mode != encode_manager.ucode_index) {
		encode_manager.ucode_index = request->ucode_mode;
		if (reload_mc(wq)) {
			enc_pr(LOG_ERROR,
				"reload mc fail, wq:%p\n", (void *)wq);
			return;
		}
		reload_flag = 1;
		encode_manager.need_reset = true;
	}

	wq->hw_status = 0;
	wq->output_size = 0;
	wq->ucode_index = encode_manager.ucode_index;

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	/* full pipeline re-init after a reset was requested (by the ISR
	 * on frame completion, or by the ucode reload above) */
	if (encode_manager.need_reset) {
		encode_manager.need_reset = false;
		encode_manager.encode_hw_status = ENCODER_IDLE;
		amvenc_reset();
		avc_canvas_init(wq);
		avc_init_encoder(wq,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_input_buffer(wq);
		avc_init_output_buffer(wq);
		avc_prot_init(wq, request, request->quant,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_assit_buffer(wq);
		enc_pr(LOG_INFO,
			"begin to new frame, request->cmd: %d, ucode mode: %d, wq:%p\n",
			request->cmd, request->ucode_mode, (void *)wq);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR)) {
#ifdef H264_ENC_SVC
		/* encode non reference frame or not */
		if (request->cmd == ENCODER_IDR)
			wq->pic.non_ref_cnt = 0; //IDR reset counter
		if (wq->pic.enable_svc && wq->pic.non_ref_cnt) {
			enc_pr(LOG_INFO,
				"PIC is NON REF cmd %d cnt %d value 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_NON_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_NON_REF);
		} else {
			enc_pr(LOG_INFO,
				"PIC is REF cmd %d cnt %d val 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
		}
#else
		/* if FW defined but not defined SVC in driver here*/
		WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
#endif
		/* deblock + reference canvases must be set every frame */
		avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
		avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR))
		set_input_format(wq, request);

	/* macroblock type policy: I4 only for IDR, auto for P frames */
	if (request->cmd == ENCODER_IDR)
		ie_me_mb_type = HENC_MB_Type_I4MB;
	else if (request->cmd == ENCODER_NON_IDR)
		ie_me_mb_type =
			(HENC_SKIP_RUN_AUTO << 16) |
			(HENC_MB_Type_AUTO << 4) |
			(HENC_MB_Type_AUTO << 0);
	else
		ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, request->quant);

#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif

	/* hand the command to the ucode */
	encode_manager.encode_hw_status = request->cmd;
	wq->hw_status = request->cmd;
	WRITE_HREG(ENCODER_STATUS, request->cmd);
	if ((request->cmd == ENCODER_IDR)
		|| (request->cmd == ENCODER_NON_IDR)
		|| (request->cmd == ENCODER_SEQUENCE)
		|| (request->cmd == ENCODER_PICTURE))
		encode_manager.process_irq = false;

	/* a fresh ucode load needs an explicit start */
	if (reload_flag)
		amvenc_start();
	enc_pr(LOG_ALL, "amvenc_avc_start cmd out, request:%p.\n", (void*)request);
}
2937
2938static void dma_flush(u32 buf_start, u32 buf_size)
2939{
2940 if ((buf_start == 0) || (buf_size == 0))
2941 return;
2942 dma_sync_single_for_device(
2943 &encode_manager.this_pdev->dev, buf_start,
2944 buf_size, DMA_TO_DEVICE);
2945}
2946
2947static void cache_flush(u32 buf_start, u32 buf_size)
2948{
2949 if ((buf_start == 0) || (buf_size == 0))
2950 return;
2951 dma_sync_single_for_cpu(
2952 &encode_manager.this_pdev->dev, buf_start,
2953 buf_size, DMA_FROM_DEVICE);
2954}
2955
2956static u32 getbuffer(struct encode_wq_s *wq, u32 type)
2957{
2958 u32 ret = 0;
2959
2960 switch (type) {
2961 case ENCODER_BUFFER_INPUT:
2962 ret = wq->mem.dct_buff_start_addr;
2963 break;
2964 case ENCODER_BUFFER_REF0:
2965 ret = wq->mem.dct_buff_start_addr +
2966 wq->mem.bufspec.dec0_y.buf_start;
2967 break;
2968 case ENCODER_BUFFER_REF1:
2969 ret = wq->mem.dct_buff_start_addr +
2970 wq->mem.bufspec.dec1_y.buf_start;
2971 break;
2972 case ENCODER_BUFFER_OUTPUT:
2973 ret = wq->mem.BitstreamStart;
2974 break;
2975 case ENCODER_BUFFER_DUMP:
2976 ret = wq->mem.dump_info_ddr_start_addr;
2977 break;
2978 case ENCODER_BUFFER_CBR:
2979 ret = wq->mem.cbr_info_ddr_start_addr;
2980 break;
2981 default:
2982 break;
2983 }
2984 return ret;
2985}
2986
/*
 * Full bring-up of the AVC encoder for a work queue: power on at
 * @clock level, load microcode, reset and initialize the whole HW
 * pipeline, install the mailbox IRQ handler, and start the ucode.
 * Returns 0 on success, -EBUSY if the microcode load fails.
 */
s32 amvenc_avc_start(struct encode_wq_s *wq, u32 clock)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	avc_poweron(clock);
	avc_canvas_init(wq);

	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;

	encode_manager.need_reset = true;
	encode_manager.process_irq = false;
	encode_manager.encode_hw_status = ENCODER_IDLE;
	amvenc_reset();
	avc_init_encoder(wq, true);
	avc_init_input_buffer(wq); /* dct buffer setting */
	avc_init_output_buffer(wq); /* output stream buffer */

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	avc_prot_init(wq, NULL, wq->pic.init_qppicture, true);
	/* IRQ failure is tolerated: irq_requested just stays false and
	 * completion is never signalled via interrupt */
	if (request_irq(encode_manager.irq_num, enc_isr, IRQF_SHARED,
		"enc-irq", (void *)&encode_manager) == 0)
		encode_manager.irq_requested = true;
	else
		encode_manager.irq_requested = false;

	/* decoder buffer , need set before each frame start */
	avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
	/* reference buffer , need set before each frame start */
	avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	avc_init_assit_buffer(wq); /* assitant buffer for microcode */
	ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, wq->pic.init_qppicture);
	WRITE_HREG(ENCODER_STATUS, ENCODER_IDLE);

#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
	amvenc_start();
	return 0;
}
3041
3042void amvenc_avc_stop(void)
3043{
3044 if ((encode_manager.irq_num >= 0) &&
3045 (encode_manager.irq_requested == true)) {
3046 free_irq(encode_manager.irq_num, &encode_manager);
3047 encode_manager.irq_requested = false;
3048 }
3049 amvenc_stop();
3050 avc_poweroff();
3051}
3052
3053static s32 avc_init(struct encode_wq_s *wq)
3054{
3055 s32 r = 0;
3056
3057 encode_manager.ucode_index = wq->ucode_index;
3058 r = amvenc_avc_start(wq, clock_level);
3059
3060 enc_pr(LOG_DEBUG,
3061 "init avc encode. microcode %d, ret=%d, wq:%p.\n",
3062 encode_manager.ucode_index, r, (void *)wq);
3063 return 0;
3064}
3065
3066static s32 amvenc_avc_light_reset(struct encode_wq_s *wq, u32 value)
3067{
3068 s32 r = 0;
3069
3070 amvenc_avc_stop();
3071
3072 mdelay(value);
3073
3074 encode_manager.ucode_index = UCODE_MODE_FULL;
3075 r = amvenc_avc_start(wq, clock_level);
3076
3077 enc_pr(LOG_DEBUG,
3078 "amvenc_avc_light_reset finish, wq:%p. ret=%d\n",
3079 (void *)wq, r);
3080 return r;
3081}
3082
#ifdef CONFIG_CMA
/* Number of encoder instances (MIN_SIZE each) the configured CMA pool
 * can hold; 0 when no pool is configured. */
static u32 checkCMA(void)
{
	u32 instances = 0;

	if (encode_manager.cma_pool_size > 0)
		instances = encode_manager.cma_pool_size / MIN_SIZE;

	return instances;
}
#endif
3096
3097/* file operation */
/*
 * open() handler for the encoder device node.  Refuses to open while
 * the JPEG encoder owns HCODEC, verifies the CMA pool once, creates a
 * per-open work queue, and (when not using the reserved region)
 * allocates its MIN_SIZE DMA buffer from codec_mm.  The work queue is
 * stashed in file->private_data for the other file ops.
 */
static s32 amvenc_avc_open(struct inode *inode, struct file *file)
{
	s32 r = 0;
	struct encode_wq_s *wq = NULL;

	file->private_data = NULL;
	enc_pr(LOG_DEBUG, "avc open\n");
#ifdef CONFIG_AM_JPEG_ENCODER
	/* HCODEC is shared with the JPEG encoder — mutual exclusion */
	if (jpegenc_on() == true) {
		enc_pr(LOG_ERROR,
			"hcodec in use for JPEG Encode now.\n");
		return -EBUSY;
	}
#endif

#ifdef CONFIG_CMA
	/* one-time CMA pool capacity check (cached in check_cma) */
	if ((encode_manager.use_reserve == false) &&
		(encode_manager.check_cma == false)) {
		encode_manager.max_instance = checkCMA();
		if (encode_manager.max_instance > 0) {
			enc_pr(LOG_DEBUG,
				"amvenc_avc check CMA pool success, max instance: %d.\n",
				encode_manager.max_instance);
		} else {
			enc_pr(LOG_ERROR,
				"amvenc_avc CMA pool too small.\n");
		}
		encode_manager.check_cma = true;
	}
#endif

	wq = create_encode_work_queue();
	if (wq == NULL) {
		enc_pr(LOG_ERROR, "amvenc_avc create instance fail.\n");
		return -EBUSY;
	}

#ifdef CONFIG_CMA
	/* no reserved memory: back the queue with a codec_mm CMA block */
	if (encode_manager.use_reserve == false) {
		wq->mem.buf_start = codec_mm_alloc_for_dma(ENCODE_NAME,
			MIN_SIZE >> PAGE_SHIFT, 0,
			CODEC_MM_FLAGS_CPU);
		if (wq->mem.buf_start) {
			wq->mem.buf_size = MIN_SIZE;
			enc_pr(LOG_DEBUG,
				"allocating phys 0x%x, size %dk, wq:%p.\n",
				wq->mem.buf_start,
				wq->mem.buf_size >> 10, (void *)wq);
		} else {
			enc_pr(LOG_ERROR,
				"CMA failed to allocate dma buffer for %s, wq:%p.\n",
				encode_manager.this_pdev->name,
				(void *)wq);
			destroy_encode_work_queue(wq);
			return -ENOMEM;
		}
	}
#endif

	/* either path (reserved or CMA) must have produced a buffer */
	if (wq->mem.buf_start == 0 ||
		wq->mem.buf_size < MIN_SIZE) {
		enc_pr(LOG_ERROR,
			"alloc mem failed, start: 0x%x, size:0x%x, wq:%p.\n",
			wq->mem.buf_start,
			wq->mem.buf_size, (void *)wq);
		destroy_encode_work_queue(wq);
		return -ENOMEM;
	}

	memcpy(&wq->mem.bufspec, &amvenc_buffspec[0],
		sizeof(struct BuffInfo_s));

	enc_pr(LOG_DEBUG,
		"amvenc_avc memory config success, buff start:0x%x, size is 0x%x, wq:%p.\n",
		wq->mem.buf_start, wq->mem.buf_size, (void *)wq);

	file->private_data = (void *) wq;
	return r;
}
3177
3178static s32 amvenc_avc_release(struct inode *inode, struct file *file)
3179{
3180 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3181
3182 if (wq) {
3183 enc_pr(LOG_DEBUG, "avc release, wq:%p\n", (void *)wq);
3184 destroy_encode_work_queue(wq);
3185 }
3186 return 0;
3187}
3188
3189static long amvenc_avc_ioctl(struct file *file, u32 cmd, ulong arg)
3190{
3191 long r = 0;
3192 u32 amrisc_cmd = 0;
3193 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3194#define MAX_ADDR_INFO_SIZE 52
3195 u32 addr_info[MAX_ADDR_INFO_SIZE + 4];
3196 ulong argV;
3197 u32 buf_start;
3198 s32 canvas = -1;
3199 struct canvas_s dst;
3200
3201 switch (cmd) {
3202 case AMVENC_AVC_IOC_GET_ADDR:
3203 if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET))
3204 put_user(1, (u32 *)arg);
3205 else
3206 put_user(2, (u32 *)arg);
3207 break;
3208 case AMVENC_AVC_IOC_INPUT_UPDATE:
3209 break;
3210 case AMVENC_AVC_IOC_NEW_CMD:
3211 if (copy_from_user(addr_info, (void *)arg,
3212 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3213 enc_pr(LOG_ERROR,
3214 "avc get new cmd error, wq:%p.\n", (void *)wq);
3215 return -1;
3216 }
3217 r = convert_request(wq, addr_info);
3218 if (r == 0)
3219 r = encode_wq_add_request(wq);
3220 if (r) {
3221 enc_pr(LOG_ERROR,
3222 "avc add new request error, wq:%p.\n",
3223 (void *)wq);
3224 }
3225 break;
3226 case AMVENC_AVC_IOC_GET_STAGE:
3227 put_user(wq->hw_status, (u32 *)arg);
3228 break;
3229 case AMVENC_AVC_IOC_GET_OUTPUT_SIZE:
3230 addr_info[0] = wq->output_size;
3231 addr_info[1] = wq->me_weight;
3232 addr_info[2] = wq->i4_weight;
3233 addr_info[3] = wq->i16_weight;
3234 r = copy_to_user((u32 *)arg,
3235 addr_info, 4 * sizeof(u32));
3236 break;
3237 case AMVENC_AVC_IOC_CONFIG_INIT:
3238 if (copy_from_user(addr_info, (void *)arg,
3239 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3240 enc_pr(LOG_ERROR,
3241 "avc config init error, wq:%p.\n", (void *)wq);
3242 return -1;
3243 }
3244 wq->ucode_index = UCODE_MODE_FULL;
3245#ifdef MULTI_SLICE_MC
3246 wq->pic.rows_per_slice = addr_info[1];
3247 enc_pr(LOG_DEBUG,
3248 "avc init -- rows_per_slice: %d, wq: %p.\n",
3249 wq->pic.rows_per_slice, (void *)wq);
3250#endif
3251 enc_pr(LOG_DEBUG,
3252 "avc init as mode %d, wq: %p.\n",
3253 wq->ucode_index, (void *)wq);
3254
3255 if (addr_info[2] > wq->mem.bufspec.max_width ||
3256 addr_info[3] > wq->mem.bufspec.max_height) {
3257 enc_pr(LOG_ERROR,
3258 "avc config init- encode size %dx%d is larger than supported (%dx%d). wq:%p.\n",
3259 addr_info[2], addr_info[3],
3260 wq->mem.bufspec.max_width,
3261 wq->mem.bufspec.max_height, (void *)wq);
3262 return -1;
3263 }
3264 pr_err("hwenc: AMVENC_AVC_IOC_CONFIG_INIT: w:%d, h:%d\n", wq->pic.encoder_width, wq->pic.encoder_height);
3265 wq->pic.encoder_width = addr_info[2];
3266 wq->pic.encoder_height = addr_info[3];
3267 if (wq->pic.encoder_width *
3268 wq->pic.encoder_height >= 1280 * 720)
3269 clock_level = 6;
3270 else
3271 clock_level = 5;
3272 avc_buffspec_init(wq);
3273 complete(&encode_manager.event.request_in_com);
3274 addr_info[1] = wq->mem.bufspec.dct.buf_start;
3275 addr_info[2] = wq->mem.bufspec.dct.buf_size;
3276 addr_info[3] = wq->mem.bufspec.bitstream.buf_start;
3277 addr_info[4] = wq->mem.bufspec.bitstream.buf_size;
3278 addr_info[5] = wq->mem.bufspec.scale_buff.buf_start;
3279 addr_info[6] = wq->mem.bufspec.scale_buff.buf_size;
3280 addr_info[7] = wq->mem.bufspec.dump_info.buf_start;
3281 addr_info[8] = wq->mem.bufspec.dump_info.buf_size;
3282 addr_info[9] = wq->mem.bufspec.cbr_info.buf_start;
3283 addr_info[10] = wq->mem.bufspec.cbr_info.buf_size;
3284 r = copy_to_user((u32 *)arg, addr_info, 11*sizeof(u32));
3285 break;
3286 case AMVENC_AVC_IOC_FLUSH_CACHE:
3287 if (copy_from_user(addr_info, (void *)arg,
3288 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3289 enc_pr(LOG_ERROR,
3290 "avc flush cache error, wq: %p.\n", (void *)wq);
3291 return -1;
3292 }
3293 buf_start = getbuffer(wq, addr_info[0]);
3294 dma_flush(buf_start + addr_info[1],
3295 addr_info[2] - addr_info[1]);
3296 break;
3297 case AMVENC_AVC_IOC_FLUSH_DMA:
3298 if (copy_from_user(addr_info, (void *)arg,
3299 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3300 enc_pr(LOG_ERROR,
3301 "avc flush dma error, wq:%p.\n", (void *)wq);
3302 return -1;
3303 }
3304 buf_start = getbuffer(wq, addr_info[0]);
3305 cache_flush(buf_start + addr_info[1],
3306 addr_info[2] - addr_info[1]);
3307 break;
3308 case AMVENC_AVC_IOC_GET_BUFFINFO:
3309 put_user(wq->mem.buf_size, (u32 *)arg);
3310 break;
3311 case AMVENC_AVC_IOC_GET_DEVINFO:
3312 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
3313 /* send the same id as GXTVBB to upper*/
3314 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3315 strlen(AMVENC_DEVINFO_GXTVBB));
3316 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) {
3317 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3318 strlen(AMVENC_DEVINFO_GXTVBB));
3319 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
3320 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXBB,
3321 strlen(AMVENC_DEVINFO_GXBB));
3322 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_MG9TV) {
3323 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_G9,
3324 strlen(AMVENC_DEVINFO_G9));
3325 } else {
3326 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_M8,
3327 strlen(AMVENC_DEVINFO_M8));
3328 }
3329 break;
3330 case AMVENC_AVC_IOC_SUBMIT:
3331 get_user(amrisc_cmd, ((u32 *)arg));
3332 if (amrisc_cmd == ENCODER_IDR) {
3333 wq->pic.idr_pic_id++;
3334 if (wq->pic.idr_pic_id > 65535)
3335 wq->pic.idr_pic_id = 0;
3336 wq->pic.pic_order_cnt_lsb = 2;
3337 wq->pic.frame_number = 1;
3338 } else if (amrisc_cmd == ENCODER_NON_IDR) {
3339#ifdef H264_ENC_SVC
3340 /* only update when there is reference frame */
3341 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3342 wq->pic.frame_number++;
3343 enc_pr(LOG_INFO, "Increase frame_num to %d\n",
3344 wq->pic.frame_number);
3345 }
3346#else
3347 wq->pic.frame_number++;
3348#endif
3349
3350 wq->pic.pic_order_cnt_lsb += 2;
3351 if (wq->pic.frame_number > 65535)
3352 wq->pic.frame_number = 0;
3353 }
3354#ifdef H264_ENC_SVC
3355 /* only update when there is reference frame */
3356 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3357 amrisc_cmd = wq->mem.dblk_buf_canvas;
3358 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3359 /* current dblk buffer as next reference buffer */
3360 wq->mem.ref_buf_canvas = amrisc_cmd;
3361 enc_pr(LOG_INFO,
3362 "switch buffer enable %d cnt %d\n",
3363 wq->pic.enable_svc, wq->pic.non_ref_cnt);
3364 }
3365 if (wq->pic.enable_svc) {
3366 wq->pic.non_ref_cnt ++;
3367 if (wq->pic.non_ref_cnt > wq->pic.non_ref_limit) {
3368 enc_pr(LOG_INFO, "Svc clear cnt %d conf %d\n",
3369 wq->pic.non_ref_cnt,
3370 wq->pic.non_ref_limit);
3371 wq->pic.non_ref_cnt = 0;
3372 } else
3373 enc_pr(LOG_INFO,"Svc increase non ref counter to %d\n",
3374 wq->pic.non_ref_cnt );
3375 }
3376#else
3377 amrisc_cmd = wq->mem.dblk_buf_canvas;
3378 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3379 /* current dblk buffer as next reference buffer */
3380 wq->mem.ref_buf_canvas = amrisc_cmd;
3381#endif
3382 break;
3383 case AMVENC_AVC_IOC_READ_CANVAS:
3384 get_user(argV, ((u32 *)arg));
3385 canvas = argV;
3386 if (canvas & 0xff) {
3387 canvas_read(canvas & 0xff, &dst);
3388 addr_info[0] = dst.addr;
3389 if ((canvas & 0xff00) >> 8)
3390 canvas_read((canvas & 0xff00) >> 8, &dst);
3391 if ((canvas & 0xff0000) >> 16)
3392 canvas_read((canvas & 0xff0000) >> 16, &dst);
3393 addr_info[1] = dst.addr - addr_info[0] +
3394 dst.width * dst.height;
3395 } else {
3396 addr_info[0] = 0;
3397 addr_info[1] = 0;
3398 }
3399 dma_flush(dst.addr, dst.width * dst.height * 3 / 2);
3400 r = copy_to_user((u32 *)arg, addr_info, 2 * sizeof(u32));
3401 break;
3402 case AMVENC_AVC_IOC_MAX_INSTANCE:
3403 put_user(encode_manager.max_instance, (u32 *)arg);
3404 break;
3405 case AMVENC_AVC_IOC_QP_MODE:
3406 get_user(qp_mode, ((u32 *)arg));
3407 pr_info("qp_mode %d\n", qp_mode);
3408 break;
3409 default:
3410 r = -1;
3411 break;
3412 }
3413 return r;
3414}
3415
3416#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl shim: translate the user pointer with
 * compat_ptr() and forward to the native ioctl handler.
 */
static long amvenc_avc_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	return amvenc_avc_ioctl(filp, cmd,
		(unsigned long)compat_ptr(args));
}
3426#endif
3427
3428static s32 avc_mmap(struct file *filp, struct vm_area_struct *vma)
3429{
3430 struct encode_wq_s *wq = (struct encode_wq_s *)filp->private_data;
3431 ulong off = vma->vm_pgoff << PAGE_SHIFT;
3432 ulong vma_size = vma->vm_end - vma->vm_start;
3433
3434 if (vma_size == 0) {
3435 enc_pr(LOG_ERROR, "vma_size is 0, wq:%p.\n", (void *)wq);
3436 return -EAGAIN;
3437 }
3438 if (!off)
3439 off += wq->mem.buf_start;
3440 enc_pr(LOG_ALL,
3441 "vma_size is %ld , off is %ld, wq:%p.\n",
3442 vma_size, off, (void *)wq);
3443 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
3444 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
3445 if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
3446 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
3447 enc_pr(LOG_ERROR,
3448 "set_cached: failed remap_pfn_range, wq:%p.\n",
3449 (void *)wq);
3450 return -EAGAIN;
3451 }
3452 return 0;
3453}
3454
3455static u32 amvenc_avc_poll(struct file *file, poll_table *wait_table)
3456{
3457 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3458
3459 poll_wait(file, &wq->request_complete, wait_table);
3460
3461 if (atomic_read(&wq->request_ready)) {
3462 atomic_dec(&wq->request_ready);
3463 return POLLIN | POLLRDNORM;
3464 }
3465 return 0;
3466}
3467
/* File operations for the amvenc_avc character device node. */
static const struct file_operations amvenc_avc_fops = {
	.owner = THIS_MODULE,
	.open = amvenc_avc_open,
	.mmap = avc_mmap,
	.release = amvenc_avc_release,
	.unlocked_ioctl = amvenc_avc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amvenc_avc_compat_ioctl,
#endif
	.poll = amvenc_avc_poll,
};
3479
3480/* work queue function */
/*
 * Execute one queued encode request on the hardware and wait for the
 * ISR to report completion.  Runs on the monitor thread.
 *
 * On SEQUENCE completion it immediately re-issues the PICTURE stage
 * (SPS then PPS).  On IDR/NON_IDR completion it records the output
 * size and performs the cache flushes requested via flush_flag.  On
 * timeout/unknown status it performs a light reset of the encoder.
 *
 * Always signals the waiting userspace poller before returning.
 */
static s32 encode_process_request(struct encode_manager_s *manager,
	struct encode_queue_item_s *pitem)
{
	s32 ret = 0;
	struct encode_wq_s *wq = pitem->request.parent;
	struct encode_request_s *request = &pitem->request;
	/* timeout == 0 means wait a single jiffy; otherwise ms -> jiffies */
	u32 timeout = (request->timeout == 0) ?
		1 : msecs_to_jiffies(request->timeout);
	u32 buf_start = 0;
	u32 size = 0;
	/* reference-frame flush size: width aligned to 32, height to 16,
	 * * 3/2 for a 4:2:0 frame */
	u32 flush_size = ((wq->pic.encoder_width + 31) >> 5 << 5) *
		((wq->pic.encoder_height + 15) >> 4 << 4) * 3 / 2;

	struct enc_dma_cfg *cfg = NULL;
	int i = 0;

#ifdef H264_ENC_CBR
	/* push the CBR rate-control table to the AMRISC before encoding */
	if (request->cmd == ENCODER_IDR || request->cmd == ENCODER_NON_IDR) {
		if (request->flush_flag & AMVENC_FLUSH_FLAG_CBR
			&& get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
			void *vaddr = wq->mem.cbr_info_ddr_virt_addr;
			ConvertTable2Risc(vaddr, 0xa00);
			buf_start = getbuffer(wq, ENCODER_BUFFER_CBR);
			codec_mm_dma_flush(vaddr, wq->mem.cbr_info_ddr_size, DMA_TO_DEVICE);
		}
	}
#endif

Again:
	amvenc_avc_start_cmd(wq, request);

	/* block until the ISR moves encode_hw_status to a *_DONE state
	 * (or the timeout elapses when no_timeout is clear) */
	if (no_timeout) {
		wait_event_interruptible(manager->event.hw_complete,
			(manager->encode_hw_status == ENCODER_IDR_DONE
			|| manager->encode_hw_status == ENCODER_NON_IDR_DONE
			|| manager->encode_hw_status == ENCODER_SEQUENCE_DONE
			|| manager->encode_hw_status == ENCODER_PICTURE_DONE));
	} else {
		wait_event_interruptible_timeout(manager->event.hw_complete,
			((manager->encode_hw_status == ENCODER_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
			|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)),
			timeout);
	}

	if ((request->cmd == ENCODER_SEQUENCE) &&
		(manager->encode_hw_status == ENCODER_SEQUENCE_DONE)) {
		/* SPS written; chain straight into the PPS stage */
		wq->sps_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
		wq->hw_status = manager->encode_hw_status;
		request->cmd = ENCODER_PICTURE;
		goto Again;
	} else if ((request->cmd == ENCODER_PICTURE) &&
		(manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		/* PPS size = total bytes minus the SPS already counted */
		wq->pps_size =
			READ_HREG(HCODEC_VLC_TOTAL_BYTES) - wq->sps_size;
		wq->hw_status = manager->encode_hw_status;
		if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
			buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
			cache_flush(buf_start,
				wq->sps_size + wq->pps_size);
		}
		/* pack both sizes into one word: SPS high, PPS low */
		wq->output_size = (wq->sps_size << 16) | wq->pps_size;
	} else {
		wq->hw_status = manager->encode_hw_status;
		if ((manager->encode_hw_status == ENCODER_IDR_DONE) ||
			(manager->encode_hw_status == ENCODER_NON_IDR_DONE)) {
			wq->output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
			if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_OUTPUT);
				cache_flush(buf_start, wq->output_size);
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_DUMP) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_DUMP);
				size = wq->mem.dump_info_ddr_size;
				cache_flush(buf_start, size);
				//enc_pr(LOG_DEBUG, "CBR flush dump_info done");
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_REFERENCE) {
				u32 ref_id = ENCODER_BUFFER_REF0;

				/* pick whichever bank is the current reference */
				if ((wq->mem.ref_buf_canvas & 0xff) ==
					(ENC_CANVAS_OFFSET))
					ref_id = ENCODER_BUFFER_REF0;
				else
					ref_id = ENCODER_BUFFER_REF1;
				buf_start = getbuffer(wq, ref_id);
				cache_flush(buf_start, flush_size);
			}
		} else {
			/* unexpected status (timeout/interrupt) -> light reset */
			manager->encode_hw_status = ENCODER_ERROR;
			enc_pr(LOG_DEBUG, "avc encode light reset --- ");
			enc_pr(LOG_DEBUG,
				"frame type: %s, size: %dx%d, wq: %p\n",
				(request->cmd == ENCODER_IDR) ? "IDR" : "P",
				wq->pic.encoder_width,
				wq->pic.encoder_height, (void *)wq);
			enc_pr(LOG_DEBUG,
				"mb info: 0x%x, encode status: 0x%x, dct status: 0x%x ",
				READ_HREG(HCODEC_VLC_MB_INFO),
				READ_HREG(ENCODER_STATUS),
				READ_HREG(HCODEC_QDCT_STATUS_CTRL));
			enc_pr(LOG_DEBUG,
				"vlc status: 0x%x, me status: 0x%x, risc pc:0x%x, debug:0x%x\n",
				READ_HREG(HCODEC_VLC_STATUS_CTRL),
				READ_HREG(HCODEC_ME_STATUS),
				READ_HREG(HCODEC_MPC_E),
				READ_HREG(DEBUG_REG));
			amvenc_avc_light_reset(wq, 30);
		}
		/* unmap any dma-buf input planes attached to this request */
		for (i = 0; i < request->plane_num; i++) {
			cfg = &request->dma_cfg[i];
			enc_pr(LOG_INFO, "request vaddr %p, paddr %p\n",
				cfg->vaddr, cfg->paddr);
			if (cfg->fd >= 0 && cfg->vaddr != NULL)
				enc_dma_buf_unmap(cfg);
		}
	}
	/* wake the userspace poller waiting in amvenc_avc_poll() */
	atomic_inc(&wq->request_ready);
	wake_up_interruptible(&wq->request_complete);
	return ret;
}
3607
3608s32 encode_wq_add_request(struct encode_wq_s *wq)
3609{
3610 struct encode_queue_item_s *pitem = NULL;
3611 struct list_head *head = NULL;
3612 struct encode_wq_s *tmp = NULL;
3613 bool find = false;
3614
3615 spin_lock(&encode_manager.event.sem_lock);
3616
3617 head = &encode_manager.wq;
3618 list_for_each_entry(tmp, head, list) {
3619 if ((wq == tmp) && (wq != NULL)) {
3620 find = true;
3621 break;
3622 }
3623 }
3624
3625 if (find == false) {
3626 enc_pr(LOG_ERROR, "current wq (%p) doesn't register.\n",
3627 (void *)wq);
3628 goto error;
3629 }
3630
3631 if (list_empty(&encode_manager.free_queue)) {
3632 enc_pr(LOG_ERROR, "work queue no space, wq:%p.\n",
3633 (void *)wq);
3634 goto error;
3635 }
3636
3637 pitem = list_entry(encode_manager.free_queue.next,
3638 struct encode_queue_item_s, list);
3639 if (IS_ERR(pitem))
3640 goto error;
3641
3642 memcpy(&pitem->request, &wq->request, sizeof(struct encode_request_s));
3643
3644 enc_pr(LOG_INFO, "new work request %p, vaddr %p, paddr %p\n", &pitem->request,
3645 pitem->request.dma_cfg[0].vaddr,pitem->request.dma_cfg[0].paddr);
3646
3647 memset(&wq->request, 0, sizeof(struct encode_request_s));
3648 wq->request.dma_cfg[0].fd = -1;
3649 wq->request.dma_cfg[1].fd = -1;
3650 wq->request.dma_cfg[2].fd = -1;
3651 wq->hw_status = 0;
3652 wq->output_size = 0;
3653 pitem->request.parent = wq;
3654 list_move_tail(&pitem->list, &encode_manager.process_queue);
3655 spin_unlock(&encode_manager.event.sem_lock);
3656
3657 enc_pr(LOG_INFO,
3658 "add new work ok, cmd:%d, ucode mode: %d, wq:%p.\n",
3659 pitem->request.cmd, pitem->request.ucode_mode,
3660 (void *)wq);
3661 complete(&encode_manager.event.request_in_com);/* new cmd come in */
3662 return 0;
3663error:
3664 spin_unlock(&encode_manager.event.sem_lock);
3665 return -1;
3666}
3667
3668struct encode_wq_s *create_encode_work_queue(void)
3669{
3670 struct encode_wq_s *encode_work_queue = NULL;
3671 bool done = false;
3672 u32 i, max_instance;
3673 struct Buff_s *reserve_buff;
3674
3675 encode_work_queue = kzalloc(sizeof(struct encode_wq_s), GFP_KERNEL);
3676 if (IS_ERR(encode_work_queue)) {
3677 enc_pr(LOG_ERROR, "can't create work queue\n");
3678 return NULL;
3679 }
3680 max_instance = encode_manager.max_instance;
3681 encode_work_queue->pic.init_qppicture = 26;
3682 encode_work_queue->pic.log2_max_frame_num = 4;
3683 encode_work_queue->pic.log2_max_pic_order_cnt_lsb = 4;
3684 encode_work_queue->pic.idr_pic_id = 0;
3685 encode_work_queue->pic.frame_number = 0;
3686 encode_work_queue->pic.pic_order_cnt_lsb = 0;
3687#ifdef H264_ENC_SVC
3688 /* Get settings from the global*/
3689 encode_work_queue->pic.enable_svc = svc_enable;
3690 encode_work_queue->pic.non_ref_limit = svc_ref_conf;
3691 encode_work_queue->pic.non_ref_cnt = 0;
3692 enc_pr(LOG_INFO, "svc conf enable %d, duration %d\n",
3693 encode_work_queue->pic.enable_svc,
3694 encode_work_queue->pic.non_ref_limit);
3695#endif
3696 encode_work_queue->ucode_index = UCODE_MODE_FULL;
3697
3698#ifdef H264_ENC_CBR
3699 encode_work_queue->cbr_info.block_w = 16;
3700 encode_work_queue->cbr_info.block_h = 9;
3701 encode_work_queue->cbr_info.long_th = CBR_LONG_THRESH;
3702 encode_work_queue->cbr_info.start_tbl_id = START_TABLE_ID;
3703 encode_work_queue->cbr_info.short_shift = CBR_SHORT_SHIFT;
3704 encode_work_queue->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
3705#endif
3706 init_waitqueue_head(&encode_work_queue->request_complete);
3707 atomic_set(&encode_work_queue->request_ready, 0);
3708 spin_lock(&encode_manager.event.sem_lock);
3709 if (encode_manager.wq_count < encode_manager.max_instance) {
3710 list_add_tail(&encode_work_queue->list, &encode_manager.wq);
3711 encode_manager.wq_count++;
3712 if (encode_manager.use_reserve == true) {
3713 for (i = 0; i < max_instance; i++) {
3714 reserve_buff = &encode_manager.reserve_buff[i];
3715 if (reserve_buff->used == false) {
3716 encode_work_queue->mem.buf_start =
3717 reserve_buff->buf_start;
3718 encode_work_queue->mem.buf_size =
3719 reserve_buff->buf_size;
3720 reserve_buff->used = true;
3721 done = true;
3722 break;
3723 }
3724 }
3725 } else
3726 done = true;
3727 }
3728 spin_unlock(&encode_manager.event.sem_lock);
3729 if (done == false) {
3730 kfree(encode_work_queue);
3731 encode_work_queue = NULL;
3732 enc_pr(LOG_ERROR, "too many work queue!\n");
3733 }
3734 return encode_work_queue; /* find it */
3735}
3736
/*
 * Unlink @encode_work_queue from the manager's wq list and, when
 * reserved memory is in use, release its reserve buffer slot.
 *
 * Must be called with manager->event.sem_lock held.  On success, *wq
 * points at the removed queue and *find is set true; otherwise both
 * are left untouched (so *wq may remain NULL for the caller).
 */
static void _destroy_encode_work_queue(struct encode_manager_s *manager,
	struct encode_wq_s **wq,
	struct encode_wq_s *encode_work_queue,
	bool *find)
{
	struct list_head *head;
	struct encode_wq_s *wp_tmp = NULL;
	u32 i, max_instance;
	struct Buff_s *reserve_buff;
	u32 buf_start = encode_work_queue->mem.buf_start;

	max_instance = manager->max_instance;
	head = &manager->wq;
	list_for_each_entry_safe((*wq), wp_tmp, head, list) {
		if ((*wq) && (*wq == encode_work_queue)) {
			list_del(&(*wq)->list);
			if (manager->use_reserve == true) {
				/* return the queue's reserve slot to the pool,
				 * matching by buffer start address */
				for (i = 0; i < max_instance; i++) {
					reserve_buff =
						&manager->reserve_buff[i];
					if (reserve_buff->used == true &&
						buf_start ==
						reserve_buff->buf_start) {
						reserve_buff->used = false;
						break;
					}
				}
			}
			*find = true;
			manager->wq_count--;
			enc_pr(LOG_DEBUG,
				"remove encode_work_queue %p success, %s line %d.\n",
				(void *)encode_work_queue,
				__func__, __LINE__);
			break;
		}
	}
}
3775
3776s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue)
3777{
3778 struct encode_queue_item_s *pitem, *tmp;
3779 struct encode_wq_s *wq = NULL;
3780 bool find = false;
3781
3782 struct list_head *head;
3783
3784 if (encode_work_queue) {
3785 spin_lock(&encode_manager.event.sem_lock);
3786 if (encode_manager.current_wq == encode_work_queue) {
3787 encode_manager.remove_flag = true;
3788 spin_unlock(&encode_manager.event.sem_lock);
3789 enc_pr(LOG_DEBUG,
3790 "warning--Destroy the running queue, should not be here.\n");
3791 wait_for_completion(
3792 &encode_manager.event.process_complete);
3793 spin_lock(&encode_manager.event.sem_lock);
3794 } /* else we can delete it safely. */
3795
3796 head = &encode_manager.process_queue;
3797 list_for_each_entry_safe(pitem, tmp, head, list) {
3798 if (pitem && pitem->request.parent ==
3799 encode_work_queue) {
3800 pitem->request.parent = NULL;
3801 enc_pr(LOG_DEBUG,
3802 "warning--remove not process request, should not be here.\n");
3803 list_move_tail(&pitem->list,
3804 &encode_manager.free_queue);
3805 }
3806 }
3807
3808 _destroy_encode_work_queue(&encode_manager, &wq,
3809 encode_work_queue, &find);
3810 spin_unlock(&encode_manager.event.sem_lock);
3811#ifdef CONFIG_CMA
3812 if (encode_work_queue->mem.buf_start) {
3813 if (wq->mem.cbr_info_ddr_virt_addr != NULL) {
3814 codec_mm_unmap_phyaddr(wq->mem.cbr_info_ddr_virt_addr);
3815 wq->mem.cbr_info_ddr_virt_addr = NULL;
3816 }
3817 codec_mm_free_for_dma(
3818 ENCODE_NAME,
3819 encode_work_queue->mem.buf_start);
3820 encode_work_queue->mem.buf_start = 0;
3821
3822 }
3823#endif
3824 kfree(encode_work_queue);
3825 complete(&encode_manager.event.request_in_com);
3826 }
3827 return 0;
3828}
3829
/*
 * Monitor kthread: the single consumer of the manager's process queue.
 *
 * Wakes on request_in_com, performs one-time hardware init when the
 * first work queue appears, powers the encoder off when the last queue
 * disappears, and otherwise pulls one request at a time off the
 * process queue and runs it through encode_process_request().
 * Runs as SCHED_FIFO at the highest RT priority.
 */
static s32 encode_monitor_thread(void *data)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)data;
	struct encode_queue_item_s *pitem = NULL;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
	s32 ret = 0;

	enc_pr(LOG_DEBUG, "encode workqueue monitor start.\n");
	sched_setscheduler(current, SCHED_FIFO, &param);
	allow_signal(SIGTERM);
	/* setup current_wq here. */
	while (manager->process_queue_state != ENCODE_PROCESS_QUEUE_STOP) {
		if (kthread_should_stop())
			break;

		/* sleep until a new request (or shutdown kick) arrives */
		ret = wait_for_completion_interruptible(
			&manager->event.request_in_com);

		if (ret == -ERESTARTSYS)
			break;

		if (kthread_should_stop())
			break;
		if (manager->inited == false) {
			/* first wake-up with a registered queue: bring up
			 * the hardware (and the optional ge2d context) */
			spin_lock(&manager->event.sem_lock);
			if (!list_empty(&manager->wq)) {
				struct encode_wq_s *first_wq =
					list_entry(manager->wq.next,
					struct encode_wq_s, list);
				manager->current_wq = first_wq;
				spin_unlock(&manager->event.sem_lock);
				if (first_wq) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
					if (!manager->context)
						manager->context =
						create_ge2d_work_queue();
#endif
					avc_init(first_wq);
					manager->inited = true;
				}
				spin_lock(&manager->event.sem_lock);
				manager->current_wq = NULL;
				spin_unlock(&manager->event.sem_lock);
				/* a destroyer may be waiting on us */
				if (manager->remove_flag) {
					complete(
						&manager
						->event.process_complete);
					manager->remove_flag = false;
				}
			} else
				spin_unlock(&manager->event.sem_lock);
			continue;
		}

		spin_lock(&manager->event.sem_lock);
		pitem = NULL;
		if (list_empty(&manager->wq)) {
			/* last queue is gone: power the encoder down */
			spin_unlock(&manager->event.sem_lock);
			manager->inited = false;
			amvenc_avc_stop();
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			if (manager->context) {
				destroy_ge2d_work_queue(manager->context);
				manager->context = NULL;
			}
#endif
			enc_pr(LOG_DEBUG, "power off encode.\n");
			continue;
		} else if (!list_empty(&manager->process_queue)) {
			/* take the oldest pending request */
			pitem = list_entry(manager->process_queue.next,
				struct encode_queue_item_s, list);
			list_del(&pitem->list);
			manager->current_item = pitem;
			manager->current_wq = pitem->request.parent;
		}
		spin_unlock(&manager->event.sem_lock);

		if (pitem) {
			encode_process_request(manager, pitem);
			/* return the item to the free pool */
			spin_lock(&manager->event.sem_lock);
			list_add_tail(&pitem->list, &manager->free_queue);
			manager->current_item = NULL;
			manager->last_wq = manager->current_wq;
			manager->current_wq = NULL;
			spin_unlock(&manager->event.sem_lock);
		}
		/* unblock destroy_encode_work_queue() if it is waiting */
		if (manager->remove_flag) {
			complete(&manager->event.process_complete);
			manager->remove_flag = false;
		}
	}
	/* park until kthread_stop() is called */
	while (!kthread_should_stop())
		msleep(20);

	enc_pr(LOG_DEBUG, "exit encode_monitor_thread.\n");
	return 0;
}
3927
3928static s32 encode_start_monitor(void)
3929{
3930 s32 ret = 0;
3931
3932 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
3933 y_tnr_mot2alp_nrm_gain = 216;
3934 y_tnr_mot2alp_dis_gain = 144;
3935 c_tnr_mot2alp_nrm_gain = 216;
3936 c_tnr_mot2alp_dis_gain = 144;
3937 } else {
3938 /* more tnr */
3939 y_tnr_mot2alp_nrm_gain = 144;
3940 y_tnr_mot2alp_dis_gain = 96;
3941 c_tnr_mot2alp_nrm_gain = 144;
3942 c_tnr_mot2alp_dis_gain = 96;
3943 }
3944
3945 enc_pr(LOG_DEBUG, "encode start monitor.\n");
3946 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_START;
3947 encode_manager.encode_thread = kthread_run(encode_monitor_thread,
3948 &encode_manager, "encode_monitor");
3949 if (IS_ERR(encode_manager.encode_thread)) {
3950 ret = PTR_ERR(encode_manager.encode_thread);
3951 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3952 enc_pr(LOG_ERROR,
3953 "encode monitor : failed to start kthread (%d)\n", ret);
3954 }
3955 return ret;
3956}
3957
3958static s32 encode_stop_monitor(void)
3959{
3960 enc_pr(LOG_DEBUG, "stop encode monitor thread\n");
3961 if (encode_manager.encode_thread) {
3962 spin_lock(&encode_manager.event.sem_lock);
3963 if (!list_empty(&encode_manager.wq)) {
3964 u32 count = encode_manager.wq_count;
3965
3966 spin_unlock(&encode_manager.event.sem_lock);
3967 enc_pr(LOG_ERROR,
3968 "stop encode monitor thread error, active wq (%d) is not 0.\n",
3969 count);
3970 return -1;
3971 }
3972 spin_unlock(&encode_manager.event.sem_lock);
3973 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3974 send_sig(SIGTERM, encode_manager.encode_thread, 1);
3975 complete(&encode_manager.event.request_in_com);
3976 kthread_stop(encode_manager.encode_thread);
3977 encode_manager.encode_thread = NULL;
3978 kfree(mc_addr);
3979 mc_addr = NULL;
3980 }
3981 return 0;
3982}
3983
3984static s32 encode_wq_init(void)
3985{
3986 u32 i = 0;
3987 struct encode_queue_item_s *pitem = NULL;
3988
3989 enc_pr(LOG_DEBUG, "encode_wq_init.\n");
3990 encode_manager.irq_requested = false;
3991
3992 spin_lock_init(&encode_manager.event.sem_lock);
3993 init_completion(&encode_manager.event.request_in_com);
3994 init_waitqueue_head(&encode_manager.event.hw_complete);
3995 init_completion(&encode_manager.event.process_complete);
3996 INIT_LIST_HEAD(&encode_manager.process_queue);
3997 INIT_LIST_HEAD(&encode_manager.free_queue);
3998 INIT_LIST_HEAD(&encode_manager.wq);
3999
4000 tasklet_init(&encode_manager.encode_tasklet,
4001 encode_isr_tasklet,
4002 (ulong)&encode_manager);
4003
4004 for (i = 0; i < MAX_ENCODE_REQUEST; i++) {
4005 pitem = kcalloc(1,
4006 sizeof(struct encode_queue_item_s),
4007 GFP_KERNEL);
4008 if (IS_ERR(pitem)) {
4009 enc_pr(LOG_ERROR, "can't request queue item memory.\n");
4010 return -1;
4011 }
4012 pitem->request.parent = NULL;
4013 list_add_tail(&pitem->list, &encode_manager.free_queue);
4014 }
4015 encode_manager.current_wq = NULL;
4016 encode_manager.last_wq = NULL;
4017 encode_manager.encode_thread = NULL;
4018 encode_manager.current_item = NULL;
4019 encode_manager.wq_count = 0;
4020 encode_manager.remove_flag = false;
4021 InitEncodeWeight();
4022 if (encode_start_monitor()) {
4023 enc_pr(LOG_ERROR, "encode create thread error.\n");
4024 return -1;
4025 }
4026 return 0;
4027}
4028
/*
 * Tear down the encode manager: stop the monitor thread, release the
 * IRQ, and free every queue item from both the process and free lists.
 *
 * Returns 0 when all MAX_ENCODE_REQUEST items were reclaimed, -1 if
 * the monitor could not be stopped or items went missing.
 */
static s32 encode_wq_uninit(void)
{
	struct encode_queue_item_s *pitem, *tmp;
	struct list_head *head;
	u32 count = 0;
	s32 r = -1;

	enc_pr(LOG_DEBUG, "uninit encode wq.\n");
	if (encode_stop_monitor() == 0) {
		if ((encode_manager.irq_num >= 0) &&
			(encode_manager.irq_requested == true)) {
			free_irq(encode_manager.irq_num, &encode_manager);
			encode_manager.irq_requested = false;
		}
		spin_lock(&encode_manager.event.sem_lock);
		/* drain and free both item lists, counting what we reclaim */
		head = &encode_manager.process_queue;
		list_for_each_entry_safe(pitem, tmp, head, list) {
			if (pitem) {
				list_del(&pitem->list);
				kfree(pitem);
				count++;
			}
		}
		head = &encode_manager.free_queue;
		list_for_each_entry_safe(pitem, tmp, head, list) {
			if (pitem) {
				list_del(&pitem->list);
				kfree(pitem);
				count++;
			}
		}
		spin_unlock(&encode_manager.event.sem_lock);
		/* every pool item must be accounted for */
		if (count == MAX_ENCODE_REQUEST)
			r = 0;
		else {
			enc_pr(LOG_ERROR, "lost some request item %d.\n",
				MAX_ENCODE_REQUEST - count);
		}
	}
	return r;
}
4070
4071static ssize_t encode_status_show(struct class *cla,
4072 struct class_attribute *attr, char *buf)
4073{
4074 u32 process_count = 0;
4075 u32 free_count = 0;
4076 struct encode_queue_item_s *pitem = NULL;
4077 struct encode_wq_s *current_wq = NULL;
4078 struct encode_wq_s *last_wq = NULL;
4079 struct list_head *head = NULL;
4080 s32 irq_num = 0;
4081 u32 hw_status = 0;
4082 u32 process_queue_state = 0;
4083 u32 wq_count = 0;
4084 u32 ucode_index;
4085 bool need_reset;
4086 bool process_irq;
4087 bool inited;
4088 bool use_reserve;
4089 struct Buff_s reserve_mem;
4090 u32 max_instance;
4091#ifdef CONFIG_CMA
4092 bool check_cma = false;
4093#endif
4094
4095 spin_lock(&encode_manager.event.sem_lock);
4096 head = &encode_manager.free_queue;
4097 list_for_each_entry(pitem, head, list) {
4098 free_count++;
4099 if (free_count > MAX_ENCODE_REQUEST)
4100 break;
4101 }
4102
4103 head = &encode_manager.process_queue;
4104 list_for_each_entry(pitem, head, list) {
4105 process_count++;
4106 if (free_count > MAX_ENCODE_REQUEST)
4107 break;
4108 }
4109
4110 current_wq = encode_manager.current_wq;
4111 last_wq = encode_manager.last_wq;
4112 pitem = encode_manager.current_item;
4113 irq_num = encode_manager.irq_num;
4114 hw_status = encode_manager.encode_hw_status;
4115 process_queue_state = encode_manager.process_queue_state;
4116 wq_count = encode_manager.wq_count;
4117 ucode_index = encode_manager.ucode_index;
4118 need_reset = encode_manager.need_reset;
4119 process_irq = encode_manager.process_irq;
4120 inited = encode_manager.inited;
4121 use_reserve = encode_manager.use_reserve;
4122 reserve_mem.buf_start = encode_manager.reserve_mem.buf_start;
4123 reserve_mem.buf_size = encode_manager.reserve_mem.buf_size;
4124
4125 max_instance = encode_manager.max_instance;
4126#ifdef CONFIG_CMA
4127 check_cma = encode_manager.check_cma;
4128#endif
4129
4130 spin_unlock(&encode_manager.event.sem_lock);
4131
4132 enc_pr(LOG_DEBUG,
4133 "encode process queue count: %d, free queue count: %d.\n",
4134 process_count, free_count);
4135 enc_pr(LOG_DEBUG,
4136 "encode curent wq: %p, last wq: %p, wq count: %d, max_instance: %d.\n",
4137 current_wq, last_wq, wq_count, max_instance);
4138 if (current_wq)
4139 enc_pr(LOG_DEBUG,
4140 "encode curent wq -- encode width: %d, encode height: %d.\n",
4141 current_wq->pic.encoder_width,
4142 current_wq->pic.encoder_height);
4143 enc_pr(LOG_DEBUG,
4144 "encode curent pitem: %p, ucode_index: %d, hw_status: %d, need_reset: %s, process_irq: %s.\n",
4145 pitem, ucode_index, hw_status, need_reset ? "true" : "false",
4146 process_irq ? "true" : "false");
4147 enc_pr(LOG_DEBUG,
4148 "encode irq num: %d, inited: %s, process_queue_state: %d.\n",
4149 irq_num, inited ? "true" : "false", process_queue_state);
4150 if (use_reserve) {
4151 enc_pr(LOG_DEBUG,
4152 "encode use reserve memory, buffer start: 0x%x, size: %d MB.\n",
4153 reserve_mem.buf_start,
4154 reserve_mem.buf_size / SZ_1M);
4155 } else {
4156#ifdef CONFIG_CMA
4157 enc_pr(LOG_DEBUG, "encode check cma: %s.\n",
4158 check_cma ? "true" : "false");
4159#endif
4160 }
4161 return snprintf(buf, 40, "encode max instance: %d\n", max_instance);
4162}
4163
/* sysfs attributes for the amvenc_avc class (read-only status dump). */
static struct class_attribute amvenc_class_attrs[] = {
	__ATTR(encode_status,
	S_IRUGO | S_IWUSR,
	encode_status_show,
	NULL),
	__ATTR_NULL
};
4171
/* Device class under /sys/class for the AVC encoder. */
static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_attrs = amvenc_class_attrs,
};
4176
4177s32 init_avc_device(void)
4178{
4179 s32 r = 0;
4180
4181 r = register_chrdev(0, DEVICE_NAME, &amvenc_avc_fops);
4182 if (r <= 0) {
4183 enc_pr(LOG_ERROR, "register amvenc_avc device error.\n");
4184 return r;
4185 }
4186 avc_device_major = r;
4187
4188 r = class_register(&amvenc_avc_class);
4189 if (r < 0) {
4190 enc_pr(LOG_ERROR, "error create amvenc_avc class.\n");
4191 return r;
4192 }
4193
4194 amvenc_avc_dev = device_create(&amvenc_avc_class, NULL,
4195 MKDEV(avc_device_major, 0), NULL,
4196 DEVICE_NAME);
4197
4198 if (IS_ERR(amvenc_avc_dev)) {
4199 enc_pr(LOG_ERROR, "create amvenc_avc device error.\n");
4200 class_unregister(&amvenc_avc_class);
4201 return -1;
4202 }
4203 return r;
4204}
4205
4206s32 uninit_avc_device(void)
4207{
4208 if (amvenc_avc_dev)
4209 device_destroy(&amvenc_avc_class, MKDEV(avc_device_major, 0));
4210
4211 class_destroy(&amvenc_avc_class);
4212
4213 unregister_chrdev(avc_device_major, DEVICE_NAME);
4214 return 0;
4215}
4216
4217static s32 avc_mem_device_init(struct reserved_mem *rmem, struct device *dev)
4218{
4219 s32 r;
4220 struct resource res;
4221
4222 if (!rmem) {
4223 enc_pr(LOG_ERROR,
4224 "Can not obtain I/O memory, and will allocate avc buffer!\n");
4225 r = -EFAULT;
4226 return r;
4227 }
4228 res.start = (phys_addr_t)rmem->base;
4229 res.end = res.start + (phys_addr_t)rmem->size - 1;
4230 encode_manager.reserve_mem.buf_start = res.start;
4231 encode_manager.reserve_mem.buf_size = res.end - res.start + 1;
4232
4233 if (encode_manager.reserve_mem.buf_size >=
4234 amvenc_buffspec[0].min_buffsize) {
4235 encode_manager.max_instance =
4236 encode_manager.reserve_mem.buf_size /
4237 amvenc_buffspec[0].min_buffsize;
4238 if (encode_manager.max_instance > MAX_ENCODE_INSTANCE)
4239 encode_manager.max_instance = MAX_ENCODE_INSTANCE;
4240 encode_manager.reserve_buff = kzalloc(
4241 encode_manager.max_instance *
4242 sizeof(struct Buff_s), GFP_KERNEL);
4243 if (encode_manager.reserve_buff) {
4244 u32 i;
4245 struct Buff_s *reserve_buff;
4246 u32 max_instance = encode_manager.max_instance;
4247
4248 for (i = 0; i < max_instance; i++) {
4249 reserve_buff = &encode_manager.reserve_buff[i];
4250 reserve_buff->buf_start =
4251 i *
4252 amvenc_buffspec[0]
4253 .min_buffsize +
4254 encode_manager.reserve_mem.buf_start;
4255 reserve_buff->buf_size =
4256 encode_manager.reserve_mem.buf_start;
4257 reserve_buff->used = false;
4258 }
4259 encode_manager.use_reserve = true;
4260 r = 0;
4261 enc_pr(LOG_DEBUG,
4262 "amvenc_avc use reserve memory, buff start: 0x%x, size: 0x%x, max instance is %d\n",
4263 encode_manager.reserve_mem.buf_start,
4264 encode_manager.reserve_mem.buf_size,
4265 encode_manager.max_instance);
4266 } else {
4267 enc_pr(LOG_ERROR,
4268 "amvenc_avc alloc reserve buffer pointer fail. max instance is %d.\n",
4269 encode_manager.max_instance);
4270 encode_manager.max_instance = 0;
4271 encode_manager.reserve_mem.buf_start = 0;
4272 encode_manager.reserve_mem.buf_size = 0;
4273 r = -ENOMEM;
4274 }
4275 } else {
4276 enc_pr(LOG_ERROR,
4277 "amvenc_avc memory resource too small, size is 0x%x. Need 0x%x bytes at least.\n",
4278 encode_manager.reserve_mem.buf_size,
4279 amvenc_buffspec[0]
4280 .min_buffsize);
4281 encode_manager.reserve_mem.buf_start = 0;
4282 encode_manager.reserve_mem.buf_size = 0;
4283 r = -ENOMEM;
4284 }
4285 return r;
4286}
4287
4288static s32 amvenc_avc_probe(struct platform_device *pdev)
4289{
4290 /* struct resource mem; */
4291 s32 res_irq;
4292 s32 idx;
4293 s32 r;
4294
4295 enc_pr(LOG_INFO, "amvenc_avc probe start.\n");
4296
4297 encode_manager.this_pdev = pdev;
4298#ifdef CONFIG_CMA
4299 encode_manager.check_cma = false;
4300#endif
4301 encode_manager.reserve_mem.buf_start = 0;
4302 encode_manager.reserve_mem.buf_size = 0;
4303 encode_manager.use_reserve = false;
4304 encode_manager.max_instance = 0;
4305 encode_manager.reserve_buff = NULL;
4306
4307 idx = of_reserved_mem_device_init(&pdev->dev);
4308 if (idx != 0) {
4309 enc_pr(LOG_DEBUG,
4310 "amvenc_avc_probe -- reserved memory config fail.\n");
4311 }
4312
4313 if (encode_manager.use_reserve == false) {
4314#ifndef CONFIG_CMA
4315 enc_pr(LOG_ERROR,
4316 "amvenc_avc memory is invaild, probe fail!\n");
4317 return -EFAULT;
4318#else
4319 encode_manager.cma_pool_size =
4320 (codec_mm_get_total_size() > (MIN_SIZE * 3)) ?
4321 (MIN_SIZE * 3) : codec_mm_get_total_size();
4322 enc_pr(LOG_DEBUG,
4323 "amvenc_avc - cma memory pool size: %d MB\n",
4324 (u32)encode_manager.cma_pool_size / SZ_1M);
4325#endif
4326 }
4327
4328 res_irq = platform_get_irq(pdev, 0);
4329 if (res_irq < 0) {
4330 enc_pr(LOG_ERROR, "[%s] get irq error!", __func__);
4331 return -EINVAL;
4332 }
4333
4334 encode_manager.irq_num = res_irq;
4335 if (encode_wq_init()) {
4336 kfree(encode_manager.reserve_buff);
4337 encode_manager.reserve_buff = NULL;
4338 enc_pr(LOG_ERROR, "encode work queue init error.\n");
4339 return -EFAULT;
4340 }
4341
4342 r = init_avc_device();
4343 enc_pr(LOG_INFO, "amvenc_avc probe end.\n");
4344 return r;
4345}
4346
/*
 * amvenc_avc_remove() - platform driver .remove callback.
 * Frees the reserve-buffer bookkeeping, stops the encode work queue and
 * unregisters the char device/class created at probe time.
 */
static s32 amvenc_avc_remove(struct platform_device *pdev)
{
	kfree(encode_manager.reserve_buff);
	encode_manager.reserve_buff = NULL;
	if (encode_wq_uninit())
		enc_pr(LOG_ERROR, "encode work queue uninit error.\n");
	uninit_avc_device();
	enc_pr(LOG_INFO, "amvenc_avc remove.\n");
	return 0;
}
4357
/*
 * Device-tree match table. NOTE(review): the compatible string contains a
 * space after the comma ("amlogic, amvenc_avc"); the matching .dts entries
 * presumably use the same spelling — do not "fix" one without the other.
 */
static const struct of_device_id amlogic_avcenc_dt_match[] = {
	{
		.compatible = "amlogic, amvenc_avc",
	},
	{},
};
4364
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver amvenc_avc_driver = {
	.probe = amvenc_avc_probe,
	.remove = amvenc_avc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = amlogic_avcenc_dt_match,
	}
};
4373
/* Advertises "avc" encode capability to the vcodec profile registry. */
static struct codec_profile_t amvenc_avc_profile = {
	.name = "avc",
	.profile = ""
};
4378
4379static s32 __init amvenc_avc_driver_init_module(void)
4380{
4381 enc_pr(LOG_INFO, "amvenc_avc module init\n");
4382
4383 if (platform_driver_register(&amvenc_avc_driver)) {
4384 enc_pr(LOG_ERROR,
4385 "failed to register amvenc_avc driver\n");
4386 return -ENODEV;
4387 }
4388 vcodec_profile_register(&amvenc_avc_profile);
4389 return 0;
4390}
4391
/* Module exit point: unregister the platform driver. */
static void __exit amvenc_avc_driver_remove_module(void)
{
	enc_pr(LOG_INFO, "amvenc_avc module remove.\n");

	platform_driver_unregister(&amvenc_avc_driver);
}
4398
/* reserved_mem ops: device_init runs when the region binds to our device. */
static const struct reserved_mem_ops rmem_avc_ops = {
	.device_init = avc_mem_device_init,
};
4402
4403static s32 __init avc_mem_setup(struct reserved_mem *rmem)
4404{
4405 rmem->ops = &rmem_avc_ops;
4406 enc_pr(LOG_DEBUG, "amvenc_avc reserved mem setup.\n");
4407 return 0;
4408}
4409
4410static int enc_dma_buf_map(struct enc_dma_cfg *cfg)
4411{
4412 long ret = -1;
4413 int fd = -1;
4414 struct dma_buf *dbuf = NULL;
4415 struct dma_buf_attachment *d_att = NULL;
4416 struct sg_table *sg = NULL;
4417 void *vaddr = NULL;
4418 struct device *dev = NULL;
4419 enum dma_data_direction dir;
4420
4421 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
4422 enc_pr(LOG_ERROR, "error input param\n");
4423 return -EINVAL;
4424 }
4425 enc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd);
4426
4427 fd = cfg->fd;
4428 dev = cfg->dev;
4429 dir = cfg->dir;
4430 enc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd);
4431
4432 dbuf = dma_buf_get(fd);
4433 if (dbuf == NULL) {
4434 enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd);
4435 return -EINVAL;
4436 }
4437
4438 d_att = dma_buf_attach(dbuf, dev);
4439 if (d_att == NULL) {
4440 enc_pr(LOG_ERROR, "failed to set dma attach\n");
4441 goto attach_err;
4442 }
4443
4444 sg = dma_buf_map_attachment(d_att, dir);
4445 if (sg == NULL) {
4446 enc_pr(LOG_ERROR, "failed to get dma sg\n");
4447 goto map_attach_err;
4448 }
4449
4450 ret = dma_buf_begin_cpu_access(dbuf, dir);
4451 if (ret != 0) {
4452 enc_pr(LOG_ERROR, "failed to access dma buff\n");
4453 goto access_err;
4454 }
4455
4456 vaddr = dma_buf_vmap(dbuf);
4457 if (vaddr == NULL) {
4458 enc_pr(LOG_ERROR, "failed to vmap dma buf\n");
4459 goto vmap_err;
4460 }
4461 cfg->dbuf = dbuf;
4462 cfg->attach = d_att;
4463 cfg->vaddr = vaddr;
4464 cfg->sg = sg;
4465
4466 return ret;
4467
4468vmap_err:
4469 dma_buf_end_cpu_access(dbuf, dir);
4470
4471access_err:
4472 dma_buf_unmap_attachment(d_att, sg, dir);
4473
4474map_attach_err:
4475 dma_buf_detach(dbuf, d_att);
4476
4477attach_err:
4478 dma_buf_put(dbuf);
4479
4480 return ret;
4481}
4482
4483static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr)
4484{
4485 struct sg_table *sg_table;
4486 struct page *page;
4487 int ret;
4488 enc_pr(LOG_INFO, "enc_dma_buf_get_phys in\n");
4489
4490 ret = enc_dma_buf_map(cfg);
4491 if (ret < 0) {
4492 enc_pr(LOG_ERROR, "gdc_dma_buf_map failed\n");
4493 return ret;
4494 }
4495 if (cfg->sg) {
4496 sg_table = cfg->sg;
4497 page = sg_page(sg_table->sgl);
4498 *addr = PFN_PHYS(page_to_pfn(page));
4499 ret = 0;
4500 }
4501 enc_pr(LOG_INFO, "enc_dma_buf_get_phys 0x%lx\n", *addr);
4502 return ret;
4503}
4504
4505static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg)
4506{
4507 int fd = -1;
4508 struct dma_buf *dbuf = NULL;
4509 struct dma_buf_attachment *d_att = NULL;
4510 struct sg_table *sg = NULL;
4511 void *vaddr = NULL;
4512 struct device *dev = NULL;
4513 enum dma_data_direction dir;
4514
4515 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
4516 || cfg->dbuf == NULL || cfg->vaddr == NULL
4517 || cfg->attach == NULL || cfg->sg == NULL) {
4518 enc_pr(LOG_ERROR, "Error input param\n");
4519 return;
4520 }
4521
4522 fd = cfg->fd;
4523 dev = cfg->dev;
4524 dir = cfg->dir;
4525 dbuf = cfg->dbuf;
4526 vaddr = cfg->vaddr;
4527 d_att = cfg->attach;
4528 sg = cfg->sg;
4529
4530 dma_buf_vunmap(dbuf, vaddr);
4531
4532 dma_buf_end_cpu_access(dbuf, dir);
4533
4534 dma_buf_unmap_attachment(d_att, sg, dir);
4535
4536 dma_buf_detach(dbuf, d_att);
4537
4538 dma_buf_put(dbuf);
4539 enc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr);
4540}
4541
4542
/* Core encoder tunables, exposed under /sys/module/.../parameters/. */
module_param(fixed_slice_cfg, uint, 0664);
MODULE_PARM_DESC(fixed_slice_cfg, "\n fixed_slice_cfg\n");

module_param(clock_level, uint, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

module_param(encode_print_level, uint, 0664);
MODULE_PARM_DESC(encode_print_level, "\n encode_print_level\n");

module_param(no_timeout, uint, 0664);
MODULE_PARM_DESC(no_timeout, "\n no_timeout flag for process request\n");

module_param(nr_mode, int, 0664);
MODULE_PARM_DESC(nr_mode, "\n nr_mode option\n");

module_param(qp_table_debug, uint, 0664);
MODULE_PARM_DESC(qp_table_debug, "\n print qp table\n");

/* Scalable-video-coding knobs, only built when H264_ENC_SVC is defined. */
#ifdef H264_ENC_SVC
module_param(svc_enable, uint, 0664);
MODULE_PARM_DESC(svc_enable, "\n svc enable\n");
module_param(svc_ref_conf, uint, 0664);
MODULE_PARM_DESC(svc_ref_conf, "\n svc reference duration config\n");
#endif
4567
/*
 * Extended hardware tuning parameters (motion estimation, temporal and
 * spatial noise reduction).  Compiled in only with MORE_MODULE_PARAM.
 */
#ifdef MORE_MODULE_PARAM
/* Motion-estimation (ME) tuning. */
module_param(me_mv_merge_ctl, uint, 0664);
MODULE_PARM_DESC(me_mv_merge_ctl, "\n me_mv_merge_ctl\n");

module_param(me_step0_close_mv, uint, 0664);
MODULE_PARM_DESC(me_step0_close_mv, "\n me_step0_close_mv\n");

module_param(me_f_skip_sad, uint, 0664);
MODULE_PARM_DESC(me_f_skip_sad, "\n me_f_skip_sad\n");

module_param(me_f_skip_weight, uint, 0664);
MODULE_PARM_DESC(me_f_skip_weight, "\n me_f_skip_weight\n");

module_param(me_mv_weight_01, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_01, "\n me_mv_weight_01\n");

module_param(me_mv_weight_23, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_23, "\n me_mv_weight_23\n");

module_param(me_sad_range_inc, uint, 0664);
MODULE_PARM_DESC(me_sad_range_inc, "\n me_sad_range_inc\n");

module_param(me_sad_enough_01, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_01, "\n me_sad_enough_01\n");

module_param(me_sad_enough_23, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_23, "\n me_sad_enough_23\n");

/* Luma temporal noise reduction (y_tnr_*). */
module_param(y_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(y_tnr_mc_en, "\n y_tnr_mc_en option\n");
module_param(y_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(y_tnr_txt_mode, "\n y_tnr_txt_mode option\n");
module_param(y_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_sad_margin, "\n y_tnr_mot_sad_margin option\n");
module_param(y_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_cortxt_rate, "\n y_tnr_mot_cortxt_rate option\n");
module_param(y_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_ofst, "\n y_tnr_mot_distxt_ofst option\n");
module_param(y_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_rate, "\n y_tnr_mot_distxt_rate option\n");
module_param(y_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_dismot_ofst, "\n y_tnr_mot_dismot_ofst option\n");
module_param(y_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_frcsad_lock, "\n y_tnr_mot_frcsad_lock option\n");
module_param(y_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_frc_gain, "\n y_tnr_mot2alp_frc_gain option\n");
module_param(y_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_nrm_gain, "\n y_tnr_mot2alp_nrm_gain option\n");
module_param(y_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_gain, "\n y_tnr_mot2alp_dis_gain option\n");
module_param(y_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_ofst, "\n y_tnr_mot2alp_dis_ofst option\n");
module_param(y_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_min, "\n y_tnr_alpha_min option\n");
module_param(y_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_max, "\n y_tnr_alpha_max option\n");
module_param(y_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(y_tnr_deghost_os, "\n y_tnr_deghost_os option\n");

/* Chroma temporal noise reduction (c_tnr_*). */
module_param(c_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(c_tnr_mc_en, "\n c_tnr_mc_en option\n");
module_param(c_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(c_tnr_txt_mode, "\n c_tnr_txt_mode option\n");
module_param(c_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_sad_margin, "\n c_tnr_mot_sad_margin option\n");
module_param(c_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_cortxt_rate, "\n c_tnr_mot_cortxt_rate option\n");
module_param(c_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_ofst, "\n c_tnr_mot_distxt_ofst option\n");
module_param(c_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_rate, "\n c_tnr_mot_distxt_rate option\n");
module_param(c_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_dismot_ofst, "\n c_tnr_mot_dismot_ofst option\n");
module_param(c_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_frcsad_lock, "\n c_tnr_mot_frcsad_lock option\n");
module_param(c_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_frc_gain, "\n c_tnr_mot2alp_frc_gain option\n");
module_param(c_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_nrm_gain, "\n c_tnr_mot2alp_nrm_gain option\n");
module_param(c_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_gain, "\n c_tnr_mot2alp_dis_gain option\n");
module_param(c_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_ofst, "\n c_tnr_mot2alp_dis_ofst option\n");
module_param(c_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_min, "\n c_tnr_alpha_min option\n");
module_param(c_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_max, "\n c_tnr_alpha_max option\n");
module_param(c_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(c_tnr_deghost_os, "\n c_tnr_deghost_os option\n");

/* Luma spatial noise reduction (y_snr_* / y_bld_*). */
module_param(y_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(y_snr_err_norm, "\n y_snr_err_norm option\n");
module_param(y_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_core, "\n y_snr_gau_bld_core option\n");
module_param(y_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_ofst, "\n y_snr_gau_bld_ofst option\n");
module_param(y_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_rate, "\n y_snr_gau_bld_rate option\n");
module_param(y_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_min, "\n y_snr_gau_alp0_min option\n");
module_param(y_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_max, "\n y_snr_gau_alp0_max option\n");
module_param(y_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(y_bld_beta2alp_rate, "\n y_bld_beta2alp_rate option\n");
module_param(y_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_min, "\n y_bld_beta_min option\n");
module_param(y_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_max, "\n y_bld_beta_max option\n");

/* Chroma spatial noise reduction (c_snr_* / c_bld_*). */
module_param(c_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(c_snr_err_norm, "\n c_snr_err_norm option\n");
module_param(c_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_core, "\n c_snr_gau_bld_core option\n");
module_param(c_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_ofst, "\n c_snr_gau_bld_ofst option\n");
module_param(c_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_rate, "\n c_snr_gau_bld_rate option\n");
module_param(c_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_min, "\n c_snr_gau_alp0_min option\n");
module_param(c_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_max, "\n c_snr_gau_alp0_max option\n");
module_param(c_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(c_bld_beta2alp_rate, "\n c_bld_beta2alp_rate option\n");
module_param(c_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_min, "\n c_bld_beta_min option\n");
module_param(c_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_max, "\n c_bld_beta_max option\n");
#endif
4696
/* Module entry/exit points and reserved-memory binding. */
module_init(amvenc_avc_driver_init_module);
module_exit(amvenc_avc_driver_remove_module);
RESERVEDMEM_OF_DECLARE(amvenc_avc, "amlogic, amvenc-memory", avc_mem_setup);

MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");