summaryrefslogtreecommitdiff
path: root/drivers/frame_sink/encoder/h264/encoder.c (plain)
blob: 358866dc35307c5ada3b8363dd6ca322c1d3fffb
1/*
2 * drivers/amlogic/amports/encoder.c
3 *
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16*/
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/timer.h>
23#include <linux/fs.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spinlock.h>
29#include <linux/ctype.h>
30#include <linux/amlogic/media/frame_sync/ptsserv.h>
31#include <linux/amlogic/media/utils/amstream.h>
32#include <linux/amlogic/media/canvas/canvas.h>
33#include <linux/amlogic/media/canvas/canvas_mgr.h>
34#include <linux/amlogic/media/codec_mm/codec_mm.h>
35
36#include <linux/amlogic/media/utils/vdec_reg.h>
37#include "../../../frame_provider/decoder/utils/vdec.h"
38#include <linux/delay.h>
39#include <linux/poll.h>
40#include <linux/of.h>
41#include <linux/of_fdt.h>
42#include <linux/dma-contiguous.h>
43#include <linux/kthread.h>
44#include <linux/sched/rt.h>
45#include <linux/amlogic/media/utils/amports_config.h>
46#include "encoder.h"
47#include "../../../frame_provider/decoder/utils/amvdec.h"
48#include <linux/amlogic/media/utils/amlog.h>
49#include "../../../stream_input/amports/amports_priv.h"
50#include "../../../frame_provider/decoder/utils/firmware.h"
51#include <linux/of_reserved_mem.h>
52
53
54#ifdef CONFIG_AM_JPEG_ENCODER
55#include "jpegenc.h"
56#endif
57
58#define ENCODE_NAME "encoder"
59#define AMVENC_CANVAS_INDEX 0xE4
60#define AMVENC_CANVAS_MAX_INDEX 0xEF
61
62#define MIN_SIZE amvenc_buffspec[0].min_buffsize
63#define DUMP_INFO_BYTES_PER_MB 80
64
65#define ADJUSTED_QP_FLAG 64
66
67static s32 avc_device_major;
68static struct device *amvenc_avc_dev;
69#define DRIVER_NAME "amvenc_avc"
70#define CLASS_NAME "amvenc_avc"
71#define DEVICE_NAME "amvenc_avc"
72
73static struct encode_manager_s encode_manager;
74
75#define MULTI_SLICE_MC
76#define H264_ENC_CBR
77/* #define MORE_MODULE_PARAM */
78
79#define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX
80
81#define UCODE_MODE_FULL 0
82
83/* #define ENABLE_IGNORE_FUNCTION */
84
85static u32 ie_me_mb_type;
86static u32 ie_me_mode;
87static u32 ie_pippeline_block = 3;
88static u32 ie_cur_ref_sel;
89/* static u32 avc_endian = 6; */
90static u32 clock_level = 5;
91
92static u32 encode_print_level = LOG_DEBUG;
93static u32 no_timeout;
94static int nr_mode = -1;
95static u32 qp_table_debug;
96
97#ifdef H264_ENC_SVC
98static u32 svc_enable = 0; /* Enable sac feature or not */
99static u32 svc_ref_conf = 0; /* Continuous no reference numbers */
100#endif
101
102static u32 me_mv_merge_ctl =
103 (0x1 << 31) | /* [31] me_merge_mv_en_16 */
104 (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
105 (0x1 << 29) | /* [29] me_merge_flex_en_16 */
106 (0x1 << 28) | /* [28] me_merge_sad_en_16 */
107 (0x1 << 27) | /* [27] me_merge_mv_en_8 */
108 (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
109 (0x1 << 25) | /* [25] me_merge_flex_en_8 */
110 (0x1 << 24) | /* [24] me_merge_sad_en_8 */
111 /* [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged */
112 (0x12 << 18) |
113 /* [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged */
114 (0x2b << 12) |
115 /* [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV */
116 (0x80 << 0);
117 /* ( 0x4 << 18) |
118 * // [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged
119 */
120 /* ( 0x3f << 12) |
121 * // [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged
122 */
123 /* ( 0xc0 << 0);
124 * // [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV
125 */
126
127static u32 me_mv_weight_01 = (0x40 << 24) | (0x30 << 16) | (0x20 << 8) | 0x30;
128static u32 me_mv_weight_23 = (0x40 << 8) | 0x30;
129static u32 me_sad_range_inc = 0x03030303;
130static u32 me_step0_close_mv = 0x003ffc21;
131static u32 me_f_skip_sad;
132static u32 me_f_skip_weight;
133static u32 me_sad_enough_01;/* 0x00018010; */
134static u32 me_sad_enough_23;/* 0x00000020; */
135
136/* [31:0] NUM_ROWS_PER_SLICE_P */
137/* [15:0] NUM_ROWS_PER_SLICE_I */
138static u32 fixed_slice_cfg;
139
140/* y tnr */
141static unsigned int y_tnr_mc_en = 1;
142static unsigned int y_tnr_txt_mode;
143static unsigned int y_tnr_mot_sad_margin = 1;
144static unsigned int y_tnr_mot_cortxt_rate = 1;
145static unsigned int y_tnr_mot_distxt_ofst = 5;
146static unsigned int y_tnr_mot_distxt_rate = 4;
147static unsigned int y_tnr_mot_dismot_ofst = 4;
148static unsigned int y_tnr_mot_frcsad_lock = 8;
149static unsigned int y_tnr_mot2alp_frc_gain = 10;
150static unsigned int y_tnr_mot2alp_nrm_gain = 216;
151static unsigned int y_tnr_mot2alp_dis_gain = 128;
152static unsigned int y_tnr_mot2alp_dis_ofst = 32;
153static unsigned int y_tnr_alpha_min = 32;
154static unsigned int y_tnr_alpha_max = 63;
155static unsigned int y_tnr_deghost_os;
156/* c tnr */
157static unsigned int c_tnr_mc_en = 1;
158static unsigned int c_tnr_txt_mode;
159static unsigned int c_tnr_mot_sad_margin = 1;
160static unsigned int c_tnr_mot_cortxt_rate = 1;
161static unsigned int c_tnr_mot_distxt_ofst = 5;
162static unsigned int c_tnr_mot_distxt_rate = 4;
163static unsigned int c_tnr_mot_dismot_ofst = 4;
164static unsigned int c_tnr_mot_frcsad_lock = 8;
165static unsigned int c_tnr_mot2alp_frc_gain = 10;
166static unsigned int c_tnr_mot2alp_nrm_gain = 216;
167static unsigned int c_tnr_mot2alp_dis_gain = 128;
168static unsigned int c_tnr_mot2alp_dis_ofst = 32;
169static unsigned int c_tnr_alpha_min = 32;
170static unsigned int c_tnr_alpha_max = 63;
171static unsigned int c_tnr_deghost_os;
172/* y snr */
173static unsigned int y_snr_err_norm = 1;
174static unsigned int y_snr_gau_bld_core = 1;
175static int y_snr_gau_bld_ofst = -1;
176static unsigned int y_snr_gau_bld_rate = 48;
177static unsigned int y_snr_gau_alp0_min;
178static unsigned int y_snr_gau_alp0_max = 63;
179static unsigned int y_bld_beta2alp_rate = 16;
180static unsigned int y_bld_beta_min;
181static unsigned int y_bld_beta_max = 63;
182/* c snr */
183static unsigned int c_snr_err_norm = 1;
184static unsigned int c_snr_gau_bld_core = 1;
185static int c_snr_gau_bld_ofst = -1;
186static unsigned int c_snr_gau_bld_rate = 48;
187static unsigned int c_snr_gau_alp0_min;
188static unsigned int c_snr_gau_alp0_max = 63;
189static unsigned int c_bld_beta2alp_rate = 16;
190static unsigned int c_bld_beta_min;
191static unsigned int c_bld_beta_max = 63;
192static unsigned int qp_mode;
193
194static DEFINE_SPINLOCK(lock);
195
196#define ADV_MV_LARGE_16x8 1
197#define ADV_MV_LARGE_8x16 1
198#define ADV_MV_LARGE_16x16 1
199
200/* me weight offset should not very small, it used by v1 me module. */
201/* the min real sad for me is 16 by hardware. */
202#define ME_WEIGHT_OFFSET 0x520
203#define I4MB_WEIGHT_OFFSET 0x655
204#define I16MB_WEIGHT_OFFSET 0x560
205
206#define ADV_MV_16x16_WEIGHT 0x080
207#define ADV_MV_16_8_WEIGHT 0x0e0
208#define ADV_MV_8x8_WEIGHT 0x240
209#define ADV_MV_4x4x4_WEIGHT 0x3000
210
211#define IE_SAD_SHIFT_I16 0x001
212#define IE_SAD_SHIFT_I4 0x001
213#define ME_SAD_SHIFT_INTER 0x001
214
215#define STEP_2_SKIP_SAD 0
216#define STEP_1_SKIP_SAD 0
217#define STEP_0_SKIP_SAD 0
218#define STEP_2_SKIP_WEIGHT 0
219#define STEP_1_SKIP_WEIGHT 0
220#define STEP_0_SKIP_WEIGHT 0
221
222#define ME_SAD_RANGE_0 0x1 /* 0x0 */
223#define ME_SAD_RANGE_1 0x0
224#define ME_SAD_RANGE_2 0x0
225#define ME_SAD_RANGE_3 0x0
226
227/* use 0 for v3, 0x18 for v2 */
228#define ME_MV_PRE_WEIGHT_0 0x18
229/* use 0 for v3, 0x18 for v2 */
230#define ME_MV_PRE_WEIGHT_1 0x18
231#define ME_MV_PRE_WEIGHT_2 0x0
232#define ME_MV_PRE_WEIGHT_3 0x0
233
234/* use 0 for v3, 0x18 for v2 */
235#define ME_MV_STEP_WEIGHT_0 0x18
236/* use 0 for v3, 0x18 for v2 */
237#define ME_MV_STEP_WEIGHT_1 0x18
238#define ME_MV_STEP_WEIGHT_2 0x0
239#define ME_MV_STEP_WEIGHT_3 0x0
240
241#define ME_SAD_ENOUGH_0_DATA 0x00
242#define ME_SAD_ENOUGH_1_DATA 0x04
243#define ME_SAD_ENOUGH_2_DATA 0x11
244#define ADV_MV_8x8_ENOUGH_DATA 0x20
245
246/* V4_COLOR_BLOCK_FIX */
247#define V3_FORCE_SKIP_SAD_0 0x10
248/* 4 Blocks */
249#define V3_FORCE_SKIP_SAD_1 0x60
250/* 16 Blocks + V3_SKIP_WEIGHT_2 */
251#define V3_FORCE_SKIP_SAD_2 0x250
252/* almost disable it -- use t_lac_coeff_2 output to F_ZERO is better */
253#define V3_ME_F_ZERO_SAD (ME_WEIGHT_OFFSET + 0x10)
254
255#define V3_IE_F_ZERO_SAD_I16 (I16MB_WEIGHT_OFFSET + 0x10)
256#define V3_IE_F_ZERO_SAD_I4 (I4MB_WEIGHT_OFFSET + 0x20)
257
258#define V3_SKIP_WEIGHT_0 0x10
259/* 4 Blocks 8 separate search sad can be very low */
260#define V3_SKIP_WEIGHT_1 0x8 /* (4 * ME_MV_STEP_WEIGHT_1 + 0x100) */
261#define V3_SKIP_WEIGHT_2 0x3
262
263#define V3_LEVEL_1_F_SKIP_MAX_SAD 0x0
264#define V3_LEVEL_1_SKIP_MAX_SAD 0x6
265
266#define I4_ipred_weight_most 0x18
267#define I4_ipred_weight_else 0x28
268
269#define C_ipred_weight_V 0x04
270#define C_ipred_weight_H 0x08
271#define C_ipred_weight_DC 0x0c
272
273#define I16_ipred_weight_V 0x04
274#define I16_ipred_weight_H 0x08
275#define I16_ipred_weight_DC 0x0c
276
277/* 0x00 same as disable */
278#define v3_left_small_max_ie_sad 0x00
279#define v3_left_small_max_me_sad 0x40
280
281#define v5_use_small_diff_cnt 0
282#define v5_simple_mb_inter_all_en 1
283#define v5_simple_mb_inter_8x8_en 1
284#define v5_simple_mb_inter_16_8_en 1
285#define v5_simple_mb_inter_16x16_en 1
286#define v5_simple_mb_intra_en 1
287#define v5_simple_mb_C_en 0
288#define v5_simple_mb_Y_en 1
289#define v5_small_diff_Y 0x10
290#define v5_small_diff_C 0x18
291/* shift 8-bits, 2, 1, 0, -1, -2, -3, -4 */
292#define v5_simple_dq_setting 0x43210fed
293#define v5_simple_me_weight_setting 0
294
295#ifdef H264_ENC_CBR
296#define CBR_TABLE_SIZE 0x800
297#define CBR_SHORT_SHIFT 12 /* same as disable */
298#define CBR_LONG_MB_NUM 2
299#define START_TABLE_ID 8
300#define CBR_LONG_THRESH 4
301#endif
302
/* ME motion-vector SAD threshold table, 64 entries.
 * Each entry packs an index in the upper halfword (0x00..0x3f, matching
 * the entry's position) and a SAD threshold in the lower halfword.
 * Grouped 16 entries per search step as annotated below.
 */
static u32 v3_mv_sad[64] = {
	/* For step0 */
	0x00000004,
	0x00010008,
	0x00020010,
	0x00030018,
	0x00040020,
	0x00050028,
	0x00060038,
	0x00070048,
	0x00080058,
	0x00090068,
	0x000a0080,
	0x000b0098,
	0x000c00b0,
	0x000d00c8,
	0x000e00e8,
	0x000f0110,
	/* For step1 */
	0x00100002,
	0x00110004,
	0x00120008,
	0x0013000c,
	0x00140010,
	0x00150014,
	0x0016001c,
	0x00170024,
	0x0018002c,
	0x00190034,
	0x001a0044,
	0x001b0054,
	0x001c0064,
	0x001d0074,
	0x001e0094,
	0x001f00b4,
	/* For step2 */
	0x00200006,
	0x0021000c,
	0x0022000c,
	0x00230018,
	0x00240018,
	0x00250018,
	0x00260018,
	0x00270030,
	0x00280030,
	0x00290030,
	0x002a0030,
	0x002b0030,
	0x002c0030,
	0x002d0030,
	0x002e0030,
	0x002f0050,
	/* For step2 4x4-8x8 */
	0x00300001,
	0x00310002,
	0x00320002,
	0x00330004,
	0x00340004,
	0x00350004,
	0x00360004,
	0x00370006,
	0x00380006,
	0x00390006,
	0x003a0006,
	0x003b0006,
	0x003c0006,
	0x003d0006,
	0x003e0006,
	0x003f0006
};
373
/* Static DDR layout for the encoder working buffers (level 0: up to
 * 1920x1088).  All buf_start values are offsets from the region base;
 * total footprint must fit in min_buffsize (0x1400000 = 20 MiB).
 * MIN_SIZE (top of file) aliases min_buffsize of entry 0.
 */
static struct BuffInfo_s amvenc_buffspec[] = {
	{
		.lev_id = 0,
		.max_width = 1920,
		.max_height = 1088,
		.min_buffsize = 0x1400000,
		/* raw macroblock/DCT input: [0x000000, 0x800000) */
		.dct = {
			.buf_start = 0,
			.buf_size = 0x800000, /* 1920x1088x4 */
		},
		/* reconstruction luma, picture 0: [0x800000, 0xb00000) */
		.dec0_y = {
			.buf_start = 0x800000,
			.buf_size = 0x300000,
		},
		/* reconstruction luma, picture 1: [0xb00000, 0xe00000) */
		.dec1_y = {
			.buf_start = 0xb00000,
			.buf_size = 0x300000,
		},
		/* firmware assist area: [0xe10000, 0xed0000) */
		.assit = {
			.buf_start = 0xe10000,
			.buf_size = 0xc0000,
		},
		/* output bitstream ring: [0xf00000, 0x1000000) */
		.bitstream = {
			.buf_start = 0xf00000,
			.buf_size = 0x100000,
		},
		/* GE2D scaler intermediate: [0x1000000, 0x1300000) */
		.scale_buff = {
			.buf_start = 0x1000000,
			.buf_size = 0x300000,
		},
		/* per-MB dump info: [0x1300000, 0x13a0000) */
		.dump_info = {
			.buf_start = 0x1300000,
			.buf_size = 0xa0000, /* (1920x1088/256)x80 */
		},
		/* CBR rate-control table: [0x13b0000, 0x13b2000) */
		.cbr_info = {
			.buf_start = 0x13b0000,
			.buf_size = 0x2000,
		}
	}
};
414
/* Firmware flavor, in ascending SoC generation order.  Used as the index
 * into ucode_name[] below, so the enum and the array must stay in sync.
 */
enum ucode_type_e {
	UCODE_GXL,
	UCODE_TXL,
	UCODE_G12A,
	UCODE_MAX	/* sentinel: count of valid entries above */
};

/* Firmware file base names, indexed by enum ucode_type_e */
const char *ucode_name[] = {
	"gxl_h264_enc",
	"txl_h264_enc_cavlc",
	"ga_h264_enc_cabac",
};
427
428static void dma_flush(u32 buf_start, u32 buf_size);
429static void cache_flush(u32 buf_start, u32 buf_size);
430static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr);
431static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg);
432
433static const char *select_ucode(u32 ucode_index)
434{
435 enum ucode_type_e ucode = UCODE_GXL;
436
437 switch (ucode_index) {
438 case UCODE_MODE_FULL:
439 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
440 ucode = UCODE_G12A;
441 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL)
442 ucode = UCODE_TXL;
443 else /* (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) */
444 ucode = UCODE_GXL;
445 break;
446 break;
447 default:
448 break;
449 }
450 return (const char *)ucode_name[ucode];
451}
452
453static void hcodec_prog_qtbl(struct encode_wq_s *wq)
454{
455 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
456 (0 << 23) | /* quant_table_addr */
457 (1 << 22)); /* quant_table_addr_update */
458
459 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
460 wq->quant_tbl_i4[0]);
461 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
462 wq->quant_tbl_i4[1]);
463 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
464 wq->quant_tbl_i4[2]);
465 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
466 wq->quant_tbl_i4[3]);
467 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
468 wq->quant_tbl_i4[4]);
469 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
470 wq->quant_tbl_i4[5]);
471 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
472 wq->quant_tbl_i4[6]);
473 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
474 wq->quant_tbl_i4[7]);
475
476 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
477 (8 << 23) | /* quant_table_addr */
478 (1 << 22)); /* quant_table_addr_update */
479
480 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
481 wq->quant_tbl_i16[0]);
482 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
483 wq->quant_tbl_i16[1]);
484 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
485 wq->quant_tbl_i16[2]);
486 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
487 wq->quant_tbl_i16[3]);
488 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
489 wq->quant_tbl_i16[4]);
490 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
491 wq->quant_tbl_i16[5]);
492 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
493 wq->quant_tbl_i16[6]);
494 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
495 wq->quant_tbl_i16[7]);
496
497 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
498 (16 << 23) | /* quant_table_addr */
499 (1 << 22)); /* quant_table_addr_update */
500
501 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
502 wq->quant_tbl_me[0]);
503 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
504 wq->quant_tbl_me[1]);
505 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
506 wq->quant_tbl_me[2]);
507 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
508 wq->quant_tbl_me[3]);
509 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
510 wq->quant_tbl_me[4]);
511 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
512 wq->quant_tbl_me[5]);
513 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
514 wq->quant_tbl_me[6]);
515 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
516 wq->quant_tbl_me[7]);
517}
518
519static void InitEncodeWeight(void)
520{
521 me_mv_merge_ctl =
522 (0x1 << 31) | /* [31] me_merge_mv_en_16 */
523 (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
524 (0x1 << 29) | /* [29] me_merge_flex_en_16 */
525 (0x1 << 28) | /* [28] me_merge_sad_en_16 */
526 (0x1 << 27) | /* [27] me_merge_mv_en_8 */
527 (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
528 (0x1 << 25) | /* [25] me_merge_flex_en_8 */
529 (0x1 << 24) | /* [24] me_merge_sad_en_8 */
530 (0x12 << 18) |
531 /* [23:18] me_merge_mv_diff_16 - MV diff
532 * <= n pixel can be merged
533 */
534 (0x2b << 12) |
535 /* [17:12] me_merge_mv_diff_8 - MV diff
536 * <= n pixel can be merged
537 */
538 (0x80 << 0);
539 /* [11:0] me_merge_min_sad - SAD
540 * >= 0x180 can be merged with other MV
541 */
542
543 me_mv_weight_01 = (ME_MV_STEP_WEIGHT_1 << 24) |
544 (ME_MV_PRE_WEIGHT_1 << 16) |
545 (ME_MV_STEP_WEIGHT_0 << 8) |
546 (ME_MV_PRE_WEIGHT_0 << 0);
547
548 me_mv_weight_23 = (ME_MV_STEP_WEIGHT_3 << 24) |
549 (ME_MV_PRE_WEIGHT_3 << 16) |
550 (ME_MV_STEP_WEIGHT_2 << 8) |
551 (ME_MV_PRE_WEIGHT_2 << 0);
552
553 me_sad_range_inc = (ME_SAD_RANGE_3 << 24) |
554 (ME_SAD_RANGE_2 << 16) |
555 (ME_SAD_RANGE_1 << 8) |
556 (ME_SAD_RANGE_0 << 0);
557
558 me_step0_close_mv = (0x100 << 10) |
559 /* me_step0_big_sad -- two MV sad
560 * diff bigger will use use 1
561 */
562 (2 << 5) | /* me_step0_close_mv_y */
563 (2 << 0); /* me_step0_close_mv_x */
564
565 me_f_skip_sad = (0x00 << 24) | /* force_skip_sad_3 */
566 (STEP_2_SKIP_SAD << 16) | /* force_skip_sad_2 */
567 (STEP_1_SKIP_SAD << 8) | /* force_skip_sad_1 */
568 (STEP_0_SKIP_SAD << 0); /* force_skip_sad_0 */
569
570 me_f_skip_weight = (0x00 << 24) | /* force_skip_weight_3 */
571 /* force_skip_weight_2 */
572 (STEP_2_SKIP_WEIGHT << 16) |
573 /* force_skip_weight_1 */
574 (STEP_1_SKIP_WEIGHT << 8) |
575 /* force_skip_weight_0 */
576 (STEP_0_SKIP_WEIGHT << 0);
577
578 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
579 me_f_skip_sad = 0;
580 me_f_skip_weight = 0;
581 me_mv_weight_01 = 0;
582 me_mv_weight_23 = 0;
583 }
584
585 me_sad_enough_01 = (ME_SAD_ENOUGH_1_DATA << 12) |
586 /* me_sad_enough_1 */
587 (ME_SAD_ENOUGH_0_DATA << 0) |
588 /* me_sad_enough_0 */
589 (0 << 12) | /* me_sad_enough_1 */
590 (0 << 0); /* me_sad_enough_0 */
591
592 me_sad_enough_23 = (ADV_MV_8x8_ENOUGH_DATA << 12) |
593 /* adv_mv_8x8_enough */
594 (ME_SAD_ENOUGH_2_DATA << 0) |
595 /* me_sad_enough_2 */
596 (0 << 12) | /* me_sad_enough_3 */
597 (0 << 0); /* me_sad_enough_2 */
598}
599
/*output stream buffer setting*/
/* Point the VLC output ring buffer at the workqueue's bitstream region
 * [BitstreamStart, BitstreamEnd], reset write/read pointers to the start,
 * and enable the VLC video-buffer controller.  Register bit meanings are
 * taken as-is from vendor code; not documented publicly.
 */
static void avc_init_output_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_VLC_VB_MEM_CTL,
		((1 << 31) | (0x3f << 24) |
		(0x20 << 16) | (2 << 0)));
	WRITE_HREG(HCODEC_VLC_VB_START_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_WR_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_END_PTR,
		wq->mem.BitstreamEnd);
	/* reset, then configure+enable the VB controller */
	WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1);
	WRITE_HREG(HCODEC_VLC_VB_CONTROL,
		((0 << 14) | (7 << 3) |
		(1 << 1) | (0 << 0)));
}
619
/*input dct buffer setting*/
/* Point the QDCT macroblock input ring at the workqueue's DCT region and
 * reset both write and read pointers to the start (empty buffer).
 */
static void avc_init_input_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_QDCT_MB_START_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_END_PTR,
		wq->mem.dct_buff_end_addr);
	WRITE_HREG(HCODEC_QDCT_MB_WR_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_RD_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_BUFF, 0);
}
633
/*input reference buffer setting*/
/* Select the canvas triplet used as the motion-compensation reference
 * (ANC0) and clear the VLC HCMD config.
 */
static void avc_init_reference_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_ANC0_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_VLC_HCMD_CONFIG, 0);
}
640
/* Tell the firmware where the assist (scratch) buffer lives in DDR. */
static void avc_init_assit_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(MEM_OFFSET_REG, wq->mem.assit_buffer_offset);
}
645
/*deblock buffer setting, same as INI_CANVAS*/
/* Use the same canvas triplet for reconstruction output and for both the
 * deblock read and deblock write paths.
 */
static void avc_init_dblk_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_REC_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKR_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKW_CANVAS_ADDR, canvas);
}
653
654static void avc_init_encoder(struct encode_wq_s *wq, bool idr)
655{
656 WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0);
657 WRITE_HREG(HCODEC_VLC_CONFIG, 0x07);
658 WRITE_HREG(HCODEC_VLC_INT_CONTROL, 0);
659
660 WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15);
661 WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8);
662 WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14);
663
664 WRITE_HREG(IDR_PIC_ID, wq->pic.idr_pic_id);
665 WRITE_HREG(FRAME_NUMBER,
666 (idr == true) ? 0 : wq->pic.frame_number);
667 WRITE_HREG(PIC_ORDER_CNT_LSB,
668 (idr == true) ? 0 : wq->pic.pic_order_cnt_lsb);
669
670 WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB,
671 wq->pic.log2_max_pic_order_cnt_lsb);
672 WRITE_HREG(LOG2_MAX_FRAME_NUM,
673 wq->pic.log2_max_frame_num);
674 WRITE_HREG(ANC0_BUFFER_ID, 0);
675 WRITE_HREG(QPPICTURE, wq->pic.init_qppicture);
676}
677
/* Configure the six reconstruction canvases: Y + UV(+UV alias) for each of
 * the two reference pictures (dec0 / dec1).  Width is rounded up to 32,
 * height to 16; chroma canvases use half the luma height (NV12-style
 * semi-planar layout -- the "third plane" aliases the UV plane).
 */
static void avc_canvas_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	canvas_config(ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(1 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(2 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);

	canvas_config(3 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(4 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(5 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
}
714
/* Resolve the static buffer layout (amvenc_buffspec offsets) into absolute
 * DDR addresses for this workqueue, derive the chroma sub-buffers from the
 * luma canvases, and pack the deblock/reference canvas index triplets.
 * Must run before avc_canvas_init()/avc_init_*_buffer(): it fills in the
 * dec0_uv/dec1_uv offsets and all the mem.* addresses they consume.
 */
static void avc_buffspec_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;
	u32 mb_w = (wq->pic.encoder_width + 15) >> 4;
	u32 mb_h = (wq->pic.encoder_height + 15) >> 4;
	u32 mbs = mb_w * mb_h;

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	/* DCT input ring: end address is inclusive (start + size - 1) */
	wq->mem.dct_buff_start_addr = start_addr +
		wq->mem.bufspec.dct.buf_start;
	wq->mem.dct_buff_end_addr =
		wq->mem.dct_buff_start_addr +
		wq->mem.bufspec.dct.buf_size - 1;
	enc_pr(LOG_INFO, "dct_buff_start_addr is 0x%x, wq:%p.\n",
		wq->mem.dct_buff_start_addr, (void *)wq);

	/* UV planes immediately follow each Y plane, at half its size */
	wq->mem.bufspec.dec0_uv.buf_start =
		wq->mem.bufspec.dec0_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec0_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.bufspec.dec1_uv.buf_start =
		wq->mem.bufspec.dec1_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec1_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.assit_buffer_offset = start_addr +
		wq->mem.bufspec.assit.buf_start;
	enc_pr(LOG_INFO, "assit_buffer_offset is 0x%x, wq: %p.\n",
		wq->mem.assit_buffer_offset, (void *)wq);
	/*output stream buffer config*/
	wq->mem.BitstreamStart = start_addr +
		wq->mem.bufspec.bitstream.buf_start;
	wq->mem.BitstreamEnd =
		wq->mem.BitstreamStart +
		wq->mem.bufspec.bitstream.buf_size - 1;
	enc_pr(LOG_INFO, "BitstreamStart is 0x%x, wq: %p.\n",
		wq->mem.BitstreamStart, (void *)wq);

	wq->mem.scaler_buff_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.scale_buff.buf_start;
	wq->mem.dump_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.dump_info.buf_start;
	enc_pr(LOG_INFO,
		"CBR: dump_info_ddr_start_addr:%x.\n",
		wq->mem.dump_info_ddr_start_addr);
	enc_pr(LOG_INFO, "CBR: buf_start :%d.\n",
		wq->mem.buf_start);
	enc_pr(LOG_INFO, "CBR: dump_info.buf_start :%d.\n",
		wq->mem.bufspec.dump_info.buf_start);
	/* 80 bytes of dump info per macroblock, rounded up to a page */
	wq->mem.dump_info_ddr_size =
		DUMP_INFO_BYTES_PER_MB * mbs;
	wq->mem.dump_info_ddr_size =
		(wq->mem.dump_info_ddr_size + PAGE_SIZE - 1)
		& ~(PAGE_SIZE - 1);
	wq->mem.cbr_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.cbr_info.buf_start;
	wq->mem.cbr_info_ddr_size =
		wq->mem.bufspec.cbr_info.buf_size;
	/* kernel mapping of the CBR table; NOTE(review): no visible check
	 * of the codec_mm_vmap() result in this function
	 */
	wq->mem.cbr_info_ddr_virt_addr =
		codec_mm_vmap(wq->mem.cbr_info_ddr_start_addr,
			wq->mem.bufspec.cbr_info.buf_size);

	/* pack three canvas indices (Y, UV, UV-alias) per picture:
	 * dec0 -> deblock/reconstruction, dec1 -> reference
	 */
	wq->mem.dblk_buf_canvas =
		((ENC_CANVAS_OFFSET + 2) << 16) |
		((ENC_CANVAS_OFFSET + 1) << 8) |
		(ENC_CANVAS_OFFSET);
	wq->mem.ref_buf_canvas =
		((ENC_CANVAS_OFFSET + 5) << 16) |
		((ENC_CANVAS_OFFSET + 4) << 8) |
		(ENC_CANVAS_OFFSET + 3);
}
788
/* Program intra-estimation / motion-estimation mode registers and the
 * fixed-slice configuration.
 * NOTE(review): the 'quant' parameter is unused in this function.
 */
static void avc_init_ie_me_parameter(struct encode_wq_s *wq, u32 quant)
{
	ie_cur_ref_sel = 0;
	/* NOTE(review): overrides the module default (3) on every call */
	ie_pippeline_block = 12;
	/* currently disable half and sub pixel */
	ie_me_mode =
		(ie_pippeline_block & IE_PIPPELINE_BLOCK_MASK) <<
		IE_PIPPELINE_BLOCK_SHIFT;

	WRITE_HREG(IE_ME_MODE, ie_me_mode);
	WRITE_HREG(IE_REF_SEL, ie_cur_ref_sel);
	WRITE_HREG(IE_ME_MB_TYPE, ie_me_mb_type);
#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		/* NOTE(review): this computes (MB rows) * rows_per_slice,
		 * not MBs per slice as the name suggests -- confirm which
		 * unit FIXED_SLICE_CFG actually expects.
		 */
		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
}
816
817/* for temp */
818#define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1)
819#define HCODEC_MFDIN_REG0D (HCODEC_MFDIN_REGB_AMPC + 0x2)
820#define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3)
821#define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4)
822#define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5)
823#define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6)
824#define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7)
825#define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8)
826#define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9)
827#define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa)
828#define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb)
829
830static void mfdin_basic(u32 input, u8 iformat,
831 u8 oformat, u32 picsize_x, u32 picsize_y,
832 u8 r2y_en, u8 nr, u8 ifmt_extra)
833{
834 u8 dsample_en; /* Downsample Enable */
835 u8 interp_en; /* Interpolation Enable */
836 u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */
837 u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */
838 /* mfdin_reg3_canv[25:24];
839 * // bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3
840 */
841 u8 canv_idx0_bppx;
842 /* mfdin_reg3_canv[27:26];
843 * // bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3
844 */
845 u8 canv_idx1_bppx;
846 /* mfdin_reg3_canv[29:28];
847 * // bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3
848 */
849 u8 canv_idx0_bppy;
850 /* mfdin_reg3_canv[31:30];
851 * // bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3
852 */
853 u8 canv_idx1_bppy;
854 u8 ifmt444, ifmt422, ifmt420, linear_bytes4p;
855 u8 nr_enable;
856 u8 cfg_y_snr_en;
857 u8 cfg_y_tnr_en;
858 u8 cfg_c_snr_en;
859 u8 cfg_c_tnr_en;
860 u32 linear_bytesperline;
861 s32 reg_offset;
862 bool linear_enable = false;
863 bool format_err = false;
864
865 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
866 if ((iformat == 7) && (ifmt_extra > 2))
867 format_err = true;
868 } else if (iformat == 7)
869 format_err = true;
870
871 if (format_err) {
872 enc_pr(LOG_ERROR,
873 "mfdin format err, iformat:%d, ifmt_extra:%d\n",
874 iformat, ifmt_extra);
875 return;
876 }
877 if (iformat != 7)
878 ifmt_extra = 0;
879
880 ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) ||
881 (iformat == 9) || (iformat == 12)) ? 1 : 0;
882 if (iformat == 7 && ifmt_extra == 1)
883 ifmt444 = 1;
884 ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0;
885 if (iformat == 7 && ifmt_extra != 1)
886 ifmt422 = 1;
887 ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) ||
888 (iformat == 11)) ? 1 : 0;
889 dsample_en = ((ifmt444 && (oformat != 2)) ||
890 (ifmt422 && (oformat == 0))) ? 1 : 0;
891 interp_en = ((ifmt422 && (oformat == 2)) ||
892 (ifmt420 && (oformat != 0))) ? 1 : 0;
893 y_size = (oformat != 0) ? 1 : 0;
894 if (iformat == 12)
895 y_size = 0;
896 r2y_mode = (r2y_en == 1) ? 1 : 0; /* Fixed to 1 (TODO) */
897 canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 2 : 1;
898 canv_idx1_bppx = (iformat == 4) ? 0 : 1;
899 canv_idx0_bppy = 1;
900 canv_idx1_bppy = (iformat == 5) ? 1 : 0;
901
902 if ((iformat == 8) || (iformat == 9) || (iformat == 12))
903 linear_bytes4p = 3;
904 else if (iformat == 10)
905 linear_bytes4p = 2;
906 else if (iformat == 11)
907 linear_bytes4p = 1;
908 else
909 linear_bytes4p = 0;
910 if (iformat == 12)
911 linear_bytesperline = picsize_x * 4;
912 else
913 linear_bytesperline = picsize_x * linear_bytes4p;
914
915 if (iformat < 8)
916 linear_enable = false;
917 else
918 linear_enable = true;
919
920 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXBB) {
921 reg_offset = -8;
922 /* nr_mode: 0:Disabled 1:SNR Only 2:TNR Only 3:3DNR */
923 nr_enable = (nr) ? 1 : 0;
924 cfg_y_snr_en = ((nr == 1) || (nr == 3)) ? 1 : 0;
925 cfg_y_tnr_en = ((nr == 2) || (nr == 3)) ? 1 : 0;
926 cfg_c_snr_en = cfg_y_snr_en;
927 /* cfg_c_tnr_en = cfg_y_tnr_en; */
928 cfg_c_tnr_en = 0;
929
930 /* NR For Y */
931 WRITE_HREG((HCODEC_MFDIN_REG0D + reg_offset),
932 ((cfg_y_snr_en << 0) |
933 (y_snr_err_norm << 1) |
934 (y_snr_gau_bld_core << 2) |
935 (((y_snr_gau_bld_ofst) & 0xff) << 6) |
936 (y_snr_gau_bld_rate << 14) |
937 (y_snr_gau_alp0_min << 20) |
938 (y_snr_gau_alp0_max << 26)));
939 WRITE_HREG((HCODEC_MFDIN_REG0E + reg_offset),
940 ((cfg_y_tnr_en << 0) |
941 (y_tnr_mc_en << 1) |
942 (y_tnr_txt_mode << 2) |
943 (y_tnr_mot_sad_margin << 3) |
944 (y_tnr_alpha_min << 7) |
945 (y_tnr_alpha_max << 13) |
946 (y_tnr_deghost_os << 19)));
947 WRITE_HREG((HCODEC_MFDIN_REG0F + reg_offset),
948 ((y_tnr_mot_cortxt_rate << 0) |
949 (y_tnr_mot_distxt_ofst << 8) |
950 (y_tnr_mot_distxt_rate << 4) |
951 (y_tnr_mot_dismot_ofst << 16) |
952 (y_tnr_mot_frcsad_lock << 24)));
953 WRITE_HREG((HCODEC_MFDIN_REG10 + reg_offset),
954 ((y_tnr_mot2alp_frc_gain << 0) |
955 (y_tnr_mot2alp_nrm_gain << 8) |
956 (y_tnr_mot2alp_dis_gain << 16) |
957 (y_tnr_mot2alp_dis_ofst << 24)));
958 WRITE_HREG((HCODEC_MFDIN_REG11 + reg_offset),
959 ((y_bld_beta2alp_rate << 0) |
960 (y_bld_beta_min << 8) |
961 (y_bld_beta_max << 14)));
962
963 /* NR For C */
964 WRITE_HREG((HCODEC_MFDIN_REG12 + reg_offset),
965 ((cfg_y_snr_en << 0) |
966 (c_snr_err_norm << 1) |
967 (c_snr_gau_bld_core << 2) |
968 (((c_snr_gau_bld_ofst) & 0xff) << 6) |
969 (c_snr_gau_bld_rate << 14) |
970 (c_snr_gau_alp0_min << 20) |
971 (c_snr_gau_alp0_max << 26)));
972
973 WRITE_HREG((HCODEC_MFDIN_REG13 + reg_offset),
974 ((cfg_c_tnr_en << 0) |
975 (c_tnr_mc_en << 1) |
976 (c_tnr_txt_mode << 2) |
977 (c_tnr_mot_sad_margin << 3) |
978 (c_tnr_alpha_min << 7) |
979 (c_tnr_alpha_max << 13) |
980 (c_tnr_deghost_os << 19)));
981 WRITE_HREG((HCODEC_MFDIN_REG14 + reg_offset),
982 ((c_tnr_mot_cortxt_rate << 0) |
983 (c_tnr_mot_distxt_ofst << 8) |
984 (c_tnr_mot_distxt_rate << 4) |
985 (c_tnr_mot_dismot_ofst << 16) |
986 (c_tnr_mot_frcsad_lock << 24)));
987 WRITE_HREG((HCODEC_MFDIN_REG15 + reg_offset),
988 ((c_tnr_mot2alp_frc_gain << 0) |
989 (c_tnr_mot2alp_nrm_gain << 8) |
990 (c_tnr_mot2alp_dis_gain << 16) |
991 (c_tnr_mot2alp_dis_ofst << 24)));
992
993 WRITE_HREG((HCODEC_MFDIN_REG16 + reg_offset),
994 ((c_bld_beta2alp_rate << 0) |
995 (c_bld_beta_min << 8) |
996 (c_bld_beta_max << 14)));
997
998 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
999 (iformat << 0) | (oformat << 4) |
1000 (dsample_en << 6) | (y_size << 8) |
1001 (interp_en << 9) | (r2y_en << 12) |
1002 (r2y_mode << 13) | (ifmt_extra << 16) |
1003 (nr_enable << 19));
1004 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1005 (picsize_x << 14) | (picsize_y << 0));
1006 } else {
1007 reg_offset = 0;
1008 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
1009 (iformat << 0) | (oformat << 4) |
1010 (dsample_en << 6) | (y_size << 8) |
1011 (interp_en << 9) | (r2y_en << 12) |
1012 (r2y_mode << 13));
1013 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1014 (picsize_x << 12) | (picsize_y << 0));
1015 }
1016
1017 if (linear_enable == false) {
1018 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1019 (input & 0xffffff) |
1020 (canv_idx1_bppy << 30) |
1021 (canv_idx0_bppy << 28) |
1022 (canv_idx1_bppx << 26) |
1023 (canv_idx0_bppx << 24));
1024 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1025 (0 << 16) | (0 << 0));
1026 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0);
1027 } else {
1028 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1029 (canv_idx1_bppy << 30) |
1030 (canv_idx0_bppy << 28) |
1031 (canv_idx1_bppx << 26) |
1032 (canv_idx0_bppx << 24));
1033 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1034 (linear_bytes4p << 16) | (linear_bytesperline << 0));
1035 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input);
1036 }
1037
1038 if (iformat == 12)
1039 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1040 (2 << 0) | (1 << 3) | (0 << 6) |
1041 (3 << 9) | (6 << 12) | (5 << 15) |
1042 (4 << 18) | (7 << 21));
1043 else
1044 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1045 (7 << 0) | (6 << 3) | (5 << 6) |
1046 (4 << 9) | (3 << 12) | (2 << 15) |
1047 (1 << 18) | (0 << 21));
1048}
1049
1050#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1051static int scale_frame(struct encode_wq_s *wq,
1052 struct encode_request_s *request,
1053 struct config_para_ex_s *ge2d_config,
1054 u32 src_addr, bool canvas)
1055{
1056 struct ge2d_context_s *context = encode_manager.context;
1057 int src_top, src_left, src_width, src_height;
1058 struct canvas_s cs0, cs1, cs2, cd;
1059 u32 src_canvas, dst_canvas;
1060 u32 src_canvas_w, dst_canvas_w;
1061 u32 src_h = request->src_h;
1062 u32 dst_w = ((wq->pic.encoder_width + 15) >> 4) << 4;
1063 u32 dst_h = ((wq->pic.encoder_height + 15) >> 4) << 4;
1064 int input_format = GE2D_FORMAT_M24_NV21;
1065
1066 src_top = request->crop_top;
1067 src_left = request->crop_left;
1068 src_width = request->src_w - src_left - request->crop_right;
1069 src_height = request->src_h - src_top - request->crop_bottom;
1070 pr_err("request->fmt=%d, %d %d, canvas=%d\n", request->fmt, FMT_NV21, FMT_BGR888, canvas);
1071
1072 if (canvas) {
1073 if ((request->fmt == FMT_NV21)
1074 || (request->fmt == FMT_NV12)) {
1075 src_canvas = src_addr & 0xffff;
1076 input_format = GE2D_FORMAT_M24_NV21;
1077 } else if (request->fmt == FMT_BGR888) {
1078 src_canvas = src_addr & 0xffffff;
1079 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1080 } else if (request->fmt == FMT_RGBA8888) {
1081 src_canvas = src_addr & 0xffffff;
1082 input_format = GE2D_FORMAT_S32_ABGR;
1083 } else {
1084 src_canvas = src_addr & 0xffffff;
1085 input_format = GE2D_FORMAT_M24_YUV420;
1086 }
1087 } else {
1088 if ((request->fmt == FMT_NV21)
1089 || (request->fmt == FMT_NV12)) {
1090 src_canvas_w =
1091 ((request->src_w + 31) >> 5) << 5;
1092 canvas_config(ENC_CANVAS_OFFSET + 9,
1093 src_addr,
1094 src_canvas_w, src_h,
1095 CANVAS_ADDR_NOWRAP,
1096 CANVAS_BLKMODE_LINEAR);
1097 canvas_config(ENC_CANVAS_OFFSET + 10,
1098 src_addr + src_canvas_w * src_h,
1099 src_canvas_w, src_h / 2,
1100 CANVAS_ADDR_NOWRAP,
1101 CANVAS_BLKMODE_LINEAR);
1102 src_canvas =
1103 ((ENC_CANVAS_OFFSET + 10) << 8)
1104 | (ENC_CANVAS_OFFSET + 9);
1105 input_format = GE2D_FORMAT_M24_NV21;
1106 } else if (request->fmt == FMT_BGR888) {
1107 src_canvas_w =
1108 ((request->src_w + 31) >> 5) << 5;
1109
1110 canvas_config(ENC_CANVAS_OFFSET + 9,
1111 src_addr,
1112 src_canvas_w * 3, src_h,
1113 CANVAS_ADDR_NOWRAP,
1114 CANVAS_BLKMODE_LINEAR);
1115 src_canvas = ENC_CANVAS_OFFSET + 9;
1116 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1117 } else if (request->fmt == FMT_RGBA8888) {
1118 src_canvas_w =
1119 ((request->src_w + 31) >> 5) << 5;
1120 canvas_config(
1121 ENC_CANVAS_OFFSET + 9,
1122 src_addr,
1123 src_canvas_w * 4,
1124 src_h,
1125 CANVAS_ADDR_NOWRAP,
1126 CANVAS_BLKMODE_LINEAR);
1127 src_canvas = ENC_CANVAS_OFFSET + 9;
1128 input_format = GE2D_FORMAT_S32_ABGR; //Opposite color after ge2d
1129 } else {
1130 src_canvas_w =
1131 ((request->src_w + 63) >> 6) << 6;
1132 canvas_config(ENC_CANVAS_OFFSET + 9,
1133 src_addr,
1134 src_canvas_w, src_h,
1135 CANVAS_ADDR_NOWRAP,
1136 CANVAS_BLKMODE_LINEAR);
1137 canvas_config(ENC_CANVAS_OFFSET + 10,
1138 src_addr + src_canvas_w * src_h,
1139 src_canvas_w / 2, src_h / 2,
1140 CANVAS_ADDR_NOWRAP,
1141 CANVAS_BLKMODE_LINEAR);
1142 canvas_config(ENC_CANVAS_OFFSET + 11,
1143 src_addr + src_canvas_w * src_h * 5 / 4,
1144 src_canvas_w / 2, src_h / 2,
1145 CANVAS_ADDR_NOWRAP,
1146 CANVAS_BLKMODE_LINEAR);
1147 src_canvas =
1148 ((ENC_CANVAS_OFFSET + 11) << 16) |
1149 ((ENC_CANVAS_OFFSET + 10) << 8) |
1150 (ENC_CANVAS_OFFSET + 9);
1151 input_format = GE2D_FORMAT_M24_YUV420;
1152 }
1153 }
1154
1155 dst_canvas_w = ((dst_w + 31) >> 5) << 5;
1156
1157 canvas_config(ENC_CANVAS_OFFSET + 6,
1158 wq->mem.scaler_buff_start_addr,
1159 dst_canvas_w, dst_h,
1160 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1161
1162 canvas_config(ENC_CANVAS_OFFSET + 7,
1163 wq->mem.scaler_buff_start_addr + dst_canvas_w * dst_h,
1164 dst_canvas_w, dst_h / 2,
1165 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1166
1167 dst_canvas = ((ENC_CANVAS_OFFSET + 7) << 8) |
1168 (ENC_CANVAS_OFFSET + 6);
1169
1170 ge2d_config->alu_const_color = 0;
1171 ge2d_config->bitmask_en = 0;
1172 ge2d_config->src1_gb_alpha = 0;
1173 ge2d_config->dst_xy_swap = 0;
1174 canvas_read(src_canvas & 0xff, &cs0);
1175 canvas_read((src_canvas >> 8) & 0xff, &cs1);
1176 canvas_read((src_canvas >> 16) & 0xff, &cs2);
1177 ge2d_config->src_planes[0].addr = cs0.addr;
1178 ge2d_config->src_planes[0].w = dst_w * 4;//cs0.width;
1179 ge2d_config->src_planes[0].h = dst_h;//cs0.height;
1180 ge2d_config->src_planes[1].addr = cs1.addr;
1181 ge2d_config->src_planes[1].w = cs1.width;
1182 ge2d_config->src_planes[1].h = cs1.height;
1183 ge2d_config->src_planes[2].addr = cs2.addr;
1184 ge2d_config->src_planes[2].w = cs2.width;
1185 ge2d_config->src_planes[2].h = cs2.height;
1186
1187 canvas_read(dst_canvas & 0xff, &cd);
1188
1189 ge2d_config->dst_planes[0].addr = cd.addr;
1190 ge2d_config->dst_planes[0].w = dst_w * 4;//cd.width;
1191 ge2d_config->dst_planes[0].h = dst_h;//cd.height;
1192 ge2d_config->src_key.key_enable = 0;
1193 ge2d_config->src_key.key_mask = 0;
1194 ge2d_config->src_key.key_mode = 0;
1195 ge2d_config->src_para.canvas_index = src_canvas;
1196 ge2d_config->src_para.mem_type = CANVAS_TYPE_INVALID;
1197 ge2d_config->src_para.format = input_format | GE2D_LITTLE_ENDIAN;
1198 ge2d_config->src_para.fill_color_en = 0;
1199 ge2d_config->src_para.fill_mode = 0;
1200 ge2d_config->src_para.x_rev = 0;
1201 ge2d_config->src_para.y_rev = 0;
1202 ge2d_config->src_para.color = 0xffffffff;
1203 ge2d_config->src_para.top = 0;
1204 ge2d_config->src_para.left = 0;
1205 ge2d_config->src_para.width = dst_w;//request->src_w;
1206 ge2d_config->src_para.height = dst_h;//request->src_h;
1207 ge2d_config->src2_para.mem_type = CANVAS_TYPE_INVALID;
1208 ge2d_config->dst_para.canvas_index = dst_canvas;
1209 ge2d_config->dst_para.mem_type = CANVAS_TYPE_INVALID;
1210 ge2d_config->dst_para.format =
1211 GE2D_FORMAT_M24_NV21 | GE2D_LITTLE_ENDIAN;
1212
1213 if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720) {
1214 ge2d_config->dst_para.format |= GE2D_FORMAT_BT_STANDARD;
1215 }
1216
1217 ge2d_config->dst_para.fill_color_en = 0;
1218 ge2d_config->dst_para.fill_mode = 0;
1219 ge2d_config->dst_para.x_rev = 0;
1220 ge2d_config->dst_para.y_rev = 0;
1221 ge2d_config->dst_para.color = 0;
1222 ge2d_config->dst_para.top = 0;
1223 ge2d_config->dst_para.left = 0;
1224 ge2d_config->dst_para.width = dst_w;
1225 ge2d_config->dst_para.height = dst_h;
1226 ge2d_config->dst_para.x_rev = 0;
1227 ge2d_config->dst_para.y_rev = 0;
1228
1229
1230 if (ge2d_context_config_ex(context, ge2d_config) < 0) {
1231 pr_err("++ge2d configing error.\n");
1232 return -1;
1233 }
1234 stretchblt_noalpha(context, src_left, src_top, src_width, src_height,
1235 0, 0, wq->pic.encoder_width, wq->pic.encoder_height);
1236 return dst_canvas_w*dst_h * 3 / 2;
1237}
1238#endif
1239
/*
 * set_input_format() - program the MFDIN input stage for one encode request.
 *
 * Chooses the MFDIN input format code (iformat), whether an RGB->YUV
 * conversion is required (r2y_en), and sets up the input canvases
 * according to the request buffer type:
 *  - LOCAL_BUFF / PHYSICAL_BUFF / DMA_BUFF: the input is linear memory;
 *    scratch canvases (ENC_CANVAS_OFFSET + 6/7/8) are configured over
 *    its planes and their indexes are packed into 'input'.
 *  - CANVAS_BUFF: request->src already packs canvas indexes; only the
 *    relevant index bytes are kept.
 * If request->scale_enable is set, the frame is first routed through the
 * GE2D scaler and the scaler's NV21 output canvases become the MFDIN
 * input (requires CONFIG_AMLOGIC_MEDIA_GE2D).
 *
 * Returns 0 on success, -1 on an unsupported format/type combination.
 */
static s32 set_input_format(struct encode_wq_s *wq,
	struct encode_request_s *request)
{
	s32 ret = 0;
	u8 iformat = MAX_FRAME_FMT, oformat = MAX_FRAME_FMT, r2y_en = 0;
	u32 picsize_x, picsize_y, src_addr;
	u32 canvas_w = 0;
	u32 input = request->src;
	u32 input_y = 0;
	u32 input_u = 0;
	u32 input_v = 0;
	u8 ifmt_extra = 0;

	/* RGB565 and out-of-range formats are not supported */
	if ((request->fmt == FMT_RGB565) || (request->fmt >= MAX_FRAME_FMT))
		return -1;

	/* encoded picture size rounded up to whole 16x16 macroblocks */
	picsize_x = ((wq->pic.encoder_width + 15) >> 4) << 4;
	picsize_y = ((wq->pic.encoder_height + 15) >> 4) << 4;
	oformat = 0;

	if ((request->type == LOCAL_BUFF)
		|| (request->type == PHYSICAL_BUFF)
		|| (request->type == DMA_BUFF)) {
		/* flush CPU caches before the hardware reads the buffer */
		if ((request->type == LOCAL_BUFF) &&
			(request->flush_flag & AMVENC_FLUSH_FLAG_INPUT))
			dma_flush(wq->mem.dct_buff_start_addr,
				request->framesize);
		if (request->type == LOCAL_BUFF) {
			input = wq->mem.dct_buff_start_addr;
			src_addr =
				wq->mem.dct_buff_start_addr;
		} else if (request->type == DMA_BUFF) {
			/* pick per-plane physical addresses from the
			 * imported dma-buf planes; for single-plane
			 * buffers derive U/V offsets from the picture
			 * geometry */
			if (request->plane_num == 3) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				input_u = (unsigned long)request->dma_cfg[1].paddr;
				input_v = (unsigned long)request->dma_cfg[2].paddr;
			} else if (request->plane_num == 2) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				input_u = (unsigned long)request->dma_cfg[1].paddr;
				/* NV12/NV21: UV is one interleaved plane */
				input_v = input_u;
			} else if (request->plane_num == 1) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				if (request->fmt == FMT_NV21
					|| request->fmt == FMT_NV12) {
					input_u = input_y + picsize_x * picsize_y;
					input_v = input_u;
				}
				if (request->fmt == FMT_YUV420) {
					input_u = input_y + picsize_x * picsize_y;
					input_v = input_u + picsize_x * picsize_y / 4;
				}
			}
			src_addr = input_y;
			/* use the un-rounded height for DMA canvases */
			picsize_y = wq->pic.encoder_height;
			enc_pr(LOG_INFO, "dma addr[0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx]\n",
				(unsigned long)request->dma_cfg[0].vaddr,
				(unsigned long)request->dma_cfg[0].paddr,
				(unsigned long)request->dma_cfg[1].vaddr,
				(unsigned long)request->dma_cfg[1].paddr,
				(unsigned long)request->dma_cfg[2].vaddr,
				(unsigned long)request->dma_cfg[2].paddr);
		} else {
			/* PHYSICAL_BUFF: request->src is the address */
			src_addr = input;
			picsize_y = wq->pic.encoder_height;
		}
		if (request->scale_enable) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			struct config_para_ex_s ge2d_config;

			memset(&ge2d_config, 0,
				sizeof(struct config_para_ex_s));
			/* GE2D converts/crops/scales into the scaler
			 * buffer as NV21; feed that to MFDIN instead */
			scale_frame(
				wq, request,
				&ge2d_config,
				src_addr,
				false);
			iformat = 2; /* NV21 */
			r2y_en = 0;
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
			ret = 0;
			goto MFDIN;
#else
			enc_pr(LOG_ERROR,
				"Warning: need enable ge2d for scale frame!\n");
			return -1;
#endif
		}
		/* RGB-family inputs need hardware RGB->YUV conversion */
		if ((request->fmt <= FMT_YUV444_PLANE) ||
			(request->fmt >= FMT_YUV422_12BIT))
			r2y_en = 0;
		else
			r2y_en = 1;

		if (request->fmt >= FMT_YUV422_12BIT) {
			/* high-bit-depth packed formats, iformat 7 with
			 * ifmt_extra selecting the exact variant */
			iformat = 7;
			ifmt_extra = request->fmt - FMT_YUV422_12BIT;
			if (request->fmt == FMT_YUV422_12BIT)
				canvas_w = picsize_x * 24 / 8;
			else if (request->fmt == FMT_YUV444_10BIT)
				canvas_w = picsize_x * 32 / 8;
			else
				canvas_w = (picsize_x * 20 + 7) / 8;
			canvas_w = ((canvas_w + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ENC_CANVAS_OFFSET + 6;
			input = input & 0xff;
		} else if (request->fmt == FMT_YUV422_SINGLE)
			iformat = 10;
		else if ((request->fmt == FMT_YUV444_SINGLE)
			|| (request->fmt == FMT_RGB888)) {
			/* single packed plane, 3 bytes per pixel */
			iformat = 1;
			if (request->fmt == FMT_RGB888)
				r2y_en = 1;
			canvas_w = picsize_x * 3;
			canvas_w = ((canvas_w + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ENC_CANVAS_OFFSET + 6;
		} else if ((request->fmt == FMT_NV21)
			|| (request->fmt == FMT_NV12)) {
			/* semi-planar: Y plane + interleaved UV plane */
			canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
			iformat = (request->fmt == FMT_NV21) ? 2 : 3;
			if (request->type == DMA_BUFF) {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input_y,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input_u,
					canvas_w, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			} else {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input + canvas_w * picsize_y,
					canvas_w, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			}
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if (request->fmt == FMT_YUV420) {
			/* fully planar: Y + quarter-size U and V planes */
			iformat = 4;
			canvas_w = ((wq->pic.encoder_width + 63) >> 6) << 6;
			if (request->type == DMA_BUFF) {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input_y,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input_u,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 8,
					input_v,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			} else {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input + canvas_w * picsize_y,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 8,
					input + canvas_w * picsize_y * 5 / 4,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);

			}
			input = ((ENC_CANVAS_OFFSET + 8) << 16) |
				((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if ((request->fmt == FMT_YUV444_PLANE)
			|| (request->fmt == FMT_RGB888_PLANE)) {
			/* three full-size planes */
			if (request->fmt == FMT_RGB888_PLANE)
				r2y_en = 1;
			iformat = 5;
			canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			canvas_config(ENC_CANVAS_OFFSET + 7,
				input + canvas_w * picsize_y,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			canvas_config(ENC_CANVAS_OFFSET + 8,
				input + canvas_w * picsize_y * 2,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ((ENC_CANVAS_OFFSET + 8) << 16) |
				((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if (request->fmt == FMT_RGBA8888) {
			r2y_en = 1;
			iformat = 12;
		}
		ret = 0;
	} else if (request->type == CANVAS_BUFF) {
		/* input already described by canvases; keep only the
		 * canvas index bytes relevant to each format */
		r2y_en = 0;
		if (request->scale_enable) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			struct config_para_ex_s ge2d_config;
			memset(&ge2d_config, 0,
				sizeof(struct config_para_ex_s));
			scale_frame(
				wq, request,
				&ge2d_config,
				input, true);
			iformat = 2; /* NV21 from the scaler */
			r2y_en = 0;
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
			ret = 0;
			goto MFDIN;
#else
			enc_pr(LOG_ERROR,
				"Warning: need enable ge2d for scale frame!\n");
			return -1;
#endif
		}
		if (request->fmt == FMT_YUV422_SINGLE) {
			iformat = 0;
			input = input & 0xff;
		} else if (request->fmt == FMT_YUV444_SINGLE) {
			iformat = 1;
			input = input & 0xff;
		} else if ((request->fmt == FMT_NV21)
			|| (request->fmt == FMT_NV12)) {
			iformat = (request->fmt == FMT_NV21) ? 2 : 3;
			input = input & 0xffff;
		} else if (request->fmt == FMT_YUV420) {
			iformat = 4;
			input = input & 0xffffff;
		} else if ((request->fmt == FMT_YUV444_PLANE)
			|| (request->fmt == FMT_RGB888_PLANE)) {
			if (request->fmt == FMT_RGB888_PLANE)
				r2y_en = 1;
			iformat = 5;
			input = input & 0xffffff;
		} else if ((request->fmt == FMT_YUV422_12BIT)
			|| (request->fmt == FMT_YUV444_10BIT)
			|| (request->fmt == FMT_YUV422_10BIT)) {
			iformat = 7;
			ifmt_extra = request->fmt - FMT_YUV422_12BIT;
			input = input & 0xff;
		} else
			ret = -1;
	}
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
MFDIN:
#endif
	/* hand the resolved configuration to the MFDIN register setup */
	if (ret == 0)
		mfdin_basic(input, iformat, oformat,
			picsize_x, picsize_y, r2y_en,
			request->nr_mode, ifmt_extra);
	return ret;
}
1524
1525#ifdef H264_ENC_CBR
1526static void ConvertTable2Risc(void *table, u32 len)
1527{
1528 u32 i, j;
1529 u16 temp;
1530 u16 *tbl = (u16 *)table;
1531
1532 if ((len < 8) || (len % 8) || (!table)) {
1533 enc_pr(LOG_ERROR, "ConvertTable2Risc tbl %p, len %d error\n",
1534 table, len);
1535 return;
1536 }
1537 for (i = 0; i < len / 8; i++) {
1538 j = i << 2;
1539 temp = tbl[j];
1540 tbl[j] = tbl[j + 3];
1541 tbl[j + 3] = temp;
1542
1543 temp = tbl[j + 1];
1544 tbl[j + 1] = tbl[j + 2];
1545 tbl[j + 2] = temp;
1546 }
1547
1548}
1549#endif
1550
1551static void avc_prot_init(struct encode_wq_s *wq,
1552 struct encode_request_s *request, u32 quant, bool IDR)
1553{
1554 u32 data32;
1555 u32 pic_width, pic_height;
1556 u32 pic_mb_nr;
1557 u32 pic_mbx, pic_mby;
1558 u32 i_pic_qp, p_pic_qp;
1559 u32 i_pic_qp_c, p_pic_qp_c;
1560 u32 pic_width_in_mb;
1561 u32 slice_qp;
1562
1563 pic_width = wq->pic.encoder_width;
1564 pic_height = wq->pic.encoder_height;
1565 pic_mb_nr = 0;
1566 pic_mbx = 0;
1567 pic_mby = 0;
1568 i_pic_qp = quant;
1569 p_pic_qp = quant;
1570
1571 pic_width_in_mb = (pic_width + 15) / 16;
1572 WRITE_HREG(HCODEC_HDEC_MC_OMEM_AUTO,
1573 (1 << 31) | /* use_omem_mb_xy */
1574 ((pic_width_in_mb - 1) << 16)); /* omem_max_mb_x */
1575
1576 WRITE_HREG(HCODEC_VLC_ADV_CONFIG,
1577 /* early_mix_mc_hcmd -- will enable in P Picture */
1578 (0 << 10) |
1579 (1 << 9) | /* update_top_left_mix */
1580 (1 << 8) | /* p_top_left_mix */
1581 /* mv_cal_mixed_type -- will enable in P Picture */
1582 (0 << 7) |
1583 /* mc_hcmd_mixed_type -- will enable in P Picture */
1584 (0 << 6) |
1585 (1 << 5) | /* use_separate_int_control */
1586 (1 << 4) | /* hcmd_intra_use_q_info */
1587 (1 << 3) | /* hcmd_left_use_prev_info */
1588 (1 << 2) | /* hcmd_use_q_info */
1589 (1 << 1) | /* use_q_delta_quant */
1590 /* detect_I16_from_I4 use qdct detected mb_type */
1591 (0 << 0));
1592
1593 WRITE_HREG(HCODEC_QDCT_ADV_CONFIG,
1594 (1 << 29) | /* mb_info_latch_no_I16_pred_mode */
1595 (1 << 28) | /* ie_dma_mbxy_use_i_pred */
1596 (1 << 27) | /* ie_dma_read_write_use_ip_idx */
1597 (1 << 26) | /* ie_start_use_top_dma_count */
1598 (1 << 25) | /* i_pred_top_dma_rd_mbbot */
1599 (1 << 24) | /* i_pred_top_dma_wr_disable */
1600 /* i_pred_mix -- will enable in P Picture */
1601 (0 << 23) |
1602 (1 << 22) | /* me_ab_rd_when_intra_in_p */
1603 (1 << 21) | /* force_mb_skip_run_when_intra */
1604 /* mc_out_mixed_type -- will enable in P Picture */
1605 (0 << 20) |
1606 (1 << 19) | /* ie_start_when_quant_not_full */
1607 (1 << 18) | /* mb_info_state_mix */
1608 /* mb_type_use_mix_result -- will enable in P Picture */
1609 (0 << 17) |
1610 /* me_cb_ie_read_enable -- will enable in P Picture */
1611 (0 << 16) |
1612 /* ie_cur_data_from_me -- will enable in P Picture */
1613 (0 << 15) |
1614 (1 << 14) | /* rem_per_use_table */
1615 (0 << 13) | /* q_latch_int_enable */
1616 (1 << 12) | /* q_use_table */
1617 (0 << 11) | /* q_start_wait */
1618 (1 << 10) | /* LUMA_16_LEFT_use_cur */
1619 (1 << 9) | /* DC_16_LEFT_SUM_use_cur */
1620 (1 << 8) | /* c_ref_ie_sel_cur */
1621 (0 << 7) | /* c_ipred_perfect_mode */
1622 (1 << 6) | /* ref_ie_ul_sel */
1623 (1 << 5) | /* mb_type_use_ie_result */
1624 (1 << 4) | /* detect_I16_from_I4 */
1625 (1 << 3) | /* ie_not_wait_ref_busy */
1626 (1 << 2) | /* ie_I16_enable */
1627 (3 << 0)); /* ie_done_sel // fastest when waiting */
1628
1629 if (request != NULL) {
1630 WRITE_HREG(HCODEC_IE_WEIGHT,
1631 (request->i16_weight << 16) |
1632 (request->i4_weight << 0));
1633 WRITE_HREG(HCODEC_ME_WEIGHT,
1634 (request->me_weight << 0));
1635 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1636 /* ie_sad_offset_I16 */
1637 (request->i16_weight << 16) |
1638 /* ie_sad_offset_I4 */
1639 (request->i4_weight << 0));
1640 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1641 /* ie_sad_shift_I16 */
1642 (IE_SAD_SHIFT_I16 << 24) |
1643 /* ie_sad_shift_I4 */
1644 (IE_SAD_SHIFT_I4 << 20) |
1645 /* me_sad_shift_INTER */
1646 (ME_SAD_SHIFT_INTER << 16) |
1647 /* me_sad_offset_INTER */
1648 (request->me_weight << 0));
1649 wq->me_weight = request->me_weight;
1650 wq->i4_weight = request->i4_weight;
1651 wq->i16_weight = request->i16_weight;
1652 } else {
1653 WRITE_HREG(HCODEC_IE_WEIGHT,
1654 (I16MB_WEIGHT_OFFSET << 16) |
1655 (I4MB_WEIGHT_OFFSET << 0));
1656 WRITE_HREG(HCODEC_ME_WEIGHT,
1657 (ME_WEIGHT_OFFSET << 0));
1658 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1659 /* ie_sad_offset_I16 */
1660 (I16MB_WEIGHT_OFFSET << 16) |
1661 /* ie_sad_offset_I4 */
1662 (I4MB_WEIGHT_OFFSET << 0));
1663 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1664 /* ie_sad_shift_I16 */
1665 (IE_SAD_SHIFT_I16 << 24) |
1666 /* ie_sad_shift_I4 */
1667 (IE_SAD_SHIFT_I4 << 20) |
1668 /* me_sad_shift_INTER */
1669 (ME_SAD_SHIFT_INTER << 16) |
1670 /* me_sad_offset_INTER */
1671 (ME_WEIGHT_OFFSET << 0));
1672 }
1673
1674 WRITE_HREG(HCODEC_ADV_MV_CTL0,
1675 (ADV_MV_LARGE_16x8 << 31) |
1676 (ADV_MV_LARGE_8x16 << 30) |
1677 (ADV_MV_8x8_WEIGHT << 16) | /* adv_mv_8x8_weight */
1678 /* adv_mv_4x4x4_weight should be set bigger */
1679 (ADV_MV_4x4x4_WEIGHT << 0));
1680 WRITE_HREG(HCODEC_ADV_MV_CTL1,
1681 /* adv_mv_16x16_weight */
1682 (ADV_MV_16x16_WEIGHT << 16) |
1683 (ADV_MV_LARGE_16x16 << 15) |
1684 (ADV_MV_16_8_WEIGHT << 0)); /* adv_mv_16_8_weight */
1685
1686 hcodec_prog_qtbl(wq);
1687 if (IDR) {
1688 i_pic_qp =
1689 wq->quant_tbl_i4[0] & 0xff;
1690 i_pic_qp +=
1691 wq->quant_tbl_i16[0] & 0xff;
1692 i_pic_qp /= 2;
1693 p_pic_qp = i_pic_qp;
1694 } else {
1695 i_pic_qp =
1696 wq->quant_tbl_i4[0] & 0xff;
1697 i_pic_qp +=
1698 wq->quant_tbl_i16[0] & 0xff;
1699 p_pic_qp = wq->quant_tbl_me[0] & 0xff;
1700 slice_qp = (i_pic_qp + p_pic_qp) / 3;
1701 i_pic_qp = slice_qp;
1702 p_pic_qp = i_pic_qp;
1703 }
1704#ifdef H264_ENC_CBR
1705 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
1706 data32 = READ_HREG(HCODEC_SAD_CONTROL_1);
1707 data32 = data32 & 0xffff; /* remove sad shift */
1708 WRITE_HREG(HCODEC_SAD_CONTROL_1, data32);
1709 WRITE_HREG(H264_ENC_CBR_TABLE_ADDR,
1710 wq->mem.cbr_info_ddr_start_addr);
1711 WRITE_HREG(H264_ENC_CBR_MB_SIZE_ADDR,
1712 wq->mem.cbr_info_ddr_start_addr
1713 + CBR_TABLE_SIZE);
1714 WRITE_HREG(H264_ENC_CBR_CTL,
1715 (wq->cbr_info.start_tbl_id << 28) |
1716 (wq->cbr_info.short_shift << 24) |
1717 (wq->cbr_info.long_mb_num << 16) |
1718 (wq->cbr_info.long_th << 0));
1719 WRITE_HREG(H264_ENC_CBR_REGION_SIZE,
1720 (wq->cbr_info.block_w << 16) |
1721 (wq->cbr_info.block_h << 0));
1722 }
1723#endif
1724 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_0,
1725 (0 << 19) | /* vlc_delta_quant_1 */
1726 (i_pic_qp << 13) | /* vlc_quant_1 */
1727 (0 << 6) | /* vlc_delta_quant_0 */
1728 (i_pic_qp << 0)); /* vlc_quant_0 */
1729 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_1,
1730 (14 << 6) | /* vlc_max_delta_q_neg */
1731 (13 << 0)); /* vlc_max_delta_q_pos */
1732 WRITE_HREG(HCODEC_VLC_PIC_SIZE,
1733 pic_width | (pic_height << 16));
1734 WRITE_HREG(HCODEC_VLC_PIC_POSITION,
1735 (pic_mb_nr << 16) |
1736 (pic_mby << 8) |
1737 (pic_mbx << 0));
1738
1739 /* synopsys parallel_case full_case */
1740 switch (i_pic_qp) {
1741 case 0:
1742 i_pic_qp_c = 0;
1743 break;
1744 case 1:
1745 i_pic_qp_c = 1;
1746 break;
1747 case 2:
1748 i_pic_qp_c = 2;
1749 break;
1750 case 3:
1751 i_pic_qp_c = 3;
1752 break;
1753 case 4:
1754 i_pic_qp_c = 4;
1755 break;
1756 case 5:
1757 i_pic_qp_c = 5;
1758 break;
1759 case 6:
1760 i_pic_qp_c = 6;
1761 break;
1762 case 7:
1763 i_pic_qp_c = 7;
1764 break;
1765 case 8:
1766 i_pic_qp_c = 8;
1767 break;
1768 case 9:
1769 i_pic_qp_c = 9;
1770 break;
1771 case 10:
1772 i_pic_qp_c = 10;
1773 break;
1774 case 11:
1775 i_pic_qp_c = 11;
1776 break;
1777 case 12:
1778 i_pic_qp_c = 12;
1779 break;
1780 case 13:
1781 i_pic_qp_c = 13;
1782 break;
1783 case 14:
1784 i_pic_qp_c = 14;
1785 break;
1786 case 15:
1787 i_pic_qp_c = 15;
1788 break;
1789 case 16:
1790 i_pic_qp_c = 16;
1791 break;
1792 case 17:
1793 i_pic_qp_c = 17;
1794 break;
1795 case 18:
1796 i_pic_qp_c = 18;
1797 break;
1798 case 19:
1799 i_pic_qp_c = 19;
1800 break;
1801 case 20:
1802 i_pic_qp_c = 20;
1803 break;
1804 case 21:
1805 i_pic_qp_c = 21;
1806 break;
1807 case 22:
1808 i_pic_qp_c = 22;
1809 break;
1810 case 23:
1811 i_pic_qp_c = 23;
1812 break;
1813 case 24:
1814 i_pic_qp_c = 24;
1815 break;
1816 case 25:
1817 i_pic_qp_c = 25;
1818 break;
1819 case 26:
1820 i_pic_qp_c = 26;
1821 break;
1822 case 27:
1823 i_pic_qp_c = 27;
1824 break;
1825 case 28:
1826 i_pic_qp_c = 28;
1827 break;
1828 case 29:
1829 i_pic_qp_c = 29;
1830 break;
1831 case 30:
1832 i_pic_qp_c = 29;
1833 break;
1834 case 31:
1835 i_pic_qp_c = 30;
1836 break;
1837 case 32:
1838 i_pic_qp_c = 31;
1839 break;
1840 case 33:
1841 i_pic_qp_c = 32;
1842 break;
1843 case 34:
1844 i_pic_qp_c = 32;
1845 break;
1846 case 35:
1847 i_pic_qp_c = 33;
1848 break;
1849 case 36:
1850 i_pic_qp_c = 34;
1851 break;
1852 case 37:
1853 i_pic_qp_c = 34;
1854 break;
1855 case 38:
1856 i_pic_qp_c = 35;
1857 break;
1858 case 39:
1859 i_pic_qp_c = 35;
1860 break;
1861 case 40:
1862 i_pic_qp_c = 36;
1863 break;
1864 case 41:
1865 i_pic_qp_c = 36;
1866 break;
1867 case 42:
1868 i_pic_qp_c = 37;
1869 break;
1870 case 43:
1871 i_pic_qp_c = 37;
1872 break;
1873 case 44:
1874 i_pic_qp_c = 37;
1875 break;
1876 case 45:
1877 i_pic_qp_c = 38;
1878 break;
1879 case 46:
1880 i_pic_qp_c = 38;
1881 break;
1882 case 47:
1883 i_pic_qp_c = 38;
1884 break;
1885 case 48:
1886 i_pic_qp_c = 39;
1887 break;
1888 case 49:
1889 i_pic_qp_c = 39;
1890 break;
1891 case 50:
1892 i_pic_qp_c = 39;
1893 break;
1894 default:
1895 i_pic_qp_c = 39;
1896 break;
1897 }
1898
1899 /* synopsys parallel_case full_case */
1900 switch (p_pic_qp) {
1901 case 0:
1902 p_pic_qp_c = 0;
1903 break;
1904 case 1:
1905 p_pic_qp_c = 1;
1906 break;
1907 case 2:
1908 p_pic_qp_c = 2;
1909 break;
1910 case 3:
1911 p_pic_qp_c = 3;
1912 break;
1913 case 4:
1914 p_pic_qp_c = 4;
1915 break;
1916 case 5:
1917 p_pic_qp_c = 5;
1918 break;
1919 case 6:
1920 p_pic_qp_c = 6;
1921 break;
1922 case 7:
1923 p_pic_qp_c = 7;
1924 break;
1925 case 8:
1926 p_pic_qp_c = 8;
1927 break;
1928 case 9:
1929 p_pic_qp_c = 9;
1930 break;
1931 case 10:
1932 p_pic_qp_c = 10;
1933 break;
1934 case 11:
1935 p_pic_qp_c = 11;
1936 break;
1937 case 12:
1938 p_pic_qp_c = 12;
1939 break;
1940 case 13:
1941 p_pic_qp_c = 13;
1942 break;
1943 case 14:
1944 p_pic_qp_c = 14;
1945 break;
1946 case 15:
1947 p_pic_qp_c = 15;
1948 break;
1949 case 16:
1950 p_pic_qp_c = 16;
1951 break;
1952 case 17:
1953 p_pic_qp_c = 17;
1954 break;
1955 case 18:
1956 p_pic_qp_c = 18;
1957 break;
1958 case 19:
1959 p_pic_qp_c = 19;
1960 break;
1961 case 20:
1962 p_pic_qp_c = 20;
1963 break;
1964 case 21:
1965 p_pic_qp_c = 21;
1966 break;
1967 case 22:
1968 p_pic_qp_c = 22;
1969 break;
1970 case 23:
1971 p_pic_qp_c = 23;
1972 break;
1973 case 24:
1974 p_pic_qp_c = 24;
1975 break;
1976 case 25:
1977 p_pic_qp_c = 25;
1978 break;
1979 case 26:
1980 p_pic_qp_c = 26;
1981 break;
1982 case 27:
1983 p_pic_qp_c = 27;
1984 break;
1985 case 28:
1986 p_pic_qp_c = 28;
1987 break;
1988 case 29:
1989 p_pic_qp_c = 29;
1990 break;
1991 case 30:
1992 p_pic_qp_c = 29;
1993 break;
1994 case 31:
1995 p_pic_qp_c = 30;
1996 break;
1997 case 32:
1998 p_pic_qp_c = 31;
1999 break;
2000 case 33:
2001 p_pic_qp_c = 32;
2002 break;
2003 case 34:
2004 p_pic_qp_c = 32;
2005 break;
2006 case 35:
2007 p_pic_qp_c = 33;
2008 break;
2009 case 36:
2010 p_pic_qp_c = 34;
2011 break;
2012 case 37:
2013 p_pic_qp_c = 34;
2014 break;
2015 case 38:
2016 p_pic_qp_c = 35;
2017 break;
2018 case 39:
2019 p_pic_qp_c = 35;
2020 break;
2021 case 40:
2022 p_pic_qp_c = 36;
2023 break;
2024 case 41:
2025 p_pic_qp_c = 36;
2026 break;
2027 case 42:
2028 p_pic_qp_c = 37;
2029 break;
2030 case 43:
2031 p_pic_qp_c = 37;
2032 break;
2033 case 44:
2034 p_pic_qp_c = 37;
2035 break;
2036 case 45:
2037 p_pic_qp_c = 38;
2038 break;
2039 case 46:
2040 p_pic_qp_c = 38;
2041 break;
2042 case 47:
2043 p_pic_qp_c = 38;
2044 break;
2045 case 48:
2046 p_pic_qp_c = 39;
2047 break;
2048 case 49:
2049 p_pic_qp_c = 39;
2050 break;
2051 case 50:
2052 p_pic_qp_c = 39;
2053 break;
2054 default:
2055 p_pic_qp_c = 39;
2056 break;
2057 }
2058 WRITE_HREG(HCODEC_QDCT_Q_QUANT_I,
2059 (i_pic_qp_c << 22) |
2060 (i_pic_qp << 16) |
2061 ((i_pic_qp_c % 6) << 12) |
2062 ((i_pic_qp_c / 6) << 8) |
2063 ((i_pic_qp % 6) << 4) |
2064 ((i_pic_qp / 6) << 0));
2065
2066 WRITE_HREG(HCODEC_QDCT_Q_QUANT_P,
2067 (p_pic_qp_c << 22) |
2068 (p_pic_qp << 16) |
2069 ((p_pic_qp_c % 6) << 12) |
2070 ((p_pic_qp_c / 6) << 8) |
2071 ((p_pic_qp % 6) << 4) |
2072 ((p_pic_qp / 6) << 0));
2073
2074#ifdef ENABLE_IGNORE_FUNCTION
2075 WRITE_HREG(HCODEC_IGNORE_CONFIG,
2076 (1 << 31) | /* ignore_lac_coeff_en */
2077 (1 << 26) | /* ignore_lac_coeff_else (<1) */
2078 (1 << 21) | /* ignore_lac_coeff_2 (<1) */
2079 (2 << 16) | /* ignore_lac_coeff_1 (<2) */
2080 (1 << 15) | /* ignore_cac_coeff_en */
2081 (1 << 10) | /* ignore_cac_coeff_else (<1) */
2082 (1 << 5) | /* ignore_cac_coeff_2 (<1) */
2083 (3 << 0)); /* ignore_cac_coeff_1 (<2) */
2084
2085 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB)
2086 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2087 (1 << 31) | /* ignore_t_lac_coeff_en */
2088 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2089 (2 << 21) | /* ignore_t_lac_coeff_2 (<2) */
2090 (6 << 16) | /* ignore_t_lac_coeff_1 (<6) */
2091 (1<<15) | /* ignore_cdc_coeff_en */
2092 (0<<14) | /* ignore_t_lac_coeff_else_le_3 */
2093 (1<<13) | /* ignore_t_lac_coeff_else_le_4 */
2094 (1<<12) | /* ignore_cdc_only_when_empty_cac_inter */
2095 (1<<11) | /* ignore_cdc_only_when_one_empty_inter */
2096 /* ignore_cdc_range_max_inter 0-0, 1-1, 2-2, 3-3 */
2097 (2<<9) |
2098 /* ignore_cdc_abs_max_inter 0-1, 1-2, 2-3, 3-4 */
2099 (0<<7) |
2100 /* ignore_cdc_only_when_empty_cac_intra */
2101 (1<<5) |
2102 /* ignore_cdc_only_when_one_empty_intra */
2103 (1<<4) |
2104 /* ignore_cdc_range_max_intra 0-0, 1-1, 2-2, 3-3 */
2105 (1<<2) |
2106 /* ignore_cdc_abs_max_intra 0-1, 1-2, 2-3, 3-4 */
2107 (0<<0));
2108 else
2109 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2110 (1 << 31) | /* ignore_t_lac_coeff_en */
2111 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2112 (1 << 21) | /* ignore_t_lac_coeff_2 (<1) */
2113 (5 << 16) | /* ignore_t_lac_coeff_1 (<5) */
2114 (0 << 0));
2115#else
2116 WRITE_HREG(HCODEC_IGNORE_CONFIG, 0);
2117 WRITE_HREG(HCODEC_IGNORE_CONFIG_2, 0);
2118#endif
2119
2120 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2121 (1 << 9) | /* mb_info_soft_reset */
2122 (1 << 0)); /* mb read buffer soft reset */
2123
2124 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2125 (1 << 28) | /* ignore_t_p8x8 */
2126 (0 << 27) | /* zero_mc_out_null_non_skipped_mb */
2127 (0 << 26) | /* no_mc_out_null_non_skipped_mb */
2128 (0 << 25) | /* mc_out_even_skipped_mb */
2129 (0 << 24) | /* mc_out_wait_cbp_ready */
2130 (0 << 23) | /* mc_out_wait_mb_type_ready */
2131 (1 << 29) | /* ie_start_int_enable */
2132 (1 << 19) | /* i_pred_enable */
2133 (1 << 20) | /* ie_sub_enable */
2134 (1 << 18) | /* iq_enable */
2135 (1 << 17) | /* idct_enable */
2136 (1 << 14) | /* mb_pause_enable */
2137 (1 << 13) | /* q_enable */
2138 (1 << 12) | /* dct_enable */
2139 (1 << 10) | /* mb_info_en */
2140 (0 << 3) | /* endian */
2141 (0 << 1) | /* mb_read_en */
2142 (0 << 0)); /* soft reset */
2143
2144 WRITE_HREG(HCODEC_SAD_CONTROL,
2145 (0 << 3) | /* ie_result_buff_enable */
2146 (1 << 2) | /* ie_result_buff_soft_reset */
2147 (0 << 1) | /* sad_enable */
2148 (1 << 0)); /* sad soft reset */
2149 WRITE_HREG(HCODEC_IE_RESULT_BUFFER, 0);
2150
2151 WRITE_HREG(HCODEC_SAD_CONTROL,
2152 (1 << 3) | /* ie_result_buff_enable */
2153 (0 << 2) | /* ie_result_buff_soft_reset */
2154 (1 << 1) | /* sad_enable */
2155 (0 << 0)); /* sad soft reset */
2156
2157 WRITE_HREG(HCODEC_IE_CONTROL,
2158 (1 << 30) | /* active_ul_block */
2159 (0 << 1) | /* ie_enable */
2160 (1 << 0)); /* ie soft reset */
2161
2162 WRITE_HREG(HCODEC_IE_CONTROL,
2163 (1 << 30) | /* active_ul_block */
2164 (0 << 1) | /* ie_enable */
2165 (0 << 0)); /* ie soft reset */
2166
2167 WRITE_HREG(HCODEC_ME_SKIP_LINE,
2168 (8 << 24) | /* step_3_skip_line */
2169 (8 << 18) | /* step_2_skip_line */
2170 (2 << 12) | /* step_1_skip_line */
2171 (0 << 6) | /* step_0_skip_line */
2172 (0 << 0));
2173
2174 WRITE_HREG(HCODEC_ME_MV_MERGE_CTL, me_mv_merge_ctl);
2175 WRITE_HREG(HCODEC_ME_STEP0_CLOSE_MV, me_step0_close_mv);
2176 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_01, me_sad_enough_01);
2177 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_23, me_sad_enough_23);
2178 WRITE_HREG(HCODEC_ME_F_SKIP_SAD, me_f_skip_sad);
2179 WRITE_HREG(HCODEC_ME_F_SKIP_WEIGHT, me_f_skip_weight);
2180 WRITE_HREG(HCODEC_ME_MV_WEIGHT_01, me_mv_weight_01);
2181 WRITE_HREG(HCODEC_ME_MV_WEIGHT_23, me_mv_weight_23);
2182 WRITE_HREG(HCODEC_ME_SAD_RANGE_INC, me_sad_range_inc);
2183
2184 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
2185 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, 0);
2186 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL,
2187 (v5_use_small_diff_cnt << 7) |
2188 (v5_simple_mb_inter_all_en << 6) |
2189 (v5_simple_mb_inter_8x8_en << 5) |
2190 (v5_simple_mb_inter_16_8_en << 4) |
2191 (v5_simple_mb_inter_16x16_en << 3) |
2192 (v5_simple_mb_intra_en << 2) |
2193 (v5_simple_mb_C_en << 1) |
2194 (v5_simple_mb_Y_en << 0));
2195 WRITE_HREG(HCODEC_V5_MB_DIFF_SUM, 0);
2196 WRITE_HREG(HCODEC_V5_SMALL_DIFF_CNT,
2197 (v5_small_diff_C<<16) |
2198 (v5_small_diff_Y<<0));
2199 if (qp_mode == 1) {
2200 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2201 0);
2202 } else {
2203 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2204 v5_simple_dq_setting);
2205 }
2206 WRITE_HREG(HCODEC_V5_SIMPLE_MB_ME_WEIGHT,
2207 v5_simple_me_weight_setting);
2208 /* txlx can remove it */
2209 WRITE_HREG(HCODEC_QDCT_CONFIG, 1 << 0);
2210 }
2211
2212 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
2213 WRITE_HREG(HCODEC_V4_FORCE_SKIP_CFG,
2214 (i_pic_qp << 26) | /* v4_force_q_r_intra */
2215 (i_pic_qp << 20) | /* v4_force_q_r_inter */
2216 (0 << 19) | /* v4_force_q_y_enable */
2217 (5 << 16) | /* v4_force_qr_y */
2218 (6 << 12) | /* v4_force_qp_y */
2219 (0 << 0)); /* v4_force_skip_sad */
2220
2221 /* V3 Force skip */
2222 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2223 (1 << 31) | /* v3_skip_enable */
2224 (0 << 30) | /* v3_step_1_weight_enable */
2225 (1 << 28) | /* v3_mv_sad_weight_enable */
2226 (1 << 27) | /* v3_ipred_type_enable */
2227 (V3_FORCE_SKIP_SAD_1 << 12) |
2228 (V3_FORCE_SKIP_SAD_0 << 0));
2229 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2230 (V3_SKIP_WEIGHT_1 << 16) |
2231 (V3_SKIP_WEIGHT_0 << 0));
2232 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2233 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2234 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2235 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2236 (V3_FORCE_SKIP_SAD_2 << 16) |
2237 (V3_SKIP_WEIGHT_2 << 0));
2238 if (request != NULL) {
2239 unsigned int off1, off2;
2240
2241 off1 = V3_IE_F_ZERO_SAD_I4 - I4MB_WEIGHT_OFFSET;
2242 off2 = V3_IE_F_ZERO_SAD_I16
2243 - I16MB_WEIGHT_OFFSET;
2244 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2245 ((request->i16_weight + off2) << 16) |
2246 ((request->i4_weight + off1) << 0));
2247 off1 = V3_ME_F_ZERO_SAD - ME_WEIGHT_OFFSET;
2248 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2249 (0 << 25) |
2250 /* v3_no_ver_when_top_zero_en */
2251 (0 << 24) |
2252 /* v3_no_hor_when_left_zero_en */
2253 (3 << 16) | /* type_hor break */
2254 ((request->me_weight + off1) << 0));
2255 } else {
2256 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2257 (V3_IE_F_ZERO_SAD_I16 << 16) |
2258 (V3_IE_F_ZERO_SAD_I4 << 0));
2259 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2260 (0 << 25) |
2261 /* v3_no_ver_when_top_zero_en */
2262 (0 << 24) |
2263 /* v3_no_hor_when_left_zero_en */
2264 (3 << 16) | /* type_hor break */
2265 (V3_ME_F_ZERO_SAD << 0));
2266 }
2267 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2268 /* V3 Force skip */
2269 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2270 (1 << 31) | /* v3_skip_enable */
2271 (0 << 30) | /* v3_step_1_weight_enable */
2272 (1 << 28) | /* v3_mv_sad_weight_enable */
2273 (1 << 27) | /* v3_ipred_type_enable */
2274 (0 << 12) | /* V3_FORCE_SKIP_SAD_1 */
2275 (0 << 0)); /* V3_FORCE_SKIP_SAD_0 */
2276 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2277 (V3_SKIP_WEIGHT_1 << 16) |
2278 (V3_SKIP_WEIGHT_0 << 0));
2279 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2280 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2281 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2282 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2283 (0 << 16) | /* V3_FORCE_SKIP_SAD_2 */
2284 (V3_SKIP_WEIGHT_2 << 0));
2285 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2286 (0 << 16) | /* V3_IE_F_ZERO_SAD_I16 */
2287 (0 << 0)); /* V3_IE_F_ZERO_SAD_I4 */
2288 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2289 (0 << 25) | /* v3_no_ver_when_top_zero_en */
2290 (0 << 24) | /* v3_no_hor_when_left_zero_en */
2291 (3 << 16) | /* type_hor break */
2292 (0 << 0)); /* V3_ME_F_ZERO_SAD */
2293 }
2294 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2295 int i;
2296 /* MV SAD Table */
2297 for (i = 0; i < 64; i++)
2298 WRITE_HREG(HCODEC_V3_MV_SAD_TABLE,
2299 v3_mv_sad[i]);
2300
2301 /* IE PRED SAD Table*/
2302 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_0,
2303 (C_ipred_weight_H << 24) |
2304 (C_ipred_weight_V << 16) |
2305 (I4_ipred_weight_else << 8) |
2306 (I4_ipred_weight_most << 0));
2307 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_1,
2308 (I16_ipred_weight_DC << 24) |
2309 (I16_ipred_weight_H << 16) |
2310 (I16_ipred_weight_V << 8) |
2311 (C_ipred_weight_DC << 0));
2312 WRITE_HREG(HCODEC_V3_LEFT_SMALL_MAX_SAD,
2313 (v3_left_small_max_me_sad << 16) |
2314 (v3_left_small_max_ie_sad << 0));
2315 }
2316 WRITE_HREG(HCODEC_IE_DATA_FEED_BUFF_INFO, 0);
2317 WRITE_HREG(HCODEC_CURR_CANVAS_CTRL, 0);
2318 data32 = READ_HREG(HCODEC_VLC_CONFIG);
2319 data32 = data32 | (1 << 0); /* set pop_coeff_even_all_zero */
2320 WRITE_HREG(HCODEC_VLC_CONFIG, data32);
2321
2322 WRITE_HREG(INFO_DUMP_START_ADDR,
2323 wq->mem.dump_info_ddr_start_addr);
2324
2325 /* clear mailbox interrupt */
2326 WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);
2327
2328 /* enable mailbox interrupt */
2329 WRITE_HREG(HCODEC_IRQ_MBOX_MASK, 1);
2330}
2331
/*
 * amvenc_reset() - pulse the DOS software resets for the HCODEC
 * encoder sub-blocks (DOS_SW_RESET1 bits 2, 6, 7, 8, 14, 16, 17).
 *
 * The triple READ_VREG(DOS_SW_RESET1) before and after the pulse are
 * deliberate dummy accesses, presumably inserted as short bus delays
 * so the reset is held long enough to latch -- do not remove them.
 * Exact per-bit meanings are not visible here; confirm against the
 * DOS register spec before changing the mask.
 */
void amvenc_reset(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));
	WRITE_VREG(DOS_SW_RESET1, 0);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2347
/*
 * amvenc_start() - pulse DOS_SW_RESET1 bits 11 and 12, then start the
 * already-loaded microcode by writing HCODEC_MPSR = 1.
 *
 * The repeated READ_VREG(DOS_SW_RESET1) calls are dummy accesses used
 * as bus delays around the reset pulse (same idiom as amvenc_reset()).
 * The microcode must have been loaded via amvenc_loadmc() first.
 */
void amvenc_start(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11));
	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	WRITE_HREG(HCODEC_MPSR, 0x0001);
}
2363
/*
 * amvenc_stop() - halt the encoder microcode and reset the hardware.
 *
 * Clears HCODEC_MPSR/CPSR to stop the CPUs, then busy-waits up to one
 * second (jiffies + HZ) for any in-flight IMEM DMA to finish (bit
 * 0x8000 of HCODEC_IMEM_DMA_CTRL is the busy flag), and finally pulses
 * the combined DOS software resets (the union of the amvenc_start()
 * and amvenc_reset() masks).  The dummy DOS_SW_RESET1 reads are
 * deliberate bus-delay accesses.
 */
void amvenc_stop(void)
{
	ulong timeout = jiffies + HZ;

	WRITE_HREG(HCODEC_MPSR, 0);
	WRITE_HREG(HCODEC_CPSR, 0);
	while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
		if (time_after(jiffies, timeout))
			break;
	}
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11) |
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));

	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2391
2392static void __iomem *mc_addr;
2393static u32 mc_addr_map;
2394#define MC_SIZE (4096 * 8)
2395s32 amvenc_loadmc(const char *p, struct encode_wq_s *wq)
2396{
2397 ulong timeout;
2398 s32 ret = 0;
2399
2400 /* use static mempry*/
2401 if (mc_addr == NULL) {
2402 mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
2403 if (!mc_addr) {
2404 enc_pr(LOG_ERROR, "avc loadmc iomap mc addr error.\n");
2405 return -ENOMEM;
2406 }
2407 }
2408
2409 enc_pr(LOG_ALL, "avc encode ucode name is %s\n", p);
2410 ret = get_data_from_name(p, (u8 *)mc_addr);
2411 if (ret < 0) {
2412 enc_pr(LOG_ERROR,
2413 "avc microcode fail ret=%d, name: %s, wq:%p.\n",
2414 ret, p, (void *)wq);
2415 }
2416
2417 mc_addr_map = dma_map_single(
2418 &encode_manager.this_pdev->dev,
2419 mc_addr, MC_SIZE, DMA_TO_DEVICE);
2420
2421 /* mc_addr_map = wq->mem.assit_buffer_offset; */
2422 /* mc_addr = ioremap_wc(mc_addr_map, MC_SIZE); */
2423 /* memcpy(mc_addr, p, MC_SIZE); */
2424 enc_pr(LOG_ALL, "address 0 is 0x%x\n", *((u32 *)mc_addr));
2425 enc_pr(LOG_ALL, "address 1 is 0x%x\n", *((u32 *)mc_addr + 1));
2426 enc_pr(LOG_ALL, "address 2 is 0x%x\n", *((u32 *)mc_addr + 2));
2427 enc_pr(LOG_ALL, "address 3 is 0x%x\n", *((u32 *)mc_addr + 3));
2428 WRITE_HREG(HCODEC_MPSR, 0);
2429 WRITE_HREG(HCODEC_CPSR, 0);
2430
2431 /* Read CBUS register for timing */
2432 timeout = READ_HREG(HCODEC_MPSR);
2433 timeout = READ_HREG(HCODEC_MPSR);
2434
2435 timeout = jiffies + HZ;
2436
2437 WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map);
2438 WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000);
2439 WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
2440
2441 while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
2442 if (time_before(jiffies, timeout))
2443 schedule();
2444 else {
2445 enc_pr(LOG_ERROR, "hcodec load mc error\n");
2446 ret = -EBUSY;
2447 break;
2448 }
2449 }
2450 dma_unmap_single(
2451 &encode_manager.this_pdev->dev,
2452 mc_addr_map, MC_SIZE, DMA_TO_DEVICE);
2453 return ret;
2454}
2455
/*
 * Tiny hcodec ucode blob executed by amvenc_dos_top_reg_fix().
 * NOTE(review): raw opcodes whose semantics are not derivable from
 * this file -- confirm against the hcodec ISA docs before changing.
 */
const u32 fix_mc[] __aligned(8) = {
	0x0809c05a, 0x06696000, 0x0c780000, 0x00000000
};
2459
2460
2461/*
2462 * DOS top level register access fix.
2463 * When hcodec is running, a protocol register HCODEC_CCPU_INTR_MSK
2464 * is set to make hcodec access one CBUS out of DOS domain once
2465 * to work around a HW bug for 4k2k dual decoder implementation.
2466 * If hcodec is not running, then a ucode is loaded and executed
2467 * instead.
2468 */
void amvenc_dos_top_reg_fix(void)
{
	bool hcodec_on;
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	hcodec_on = vdec_on(VDEC_HCODEC);

	/* hcodec already running: set the protocol mask and return */
	if ((hcodec_on) && (READ_VREG(HCODEC_MPSR) & 1)) {
		WRITE_HREG(HCODEC_CCPU_INTR_MSK, 1);
		spin_unlock_irqrestore(&lock, flags);
		return;
	}

	/* otherwise power the core (if needed) and run the fix_mc blob */
	if (!hcodec_on)
		vdec_poweron(VDEC_HCODEC);

	amhcodec_loadmc(fix_mc);

	amhcodec_start();

	/* NOTE(review): 1 ms busy-wait with interrupts disabled (we are
	 * inside spin_lock_irqsave) -- confirm this latency is OK */
	udelay(1000);

	amhcodec_stop();

	/* restore the original power state */
	if (!hcodec_on)
		vdec_poweroff(VDEC_HCODEC);

	spin_unlock_irqrestore(&lock, flags);
}
2500
2501bool amvenc_avc_on(void)
2502{
2503 bool hcodec_on;
2504 ulong flags;
2505
2506 spin_lock_irqsave(&lock, flags);
2507
2508 hcodec_on = vdec_on(VDEC_HCODEC);
2509 hcodec_on &= (encode_manager.wq_count > 0);
2510
2511 spin_unlock_irqrestore(&lock, flags);
2512 return hcodec_on;
2513}
2514
2515static s32 avc_poweron(u32 clock)
2516{
2517 ulong flags;
2518 u32 data32;
2519
2520 data32 = 0;
2521
2522 amports_switch_gate("vdec", 1);
2523
2524 spin_lock_irqsave(&lock, flags);
2525
2526 WRITE_AOREG(AO_RTI_PWR_CNTL_REG0,
2527 (READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18)));
2528 udelay(10);
2529 /* Powerup HCODEC */
2530 /* [1:0] HCODEC */
2531 WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
2532 READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
2533 ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
2534 get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
2535 ? ~0x1 : ~0x3));
2536
2537 udelay(10);
2538
2539 WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
2540 WRITE_VREG(DOS_SW_RESET1, 0);
2541
2542 /* Enable Dos internal clock gating */
2543 hvdec_clock_enable(clock);
2544
2545 /* Powerup HCODEC memories */
2546 WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0);
2547
2548 /* Remove HCODEC ISO */
2549 WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
2550 READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
2551 ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
2552 get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
2553 ? ~0x1 : ~0x30));
2554
2555 udelay(10);
2556 /* Disable auto-clock gate */
2557 WRITE_VREG(DOS_GEN_CTRL0,
2558 (READ_VREG(DOS_GEN_CTRL0) | 0x1));
2559 WRITE_VREG(DOS_GEN_CTRL0,
2560 (READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE));
2561
2562 spin_unlock_irqrestore(&lock, flags);
2563
2564 mdelay(10);
2565 return 0;
2566}
2567
/*
 * avc_poweroff() - reverse of avc_poweron(): isolate the core, power
 * down its memories, gate the clock and put HCODEC back to sleep,
 * then release the vdec clk81 gate.  Always returns 0.
 */
static s32 avc_poweroff(void)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	/* enable HCODEC isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x30));

	/* power off HCODEC memories */
	WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);

	/* disable HCODEC clock */
	hvdec_clock_disable();

	/* HCODEC power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x3));

	spin_unlock_irqrestore(&lock, flags);

	/* release DOS clk81 clock gating */
	amports_switch_gate("vdec", 0);
	return 0;
}
2600
/*
 * reload_mc() - stop the encoder and reload the microcode currently
 * selected by encode_manager.ucode_index.
 * @wq: work queue, forwarded to amvenc_loadmc() for logging.
 *
 * Returns 0 on success, -EBUSY when loading fails.
 */
static s32 reload_mc(struct encode_wq_s *wq)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	amvenc_stop();

	/* pulse a full DOS software reset before reloading */
	WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
	WRITE_VREG(DOS_SW_RESET1, 0);

	udelay(10);

	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);
	enc_pr(LOG_INFO, "reload microcode\n");

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;
	return 0;
}
2619
2620static void encode_isr_tasklet(ulong data)
2621{
2622 struct encode_manager_s *manager = (struct encode_manager_s *)data;
2623
2624 enc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status);
2625 if (((manager->encode_hw_status == ENCODER_IDR_DONE)
2626 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2627 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2628 || (manager->encode_hw_status == ENCODER_PICTURE_DONE))
2629 && (manager->process_irq)) {
2630 wake_up_interruptible(&manager->event.hw_complete);
2631 }
2632}
2633
2634/* irq function */
2635static irqreturn_t enc_isr(s32 irq_number, void *para)
2636{
2637 struct encode_manager_s *manager = (struct encode_manager_s *)para;
2638
2639 WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);
2640
2641 manager->encode_hw_status = READ_HREG(ENCODER_STATUS);
2642 if ((manager->encode_hw_status == ENCODER_IDR_DONE)
2643 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2644 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2645 || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
2646 enc_pr(LOG_ALL, "encoder stage is %d\n",
2647 manager->encode_hw_status);
2648 }
2649
2650 if (((manager->encode_hw_status == ENCODER_IDR_DONE)
2651 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2652 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2653 || (manager->encode_hw_status == ENCODER_PICTURE_DONE))
2654 && (!manager->process_irq)) {
2655 manager->process_irq = true;
2656 if (manager->encode_hw_status != ENCODER_SEQUENCE_DONE)
2657 manager->need_reset = true;
2658 tasklet_schedule(&manager->encode_tasklet);
2659 }
2660 return IRQ_HANDLED;
2661}
2662
/*
 * convert_request() - unpack a userspace command array into wq->request.
 * @wq:       target work queue; its request struct is overwritten.
 * @cmd_info: raw u32 command block copied from userspace (layout is
 *            the AMVENC_AVC_IOC_NEW_CMD ABI; offsets below are fixed).
 *
 * Handles ENCODER_SEQUENCE and ENCODER_IDR/ENCODER_NON_IDR commands.
 * Returns 0 on success, -1 on a NULL wq, an unknown command, or a
 * failed dma-buf import.
 */
static s32 convert_request(struct encode_wq_s *wq, u32 *cmd_info)
{
	int i = 0;
	u8 *ptr;
	u32 data_offset;
	u32 cmd = cmd_info[0];
	unsigned long paddr = 0;
	struct enc_dma_cfg *cfg = NULL;
	s32 ret = 0;
	struct platform_device *pdev;

	if (!wq)
		return -1;
	memset(&wq->request, 0, sizeof(struct encode_request_s));
	/* default weights; reduced below when ADJUSTED_QP_FLAG is set */
	wq->request.me_weight = ME_WEIGHT_OFFSET;
	wq->request.i4_weight = I4MB_WEIGHT_OFFSET;
	wq->request.i16_weight = I16MB_WEIGHT_OFFSET;

	if (cmd == ENCODER_SEQUENCE) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.quant = cmd_info[2];
		wq->request.flush_flag = cmd_info[3];
		wq->request.timeout = cmd_info[4];
		/* the userspace timeout is deliberately overridden with
		 * a fixed 5 s -- TODO confirm cmd_info[4] can be dropped */
		wq->request.timeout = 5000; /* 5000 ms */
	} else if ((cmd == ENCODER_IDR) || (cmd == ENCODER_NON_IDR)) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.type = cmd_info[2];
		wq->request.fmt = cmd_info[3];
		wq->request.src = cmd_info[4];
		wq->request.framesize = cmd_info[5];
		wq->request.quant = cmd_info[6];
		wq->request.flush_flag = cmd_info[7];
		wq->request.timeout = cmd_info[8];
		wq->request.crop_top = cmd_info[9];
		wq->request.crop_bottom = cmd_info[10];
		wq->request.crop_left = cmd_info[11];
		wq->request.crop_right = cmd_info[12];
		wq->request.src_w = cmd_info[13];
		wq->request.src_h = cmd_info[14];
		wq->request.scale_enable = cmd_info[15];

		enc_pr(LOG_INFO, "hwenc: wq->pic.encoder_width %d, ",
			wq->pic.encoder_width);
		enc_pr(LOG_INFO, "wq->pic.encoder_height:%d, request fmt=%d\n",
			wq->pic.encoder_height, wq->request.fmt);

		/* force the scaler on for >=720p RGBA8888 input */
		if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720 && wq->request.fmt == FMT_RGBA8888) {
			wq->request.scale_enable = 1;
			wq->request.src_w = wq->pic.encoder_width;
			wq->request.src_h = wq->pic.encoder_height;
			pr_err("hwenc: force wq->request.scale_enable=%d\n", wq->request.scale_enable);
		}

		/* module parameter nr_mode, when set, wins over userspace;
		 * noise reduction is always off for IDR frames */
		wq->request.nr_mode =
			(nr_mode > 0) ? nr_mode : cmd_info[16];
		if (cmd == ENCODER_IDR)
			wq->request.nr_mode = 0;

		/* qp tables start at cmd_info[17]; offsets are in u32s */
		data_offset = 17 +
			(sizeof(wq->quant_tbl_i4)
			+ sizeof(wq->quant_tbl_i16)
			+ sizeof(wq->quant_tbl_me)) / 4;

		if (wq->request.quant == ADJUSTED_QP_FLAG) {
			/* per-table qp: copy the three tables, then
			 * apply the three weight deltas that follow */
			ptr = (u8 *) &cmd_info[17];
			memcpy(wq->quant_tbl_i4, ptr,
				sizeof(wq->quant_tbl_i4));
			ptr += sizeof(wq->quant_tbl_i4);
			memcpy(wq->quant_tbl_i16, ptr,
				sizeof(wq->quant_tbl_i16));
			ptr += sizeof(wq->quant_tbl_i16);
			memcpy(wq->quant_tbl_me, ptr,
				sizeof(wq->quant_tbl_me));
			wq->request.i4_weight -=
				cmd_info[data_offset++];
			wq->request.i16_weight -=
				cmd_info[data_offset++];
			wq->request.me_weight -=
				cmd_info[data_offset++];
			if (qp_table_debug) {
				/* dump all three qp tables byte by byte */
				u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]);

				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_i16[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_me[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");
			}
		} else {
			/* flat qp: fill every table with a single value */
			memset(wq->quant_tbl_me, wq->request.quant,
				sizeof(wq->quant_tbl_me));
			memset(wq->quant_tbl_i4, wq->request.quant,
				sizeof(wq->quant_tbl_i4));
			memset(wq->quant_tbl_i16, wq->request.quant,
				sizeof(wq->quant_tbl_i16));
			data_offset += 3;
		}
#ifdef H264_ENC_CBR
		wq->cbr_info.block_w = cmd_info[data_offset++];
		wq->cbr_info.block_h = cmd_info[data_offset++];
		wq->cbr_info.long_th = cmd_info[data_offset++];
		wq->cbr_info.start_tbl_id = cmd_info[data_offset++];
		wq->cbr_info.short_shift = CBR_SHORT_SHIFT;
		wq->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
#endif
		/* recompute absolute offset: 17 + qp tables + 3 weights
		 * + 4 cbr words = +7 u32s -- layout fixed by the ABI
		 * regardless of H264_ENC_CBR */
		data_offset = 17 +
			(sizeof(wq->quant_tbl_i4)
			+ sizeof(wq->quant_tbl_i16)
			+ sizeof(wq->quant_tbl_me)) / 4 + 7;

		if (wq->request.type == DMA_BUFF) {
			/* import one dma-buf fd per plane */
			wq->request.plane_num = cmd_info[data_offset++];
			enc_pr(LOG_INFO, "wq->request.plane_num %d\n",
				wq->request.plane_num);
			if (wq->request.fmt == FMT_NV12 ||
				wq->request.fmt == FMT_NV21 ||
				wq->request.fmt == FMT_YUV420) {
				for (i = 0; i < wq->request.plane_num; i++) {
					cfg = &wq->request.dma_cfg[i];
					cfg->dir = DMA_TO_DEVICE;
					cfg->fd = cmd_info[data_offset++];
					pdev = encode_manager.this_pdev;
					cfg->dev = &(pdev->dev);

					ret = enc_dma_buf_get_phys(cfg, &paddr);
					if (ret < 0) {
						enc_pr(LOG_ERROR,
							"import fd %d failed\n",
							cfg->fd);
						cfg->paddr = NULL;
						cfg->vaddr = NULL;
						/* NOTE(review): planes
						 * imported before this one are
						 * not released here -- confirm
						 * the caller cleans them up */
						return -1;
					}
					cfg->paddr = (void *)paddr;
					enc_pr(LOG_INFO, "vaddr %p\n",
						cfg->vaddr);
				}
			} else {
				enc_pr(LOG_ERROR, "error fmt = %d\n",
					wq->request.fmt);
			}
		}

	} else {
		enc_pr(LOG_ERROR, "error cmd = %d, wq: %p.\n",
			cmd, (void *)wq);
		return -1;
	}
	wq->request.parent = wq;
	return 0;
}
2830
/*
 * amvenc_avc_start_cmd() - program the hardware for one encode request.
 * @wq:      work queue owning the buffers/canvases for this stream.
 * @request: the decoded request (see convert_request()).
 *
 * Reloads the microcode when the request asks for a different ucode,
 * performs a full hardware re-init when a reset is pending, programs
 * per-frame state, then kicks the encoder by writing the command into
 * ENCODER_STATUS (and restarting the ucode if it was reloaded).
 */
void amvenc_avc_start_cmd(struct encode_wq_s *wq,
	struct encode_request_s *request)
{
	u32 reload_flag = 0;

	/* switch microcode if this request needs a different one */
	if (request->ucode_mode != encode_manager.ucode_index) {
		encode_manager.ucode_index = request->ucode_mode;
		if (reload_mc(wq)) {
			enc_pr(LOG_ERROR,
				"reload mc fail, wq:%p\n", (void *)wq);
			return;
		}
		reload_flag = 1;
		encode_manager.need_reset = true;
	}

	wq->hw_status = 0;
	wq->output_size = 0;
	wq->ucode_index = encode_manager.ucode_index;

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	/* full re-init path: stop, reset, rebuild every buffer/param */
	if (encode_manager.need_reset) {
		amvenc_stop();
		reload_flag = 1;
		encode_manager.need_reset = false;
		encode_manager.encode_hw_status = ENCODER_IDLE;
		amvenc_reset();
		avc_canvas_init(wq);
		avc_init_encoder(wq,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_input_buffer(wq);
		avc_init_output_buffer(wq);
		avc_prot_init(wq, request, request->quant,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_assit_buffer(wq);
		enc_pr(LOG_INFO,
			"begin to new frame, request->cmd: %d, ucode mode: %d, wq:%p\n",
			request->cmd, request->ucode_mode, (void *)wq);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR)) {
#ifdef H264_ENC_SVC
		/* encode non reference frame or not */
		if (request->cmd == ENCODER_IDR)
			wq->pic.non_ref_cnt = 0; /* IDR resets the counter */
		if (wq->pic.enable_svc && wq->pic.non_ref_cnt) {
			enc_pr(LOG_INFO,
				"PIC is NON REF cmd %d cnt %d value 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_NON_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_NON_REF);
		} else {
			enc_pr(LOG_INFO,
				"PIC is REF cmd %d cnt %d val 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
		}
#else
		/* if FW defined but not defined SVC in driver here*/
		WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
#endif
		/* dblk/reference canvases must be set before each frame */
		avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
		avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR))
		set_input_format(wq, request);

	/* macroblock type hint: I4MB for IDR, auto-select for P frames */
	if (request->cmd == ENCODER_IDR)
		ie_me_mb_type = HENC_MB_Type_I4MB;
	else if (request->cmd == ENCODER_NON_IDR)
		ie_me_mb_type =
			(HENC_SKIP_RUN_AUTO << 16) |
			(HENC_MB_Type_AUTO << 4) |
			(HENC_MB_Type_AUTO << 0);
	else
		ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, request->quant);

#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif

	/* writing the command into ENCODER_STATUS hands it to the ucode */
	encode_manager.encode_hw_status = request->cmd;
	wq->hw_status = request->cmd;
	WRITE_HREG(ENCODER_STATUS, request->cmd);
	if ((request->cmd == ENCODER_IDR)
		|| (request->cmd == ENCODER_NON_IDR)
		|| (request->cmd == ENCODER_SEQUENCE)
		|| (request->cmd == ENCODER_PICTURE))
		encode_manager.process_irq = false;

	if (reload_flag)
		amvenc_start();
	enc_pr(LOG_ALL, "amvenc_avc_start cmd out, request:%p.\n", (void *)request);
}
2939
2940static void dma_flush(u32 buf_start, u32 buf_size)
2941{
2942 if ((buf_start == 0) || (buf_size == 0))
2943 return;
2944 dma_sync_single_for_device(
2945 &encode_manager.this_pdev->dev, buf_start,
2946 buf_size, DMA_TO_DEVICE);
2947}
2948
2949static void cache_flush(u32 buf_start, u32 buf_size)
2950{
2951 if ((buf_start == 0) || (buf_size == 0))
2952 return;
2953 dma_sync_single_for_cpu(
2954 &encode_manager.this_pdev->dev, buf_start,
2955 buf_size, DMA_FROM_DEVICE);
2956}
2957
2958static u32 getbuffer(struct encode_wq_s *wq, u32 type)
2959{
2960 u32 ret = 0;
2961
2962 switch (type) {
2963 case ENCODER_BUFFER_INPUT:
2964 ret = wq->mem.dct_buff_start_addr;
2965 break;
2966 case ENCODER_BUFFER_REF0:
2967 ret = wq->mem.dct_buff_start_addr +
2968 wq->mem.bufspec.dec0_y.buf_start;
2969 break;
2970 case ENCODER_BUFFER_REF1:
2971 ret = wq->mem.dct_buff_start_addr +
2972 wq->mem.bufspec.dec1_y.buf_start;
2973 break;
2974 case ENCODER_BUFFER_OUTPUT:
2975 ret = wq->mem.BitstreamStart;
2976 break;
2977 case ENCODER_BUFFER_DUMP:
2978 ret = wq->mem.dump_info_ddr_start_addr;
2979 break;
2980 case ENCODER_BUFFER_CBR:
2981 ret = wq->mem.cbr_info_ddr_start_addr;
2982 break;
2983 default:
2984 break;
2985 }
2986 return ret;
2987}
2988
/*
 * amvenc_avc_start() - one-time bring-up of the AVC encoder for a wq.
 * @wq:    work queue to initialize.
 * @clock: clock level forwarded to avc_poweron().
 *
 * Powers the core, loads the microcode, initializes all encoder
 * buffers and parameters, installs the shared mailbox IRQ handler and
 * starts the ucode.  Returns 0 on success, -EBUSY when the microcode
 * cannot be loaded.
 */
s32 amvenc_avc_start(struct encode_wq_s *wq, u32 clock)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	avc_poweron(clock);
	avc_canvas_init(wq);

	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;

	encode_manager.need_reset = true;
	encode_manager.process_irq = false;
	encode_manager.encode_hw_status = ENCODER_IDLE;
	amvenc_reset();
	avc_init_encoder(wq, true);
	avc_init_input_buffer(wq); /* dct buffer setting */
	avc_init_output_buffer(wq); /* output stream buffer */

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	avc_prot_init(wq, NULL, wq->pic.init_qppicture, true);
	/* NOTE(review): a failed request_irq only clears the flag --
	 * the encoder then runs without completion interrupts */
	if (request_irq(encode_manager.irq_num, enc_isr, IRQF_SHARED,
		"enc-irq", (void *)&encode_manager) == 0)
		encode_manager.irq_requested = true;
	else
		encode_manager.irq_requested = false;

	/* decoder buffer , need set before each frame start */
	avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
	/* reference buffer , need set before each frame start */
	avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	avc_init_assit_buffer(wq); /* assitant buffer for microcode */
	ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, wq->pic.init_qppicture);
	WRITE_HREG(ENCODER_STATUS, ENCODER_IDLE);

#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
	amvenc_start();
	return 0;
}
3043
3044void amvenc_avc_stop(void)
3045{
3046 if ((encode_manager.irq_num >= 0) &&
3047 (encode_manager.irq_requested == true)) {
3048 free_irq(encode_manager.irq_num, &encode_manager);
3049 encode_manager.irq_requested = false;
3050 }
3051 amvenc_stop();
3052 avc_poweroff();
3053}
3054
3055static s32 avc_init(struct encode_wq_s *wq)
3056{
3057 s32 r = 0;
3058
3059 encode_manager.ucode_index = wq->ucode_index;
3060 r = amvenc_avc_start(wq, clock_level);
3061
3062 enc_pr(LOG_DEBUG,
3063 "init avc encode. microcode %d, ret=%d, wq:%p.\n",
3064 encode_manager.ucode_index, r, (void *)wq);
3065 return 0;
3066}
3067
3068static s32 amvenc_avc_light_reset(struct encode_wq_s *wq, u32 value)
3069{
3070 s32 r = 0;
3071
3072 amvenc_avc_stop();
3073
3074 mdelay(value);
3075
3076 encode_manager.ucode_index = UCODE_MODE_FULL;
3077 r = amvenc_avc_start(wq, clock_level);
3078
3079 enc_pr(LOG_DEBUG,
3080 "amvenc_avc_light_reset finish, wq:%p. ret=%d\n",
3081 (void *)wq, r);
3082 return r;
3083}
3084
3085#ifdef CONFIG_CMA
3086static u32 checkCMA(void)
3087{
3088 u32 ret;
3089
3090 if (encode_manager.cma_pool_size > 0) {
3091 ret = encode_manager.cma_pool_size;
3092 ret = ret / MIN_SIZE;
3093 } else
3094 ret = 0;
3095 return ret;
3096}
3097#endif
3098
3099/* file operation */
/*
 * amvenc_avc_open() - file_operations .open handler: create one encode
 * work queue per opened fd and (on the CMA path) allocate its DMA
 * buffer; a reserved-memory configuration provides the buffer instead.
 * Returns 0 on success, -EBUSY or -ENOMEM on failure.
 */
static s32 amvenc_avc_open(struct inode *inode, struct file *file)
{
	s32 r = 0;
	struct encode_wq_s *wq = NULL;

	file->private_data = NULL;
	enc_pr(LOG_DEBUG, "avc open\n");
#ifdef CONFIG_AM_JPEG_ENCODER
	/* hcodec is shared with the JPEG encoder */
	if (jpegenc_on() == true) {
		enc_pr(LOG_ERROR,
			"hcodec in use for JPEG Encode now.\n");
		return -EBUSY;
	}
#endif

#ifdef CONFIG_CMA
	/* first open only: size the CMA pool to derive max instances */
	if ((encode_manager.use_reserve == false) &&
		(encode_manager.check_cma == false)) {
		encode_manager.max_instance = checkCMA();
		if (encode_manager.max_instance > 0) {
			enc_pr(LOG_DEBUG,
				"amvenc_avc check CMA pool success, max instance: %d.\n",
				encode_manager.max_instance);
		} else {
			enc_pr(LOG_ERROR,
				"amvenc_avc CMA pool too small.\n");
		}
		encode_manager.check_cma = true;
	}
#endif

	wq = create_encode_work_queue();
	if (wq == NULL) {
		enc_pr(LOG_ERROR, "amvenc_avc create instance fail.\n");
		return -EBUSY;
	}

#ifdef CONFIG_CMA
	/* CMA path: allocate the per-instance DMA buffer now */
	if (encode_manager.use_reserve == false) {
		wq->mem.buf_start = codec_mm_alloc_for_dma(ENCODE_NAME,
			MIN_SIZE >> PAGE_SHIFT, 0,
			CODEC_MM_FLAGS_CPU);
		if (wq->mem.buf_start) {
			wq->mem.buf_size = MIN_SIZE;
			enc_pr(LOG_DEBUG,
				"allocating phys 0x%x, size %dk, wq:%p.\n",
				wq->mem.buf_start,
				wq->mem.buf_size >> 10, (void *)wq);
		} else {
			enc_pr(LOG_ERROR,
				"CMA failed to allocate dma buffer for %s, wq:%p.\n",
				encode_manager.this_pdev->name,
				(void *)wq);
			destroy_encode_work_queue(wq);
			return -ENOMEM;
		}
	}
#endif

	/* covers the reserved-memory path too: memory must exist here */
	if (wq->mem.buf_start == 0 ||
		wq->mem.buf_size < MIN_SIZE) {
		enc_pr(LOG_ERROR,
			"alloc mem failed, start: 0x%x, size:0x%x, wq:%p.\n",
			wq->mem.buf_start,
			wq->mem.buf_size, (void *)wq);
		destroy_encode_work_queue(wq);
		return -ENOMEM;
	}

	memcpy(&wq->mem.bufspec, &amvenc_buffspec[0],
		sizeof(struct BuffInfo_s));

	enc_pr(LOG_DEBUG,
		"amvenc_avc memory config success, buff start:0x%x, size is 0x%x, wq:%p.\n",
		wq->mem.buf_start, wq->mem.buf_size, (void *)wq);

	file->private_data = (void *) wq;
	return r;
}
3179
3180static s32 amvenc_avc_release(struct inode *inode, struct file *file)
3181{
3182 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3183
3184 if (wq) {
3185 enc_pr(LOG_DEBUG, "avc release, wq:%p\n", (void *)wq);
3186 destroy_encode_work_queue(wq);
3187 }
3188 return 0;
3189}
3190
3191static long amvenc_avc_ioctl(struct file *file, u32 cmd, ulong arg)
3192{
3193 long r = 0;
3194 u32 amrisc_cmd = 0;
3195 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3196#define MAX_ADDR_INFO_SIZE 52
3197 u32 addr_info[MAX_ADDR_INFO_SIZE + 4];
3198 ulong argV;
3199 u32 buf_start;
3200 s32 canvas = -1;
3201 struct canvas_s dst;
3202
3203 switch (cmd) {
3204 case AMVENC_AVC_IOC_GET_ADDR:
3205 if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET))
3206 put_user(1, (u32 *)arg);
3207 else
3208 put_user(2, (u32 *)arg);
3209 break;
3210 case AMVENC_AVC_IOC_INPUT_UPDATE:
3211 break;
3212 case AMVENC_AVC_IOC_NEW_CMD:
3213 if (copy_from_user(addr_info, (void *)arg,
3214 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3215 enc_pr(LOG_ERROR,
3216 "avc get new cmd error, wq:%p.\n", (void *)wq);
3217 return -1;
3218 }
3219 r = convert_request(wq, addr_info);
3220 if (r == 0)
3221 r = encode_wq_add_request(wq);
3222 if (r) {
3223 enc_pr(LOG_ERROR,
3224 "avc add new request error, wq:%p.\n",
3225 (void *)wq);
3226 }
3227 break;
3228 case AMVENC_AVC_IOC_GET_STAGE:
3229 put_user(wq->hw_status, (u32 *)arg);
3230 break;
3231 case AMVENC_AVC_IOC_GET_OUTPUT_SIZE:
3232 addr_info[0] = wq->output_size;
3233 addr_info[1] = wq->me_weight;
3234 addr_info[2] = wq->i4_weight;
3235 addr_info[3] = wq->i16_weight;
3236 r = copy_to_user((u32 *)arg,
3237 addr_info, 4 * sizeof(u32));
3238 break;
3239 case AMVENC_AVC_IOC_CONFIG_INIT:
3240 if (copy_from_user(addr_info, (void *)arg,
3241 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3242 enc_pr(LOG_ERROR,
3243 "avc config init error, wq:%p.\n", (void *)wq);
3244 return -1;
3245 }
3246 wq->ucode_index = UCODE_MODE_FULL;
3247#ifdef MULTI_SLICE_MC
3248 wq->pic.rows_per_slice = addr_info[1];
3249 enc_pr(LOG_DEBUG,
3250 "avc init -- rows_per_slice: %d, wq: %p.\n",
3251 wq->pic.rows_per_slice, (void *)wq);
3252#endif
3253 enc_pr(LOG_DEBUG,
3254 "avc init as mode %d, wq: %p.\n",
3255 wq->ucode_index, (void *)wq);
3256
3257 if (addr_info[2] > wq->mem.bufspec.max_width ||
3258 addr_info[3] > wq->mem.bufspec.max_height) {
3259 enc_pr(LOG_ERROR,
3260 "avc config init- encode size %dx%d is larger than supported (%dx%d). wq:%p.\n",
3261 addr_info[2], addr_info[3],
3262 wq->mem.bufspec.max_width,
3263 wq->mem.bufspec.max_height, (void *)wq);
3264 return -1;
3265 }
3266 pr_err("hwenc: AMVENC_AVC_IOC_CONFIG_INIT: w:%d, h:%d\n", wq->pic.encoder_width, wq->pic.encoder_height);
3267 wq->pic.encoder_width = addr_info[2];
3268 wq->pic.encoder_height = addr_info[3];
3269 if (wq->pic.encoder_width *
3270 wq->pic.encoder_height >= 1280 * 720)
3271 clock_level = 6;
3272 else
3273 clock_level = 5;
3274 avc_buffspec_init(wq);
3275 complete(&encode_manager.event.request_in_com);
3276 addr_info[1] = wq->mem.bufspec.dct.buf_start;
3277 addr_info[2] = wq->mem.bufspec.dct.buf_size;
3278 addr_info[3] = wq->mem.bufspec.bitstream.buf_start;
3279 addr_info[4] = wq->mem.bufspec.bitstream.buf_size;
3280 addr_info[5] = wq->mem.bufspec.scale_buff.buf_start;
3281 addr_info[6] = wq->mem.bufspec.scale_buff.buf_size;
3282 addr_info[7] = wq->mem.bufspec.dump_info.buf_start;
3283 addr_info[8] = wq->mem.bufspec.dump_info.buf_size;
3284 addr_info[9] = wq->mem.bufspec.cbr_info.buf_start;
3285 addr_info[10] = wq->mem.bufspec.cbr_info.buf_size;
3286 r = copy_to_user((u32 *)arg, addr_info, 11*sizeof(u32));
3287 break;
3288 case AMVENC_AVC_IOC_FLUSH_CACHE:
3289 if (copy_from_user(addr_info, (void *)arg,
3290 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3291 enc_pr(LOG_ERROR,
3292 "avc flush cache error, wq: %p.\n", (void *)wq);
3293 return -1;
3294 }
3295 buf_start = getbuffer(wq, addr_info[0]);
3296 dma_flush(buf_start + addr_info[1],
3297 addr_info[2] - addr_info[1]);
3298 break;
3299 case AMVENC_AVC_IOC_FLUSH_DMA:
3300 if (copy_from_user(addr_info, (void *)arg,
3301 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3302 enc_pr(LOG_ERROR,
3303 "avc flush dma error, wq:%p.\n", (void *)wq);
3304 return -1;
3305 }
3306 buf_start = getbuffer(wq, addr_info[0]);
3307 cache_flush(buf_start + addr_info[1],
3308 addr_info[2] - addr_info[1]);
3309 break;
3310 case AMVENC_AVC_IOC_GET_BUFFINFO:
3311 put_user(wq->mem.buf_size, (u32 *)arg);
3312 break;
3313 case AMVENC_AVC_IOC_GET_DEVINFO:
3314 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
3315 /* send the same id as GXTVBB to upper*/
3316 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3317 strlen(AMVENC_DEVINFO_GXTVBB));
3318 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) {
3319 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3320 strlen(AMVENC_DEVINFO_GXTVBB));
3321 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
3322 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXBB,
3323 strlen(AMVENC_DEVINFO_GXBB));
3324 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_MG9TV) {
3325 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_G9,
3326 strlen(AMVENC_DEVINFO_G9));
3327 } else {
3328 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_M8,
3329 strlen(AMVENC_DEVINFO_M8));
3330 }
3331 break;
3332 case AMVENC_AVC_IOC_SUBMIT:
3333 get_user(amrisc_cmd, ((u32 *)arg));
3334 if (amrisc_cmd == ENCODER_IDR) {
3335 wq->pic.idr_pic_id++;
3336 if (wq->pic.idr_pic_id > 65535)
3337 wq->pic.idr_pic_id = 0;
3338 wq->pic.pic_order_cnt_lsb = 2;
3339 wq->pic.frame_number = 1;
3340 } else if (amrisc_cmd == ENCODER_NON_IDR) {
3341#ifdef H264_ENC_SVC
3342 /* only update when there is reference frame */
3343 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3344 wq->pic.frame_number++;
3345 enc_pr(LOG_INFO, "Increase frame_num to %d\n",
3346 wq->pic.frame_number);
3347 }
3348#else
3349 wq->pic.frame_number++;
3350#endif
3351
3352 wq->pic.pic_order_cnt_lsb += 2;
3353 if (wq->pic.frame_number > 65535)
3354 wq->pic.frame_number = 0;
3355 }
3356#ifdef H264_ENC_SVC
3357 /* only update when there is reference frame */
3358 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3359 amrisc_cmd = wq->mem.dblk_buf_canvas;
3360 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3361 /* current dblk buffer as next reference buffer */
3362 wq->mem.ref_buf_canvas = amrisc_cmd;
3363 enc_pr(LOG_INFO,
3364 "switch buffer enable %d cnt %d\n",
3365 wq->pic.enable_svc, wq->pic.non_ref_cnt);
3366 }
3367 if (wq->pic.enable_svc) {
3368 wq->pic.non_ref_cnt ++;
3369 if (wq->pic.non_ref_cnt > wq->pic.non_ref_limit) {
3370 enc_pr(LOG_INFO, "Svc clear cnt %d conf %d\n",
3371 wq->pic.non_ref_cnt,
3372 wq->pic.non_ref_limit);
3373 wq->pic.non_ref_cnt = 0;
3374 } else
3375 enc_pr(LOG_INFO,"Svc increase non ref counter to %d\n",
3376 wq->pic.non_ref_cnt );
3377 }
3378#else
3379 amrisc_cmd = wq->mem.dblk_buf_canvas;
3380 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3381 /* current dblk buffer as next reference buffer */
3382 wq->mem.ref_buf_canvas = amrisc_cmd;
3383#endif
3384 break;
3385 case AMVENC_AVC_IOC_READ_CANVAS:
3386 get_user(argV, ((u32 *)arg));
3387 canvas = argV;
3388 if (canvas & 0xff) {
3389 canvas_read(canvas & 0xff, &dst);
3390 addr_info[0] = dst.addr;
3391 if ((canvas & 0xff00) >> 8)
3392 canvas_read((canvas & 0xff00) >> 8, &dst);
3393 if ((canvas & 0xff0000) >> 16)
3394 canvas_read((canvas & 0xff0000) >> 16, &dst);
3395 addr_info[1] = dst.addr - addr_info[0] +
3396 dst.width * dst.height;
3397 } else {
3398 addr_info[0] = 0;
3399 addr_info[1] = 0;
3400 }
3401 dma_flush(dst.addr, dst.width * dst.height * 3 / 2);
3402 r = copy_to_user((u32 *)arg, addr_info, 2 * sizeof(u32));
3403 break;
3404 case AMVENC_AVC_IOC_MAX_INSTANCE:
3405 put_user(encode_manager.max_instance, (u32 *)arg);
3406 break;
3407 case AMVENC_AVC_IOC_QP_MODE:
3408 get_user(qp_mode, ((u32 *)arg));
3409 pr_info("qp_mode %d\n", qp_mode);
3410 break;
3411 default:
3412 r = -1;
3413 break;
3414 }
3415 return r;
3416}
3417
#ifdef CONFIG_COMPAT
/* 32-bit compat shim: translate the user pointer, reuse native handler. */
static long amvenc_avc_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	return amvenc_avc_ioctl(filp, cmd, (ulong)compat_ptr(args));
}
#endif
3429
3430static s32 avc_mmap(struct file *filp, struct vm_area_struct *vma)
3431{
3432 struct encode_wq_s *wq = (struct encode_wq_s *)filp->private_data;
3433 ulong off = vma->vm_pgoff << PAGE_SHIFT;
3434 ulong vma_size = vma->vm_end - vma->vm_start;
3435
3436 if (vma_size == 0) {
3437 enc_pr(LOG_ERROR, "vma_size is 0, wq:%p.\n", (void *)wq);
3438 return -EAGAIN;
3439 }
3440 if (!off)
3441 off += wq->mem.buf_start;
3442 enc_pr(LOG_ALL,
3443 "vma_size is %ld , off is %ld, wq:%p.\n",
3444 vma_size, off, (void *)wq);
3445 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
3446 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
3447 if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
3448 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
3449 enc_pr(LOG_ERROR,
3450 "set_cached: failed remap_pfn_range, wq:%p.\n",
3451 (void *)wq);
3452 return -EAGAIN;
3453 }
3454 return 0;
3455}
3456
3457static u32 amvenc_avc_poll(struct file *file, poll_table *wait_table)
3458{
3459 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3460
3461 poll_wait(file, &wq->request_complete, wait_table);
3462
3463 if (atomic_read(&wq->request_ready)) {
3464 atomic_dec(&wq->request_ready);
3465 return POLLIN | POLLRDNORM;
3466 }
3467 return 0;
3468}
3469
/* Character-device entry points for the AVC hardware encoder node. */
static const struct file_operations amvenc_avc_fops = {
	.owner = THIS_MODULE,
	.open = amvenc_avc_open,
	.mmap = avc_mmap,
	.release = amvenc_avc_release,
	.unlocked_ioctl = amvenc_avc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amvenc_avc_compat_ioctl,
#endif
	.poll = amvenc_avc_poll,
};
3481
/* work queue function */
/*
 * Run one queued encode request on the hardware and wait for completion.
 * Called only from the monitor thread. Sequence requests are chained into
 * picture requests via the 'Again' label; on timeout/unknown status the
 * hardware is light-reset. Completion is signalled to pollers at the end.
 */
static s32 encode_process_request(struct encode_manager_s *manager,
	struct encode_queue_item_s *pitem)
{
	s32 ret = 0;
	struct encode_wq_s *wq = pitem->request.parent;
	struct encode_request_s *request = &pitem->request;
	/* A zero user timeout degenerates to a single jiffy, not forever. */
	u32 timeout = (request->timeout == 0) ?
		1 : msecs_to_jiffies(request->timeout);
	u32 buf_start = 0;
	u32 size = 0;
	/* Flush span for a frame: width rounded up to 32, height to 16,
	 * times 3/2 (presumably a 4:2:0 layout — TODO confirm).
	 */
	u32 flush_size = ((wq->pic.encoder_width + 31) >> 5 << 5) *
		((wq->pic.encoder_height + 15) >> 4 << 4) * 3 / 2;

	struct enc_dma_cfg *cfg = NULL;
	int i = 0;

#ifdef H264_ENC_CBR
	/* Push the CBR rate-control table to the firmware before encoding. */
	if (request->cmd == ENCODER_IDR || request->cmd == ENCODER_NON_IDR) {
		if (request->flush_flag & AMVENC_FLUSH_FLAG_CBR
			&& get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
			void *vaddr = wq->mem.cbr_info_ddr_virt_addr;
			ConvertTable2Risc(vaddr, 0xa00);
			buf_start = getbuffer(wq, ENCODER_BUFFER_CBR);
			codec_mm_dma_flush(vaddr, wq->mem.cbr_info_ddr_size, DMA_TO_DEVICE);
		}
	}
#endif

Again:
	/* Kick the hardware, then wait for one of the DONE states. */
	amvenc_avc_start_cmd(wq, request);

	if (no_timeout) {
		wait_event_interruptible(manager->event.hw_complete,
			(manager->encode_hw_status == ENCODER_IDR_DONE
			|| manager->encode_hw_status == ENCODER_NON_IDR_DONE
			|| manager->encode_hw_status == ENCODER_SEQUENCE_DONE
			|| manager->encode_hw_status == ENCODER_PICTURE_DONE));
	} else {
		wait_event_interruptible_timeout(manager->event.hw_complete,
			((manager->encode_hw_status == ENCODER_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
			|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)),
			timeout);
	}

	/* SPS done: record its size and immediately chain the PPS encode. */
	if ((request->cmd == ENCODER_SEQUENCE) &&
		(manager->encode_hw_status == ENCODER_SEQUENCE_DONE)) {
		wq->sps_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
		wq->hw_status = manager->encode_hw_status;
		request->cmd = ENCODER_PICTURE;
		goto Again;
	} else if ((request->cmd == ENCODER_PICTURE) &&
		(manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		/* PPS done: total bytes minus SPS gives the PPS size. */
		wq->pps_size =
			READ_HREG(HCODEC_VLC_TOTAL_BYTES) - wq->sps_size;
		wq->hw_status = manager->encode_hw_status;
		if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
			buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
			cache_flush(buf_start,
				wq->sps_size + wq->pps_size);
		}
		/* Packed result: SPS size in the high 16 bits, PPS low. */
		wq->output_size = (wq->sps_size << 16) | wq->pps_size;
	} else {
		wq->hw_status = manager->encode_hw_status;
		if ((manager->encode_hw_status == ENCODER_IDR_DONE) ||
			(manager->encode_hw_status == ENCODER_NON_IDR_DONE)) {
			wq->output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
			/* Flush whichever buffers the caller asked for. */
			if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_OUTPUT);
				cache_flush(buf_start, wq->output_size);
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_DUMP) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_DUMP);
				size = wq->mem.dump_info_ddr_size;
				cache_flush(buf_start, size);
				//enc_pr(LOG_DEBUG, "CBR flush dump_info done");
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_REFERENCE) {
				u32 ref_id = ENCODER_BUFFER_REF0;

				if ((wq->mem.ref_buf_canvas & 0xff) ==
					(ENC_CANVAS_OFFSET))
					ref_id = ENCODER_BUFFER_REF0;
				else
					ref_id = ENCODER_BUFFER_REF1;
				buf_start = getbuffer(wq, ref_id);
				cache_flush(buf_start, flush_size);
			}
		} else {
			/* Timeout or unexpected state: dump debug registers
			 * and light-reset the encoder core.
			 */
			manager->encode_hw_status = ENCODER_ERROR;
			enc_pr(LOG_DEBUG, "avc encode light reset --- ");
			enc_pr(LOG_DEBUG,
				"frame type: %s, size: %dx%d, wq: %p\n",
				(request->cmd == ENCODER_IDR) ? "IDR" : "P",
				wq->pic.encoder_width,
				wq->pic.encoder_height, (void *)wq);
			enc_pr(LOG_DEBUG,
				"mb info: 0x%x, encode status: 0x%x, dct status: 0x%x ",
				READ_HREG(HCODEC_VLC_MB_INFO),
				READ_HREG(ENCODER_STATUS),
				READ_HREG(HCODEC_QDCT_STATUS_CTRL));
			enc_pr(LOG_DEBUG,
				"vlc status: 0x%x, me status: 0x%x, risc pc:0x%x, debug:0x%x\n",
				READ_HREG(HCODEC_VLC_STATUS_CTRL),
				READ_HREG(HCODEC_ME_STATUS),
				READ_HREG(HCODEC_MPC_E),
				READ_HREG(DEBUG_REG));
			amvenc_avc_light_reset(wq, 30);
		}
		/* Release any dma-buf input planes mapped for this request. */
		for (i = 0; i < request->plane_num; i++) {
			cfg = &request->dma_cfg[i];
			enc_pr(LOG_INFO, "request vaddr %p, paddr %p\n",
				cfg->vaddr, cfg->paddr);
			if (cfg->fd >= 0 && cfg->vaddr != NULL)
				enc_dma_buf_unmap(cfg);
		}
	}
	/* Wake user-space pollers waiting on this work queue. */
	atomic_inc(&wq->request_ready);
	wake_up_interruptible(&wq->request_complete);
	return ret;
}
3609
3610s32 encode_wq_add_request(struct encode_wq_s *wq)
3611{
3612 struct encode_queue_item_s *pitem = NULL;
3613 struct list_head *head = NULL;
3614 struct encode_wq_s *tmp = NULL;
3615 bool find = false;
3616
3617 spin_lock(&encode_manager.event.sem_lock);
3618
3619 head = &encode_manager.wq;
3620 list_for_each_entry(tmp, head, list) {
3621 if ((wq == tmp) && (wq != NULL)) {
3622 find = true;
3623 break;
3624 }
3625 }
3626
3627 if (find == false) {
3628 enc_pr(LOG_ERROR, "current wq (%p) doesn't register.\n",
3629 (void *)wq);
3630 goto error;
3631 }
3632
3633 if (list_empty(&encode_manager.free_queue)) {
3634 enc_pr(LOG_ERROR, "work queue no space, wq:%p.\n",
3635 (void *)wq);
3636 goto error;
3637 }
3638
3639 pitem = list_entry(encode_manager.free_queue.next,
3640 struct encode_queue_item_s, list);
3641 if (IS_ERR(pitem))
3642 goto error;
3643
3644 memcpy(&pitem->request, &wq->request, sizeof(struct encode_request_s));
3645
3646 enc_pr(LOG_INFO, "new work request %p, vaddr %p, paddr %p\n", &pitem->request,
3647 pitem->request.dma_cfg[0].vaddr,pitem->request.dma_cfg[0].paddr);
3648
3649 memset(&wq->request, 0, sizeof(struct encode_request_s));
3650 wq->request.dma_cfg[0].fd = -1;
3651 wq->request.dma_cfg[1].fd = -1;
3652 wq->request.dma_cfg[2].fd = -1;
3653 wq->hw_status = 0;
3654 wq->output_size = 0;
3655 pitem->request.parent = wq;
3656 list_move_tail(&pitem->list, &encode_manager.process_queue);
3657 spin_unlock(&encode_manager.event.sem_lock);
3658
3659 enc_pr(LOG_INFO,
3660 "add new work ok, cmd:%d, ucode mode: %d, wq:%p.\n",
3661 pitem->request.cmd, pitem->request.ucode_mode,
3662 (void *)wq);
3663 complete(&encode_manager.event.request_in_com);/* new cmd come in */
3664 return 0;
3665error:
3666 spin_unlock(&encode_manager.event.sem_lock);
3667 return -1;
3668}
3669
3670struct encode_wq_s *create_encode_work_queue(void)
3671{
3672 struct encode_wq_s *encode_work_queue = NULL;
3673 bool done = false;
3674 u32 i, max_instance;
3675 struct Buff_s *reserve_buff;
3676
3677 encode_work_queue = kzalloc(sizeof(struct encode_wq_s), GFP_KERNEL);
3678 if (IS_ERR(encode_work_queue)) {
3679 enc_pr(LOG_ERROR, "can't create work queue\n");
3680 return NULL;
3681 }
3682 max_instance = encode_manager.max_instance;
3683 encode_work_queue->pic.init_qppicture = 26;
3684 encode_work_queue->pic.log2_max_frame_num = 4;
3685 encode_work_queue->pic.log2_max_pic_order_cnt_lsb = 4;
3686 encode_work_queue->pic.idr_pic_id = 0;
3687 encode_work_queue->pic.frame_number = 0;
3688 encode_work_queue->pic.pic_order_cnt_lsb = 0;
3689#ifdef H264_ENC_SVC
3690 /* Get settings from the global*/
3691 encode_work_queue->pic.enable_svc = svc_enable;
3692 encode_work_queue->pic.non_ref_limit = svc_ref_conf;
3693 encode_work_queue->pic.non_ref_cnt = 0;
3694 enc_pr(LOG_INFO, "svc conf enable %d, duration %d\n",
3695 encode_work_queue->pic.enable_svc,
3696 encode_work_queue->pic.non_ref_limit);
3697#endif
3698 encode_work_queue->ucode_index = UCODE_MODE_FULL;
3699
3700#ifdef H264_ENC_CBR
3701 encode_work_queue->cbr_info.block_w = 16;
3702 encode_work_queue->cbr_info.block_h = 9;
3703 encode_work_queue->cbr_info.long_th = CBR_LONG_THRESH;
3704 encode_work_queue->cbr_info.start_tbl_id = START_TABLE_ID;
3705 encode_work_queue->cbr_info.short_shift = CBR_SHORT_SHIFT;
3706 encode_work_queue->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
3707#endif
3708 init_waitqueue_head(&encode_work_queue->request_complete);
3709 atomic_set(&encode_work_queue->request_ready, 0);
3710 spin_lock(&encode_manager.event.sem_lock);
3711 if (encode_manager.wq_count < encode_manager.max_instance) {
3712 list_add_tail(&encode_work_queue->list, &encode_manager.wq);
3713 encode_manager.wq_count++;
3714 if (encode_manager.use_reserve == true) {
3715 for (i = 0; i < max_instance; i++) {
3716 reserve_buff = &encode_manager.reserve_buff[i];
3717 if (reserve_buff->used == false) {
3718 encode_work_queue->mem.buf_start =
3719 reserve_buff->buf_start;
3720 encode_work_queue->mem.buf_size =
3721 reserve_buff->buf_size;
3722 reserve_buff->used = true;
3723 done = true;
3724 break;
3725 }
3726 }
3727 } else
3728 done = true;
3729 }
3730 spin_unlock(&encode_manager.event.sem_lock);
3731 if (done == false) {
3732 kfree(encode_work_queue);
3733 encode_work_queue = NULL;
3734 enc_pr(LOG_ERROR, "too many work queue!\n");
3735 }
3736 return encode_work_queue; /* find it */
3737}
3738
/*
 * Unlink @encode_work_queue from the manager's registered-queue list.
 * Caller must hold manager->event.sem_lock. On a match, *wq is left
 * pointing at the removed queue, its reserve buffer slot (if any) is
 * released, *find is set true and wq_count is decremented.
 * NOTE(review): if no entry matches, *wq is left at the list_for_each
 * iteration cursor, not NULL — callers must check *find before using it.
 */
static void _destroy_encode_work_queue(struct encode_manager_s *manager,
	struct encode_wq_s **wq,
	struct encode_wq_s *encode_work_queue,
	bool *find)
{
	struct list_head *head;
	struct encode_wq_s *wp_tmp = NULL;
	u32 i, max_instance;
	struct Buff_s *reserve_buff;
	u32 buf_start = encode_work_queue->mem.buf_start;

	max_instance = manager->max_instance;
	head = &manager->wq;
	list_for_each_entry_safe((*wq), wp_tmp, head, list) {
		if ((*wq) && (*wq == encode_work_queue)) {
			list_del(&(*wq)->list);
			if (manager->use_reserve == true) {
				/* Return the queue's reserved buffer slot. */
				for (i = 0; i < max_instance; i++) {
					reserve_buff =
						&manager->reserve_buff[i];
					if (reserve_buff->used == true &&
						buf_start ==
						reserve_buff->buf_start) {
						reserve_buff->used = false;
						break;
					}
				}
			}
			*find = true;
			manager->wq_count--;
			enc_pr(LOG_DEBUG,
				"remove encode_work_queue %p success, %s line %d.\n",
				(void *)encode_work_queue,
				__func__, __LINE__);
			break;
		}
	}
}
3777
3778s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue)
3779{
3780 struct encode_queue_item_s *pitem, *tmp;
3781 struct encode_wq_s *wq = NULL;
3782 bool find = false;
3783
3784 struct list_head *head;
3785
3786 if (encode_work_queue) {
3787 spin_lock(&encode_manager.event.sem_lock);
3788 if (encode_manager.current_wq == encode_work_queue) {
3789 encode_manager.remove_flag = true;
3790 spin_unlock(&encode_manager.event.sem_lock);
3791 enc_pr(LOG_DEBUG,
3792 "warning--Destroy the running queue, should not be here.\n");
3793 wait_for_completion(
3794 &encode_manager.event.process_complete);
3795 spin_lock(&encode_manager.event.sem_lock);
3796 } /* else we can delete it safely. */
3797
3798 head = &encode_manager.process_queue;
3799 list_for_each_entry_safe(pitem, tmp, head, list) {
3800 if (pitem && pitem->request.parent ==
3801 encode_work_queue) {
3802 pitem->request.parent = NULL;
3803 enc_pr(LOG_DEBUG,
3804 "warning--remove not process request, should not be here.\n");
3805 list_move_tail(&pitem->list,
3806 &encode_manager.free_queue);
3807 }
3808 }
3809
3810 _destroy_encode_work_queue(&encode_manager, &wq,
3811 encode_work_queue, &find);
3812 spin_unlock(&encode_manager.event.sem_lock);
3813#ifdef CONFIG_CMA
3814 if (encode_work_queue->mem.buf_start) {
3815 if (wq->mem.cbr_info_ddr_virt_addr != NULL) {
3816 codec_mm_unmap_phyaddr(wq->mem.cbr_info_ddr_virt_addr);
3817 wq->mem.cbr_info_ddr_virt_addr = NULL;
3818 }
3819 codec_mm_free_for_dma(
3820 ENCODE_NAME,
3821 encode_work_queue->mem.buf_start);
3822 encode_work_queue->mem.buf_start = 0;
3823
3824 }
3825#endif
3826 kfree(encode_work_queue);
3827 complete(&encode_manager.event.request_in_com);
3828 }
3829 return 0;
3830}
3831
/*
 * Real-time kernel thread that drives the encoder: waits for request
 * signals, lazily initializes the hardware on the first registered work
 * queue, processes queued items, and powers the encoder down when the
 * last queue disappears. Stops on SIGTERM, kthread_stop() or queue-state
 * change.
 */
static s32 encode_monitor_thread(void *data)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)data;
	struct encode_queue_item_s *pitem = NULL;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
	s32 ret = 0;

	enc_pr(LOG_DEBUG, "encode workqueue monitor start.\n");
	sched_setscheduler(current, SCHED_FIFO, &param);
	allow_signal(SIGTERM);
	/* setup current_wq here. */
	while (manager->process_queue_state != ENCODE_PROCESS_QUEUE_STOP) {
		if (kthread_should_stop())
			break;

		/* Block until someone signals a new request (or removal). */
		ret = wait_for_completion_interruptible(
			&manager->event.request_in_com);

		if (ret == -ERESTARTSYS)
			break;

		if (kthread_should_stop())
			break;
		if (manager->inited == false) {
			/* Lazy hardware init: bring up the encoder (and the
			 * GE2D context) on the first registered work queue.
			 */
			spin_lock(&manager->event.sem_lock);
			if (!list_empty(&manager->wq)) {
				struct encode_wq_s *first_wq =
					list_entry(manager->wq.next,
					struct encode_wq_s, list);
				manager->current_wq = first_wq;
				spin_unlock(&manager->event.sem_lock);
				if (first_wq) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
					if (!manager->context)
						manager->context =
						create_ge2d_work_queue();
#endif
					avc_init(first_wq);
					manager->inited = true;
				}
				spin_lock(&manager->event.sem_lock);
				manager->current_wq = NULL;
				spin_unlock(&manager->event.sem_lock);
				if (manager->remove_flag) {
					complete(
						&manager
						->event.process_complete);
					manager->remove_flag = false;
				}
			} else
				spin_unlock(&manager->event.sem_lock);
			continue;
		}

		spin_lock(&manager->event.sem_lock);
		pitem = NULL;
		if (list_empty(&manager->wq)) {
			/* Last work queue is gone: power the encoder off. */
			spin_unlock(&manager->event.sem_lock);
			manager->inited = false;
			amvenc_avc_stop();
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			if (manager->context) {
				destroy_ge2d_work_queue(manager->context);
				manager->context = NULL;
			}
#endif
			enc_pr(LOG_DEBUG, "power off encode.\n");
			continue;
		} else if (!list_empty(&manager->process_queue)) {
			/* Pop the next pending request under the lock. */
			pitem = list_entry(manager->process_queue.next,
				struct encode_queue_item_s, list);
			list_del(&pitem->list);
			manager->current_item = pitem;
			manager->current_wq = pitem->request.parent;
		}
		spin_unlock(&manager->event.sem_lock);

		if (pitem) {
			/* Run the request, then recycle the queue item. */
			encode_process_request(manager, pitem);
			spin_lock(&manager->event.sem_lock);
			list_add_tail(&pitem->list, &manager->free_queue);
			manager->current_item = NULL;
			manager->last_wq = manager->current_wq;
			manager->current_wq = NULL;
			spin_unlock(&manager->event.sem_lock);
		}
		if (manager->remove_flag) {
			/* A destroy is waiting on us; let it proceed. */
			complete(&manager->event.process_complete);
			manager->remove_flag = false;
		}
	}
	/* Idle until kthread_stop() reaps us. */
	while (!kthread_should_stop())
		msleep(20);

	enc_pr(LOG_DEBUG, "exit encode_monitor_thread.\n");
	return 0;
}
3929
3930static s32 encode_start_monitor(void)
3931{
3932 s32 ret = 0;
3933
3934 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
3935 y_tnr_mot2alp_nrm_gain = 216;
3936 y_tnr_mot2alp_dis_gain = 144;
3937 c_tnr_mot2alp_nrm_gain = 216;
3938 c_tnr_mot2alp_dis_gain = 144;
3939 } else {
3940 /* more tnr */
3941 y_tnr_mot2alp_nrm_gain = 144;
3942 y_tnr_mot2alp_dis_gain = 96;
3943 c_tnr_mot2alp_nrm_gain = 144;
3944 c_tnr_mot2alp_dis_gain = 96;
3945 }
3946
3947 enc_pr(LOG_DEBUG, "encode start monitor.\n");
3948 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_START;
3949 encode_manager.encode_thread = kthread_run(encode_monitor_thread,
3950 &encode_manager, "encode_monitor");
3951 if (IS_ERR(encode_manager.encode_thread)) {
3952 ret = PTR_ERR(encode_manager.encode_thread);
3953 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3954 enc_pr(LOG_ERROR,
3955 "encode monitor : failed to start kthread (%d)\n", ret);
3956 }
3957 return ret;
3958}
3959
3960static s32 encode_stop_monitor(void)
3961{
3962 enc_pr(LOG_DEBUG, "stop encode monitor thread\n");
3963 if (encode_manager.encode_thread) {
3964 spin_lock(&encode_manager.event.sem_lock);
3965 if (!list_empty(&encode_manager.wq)) {
3966 u32 count = encode_manager.wq_count;
3967
3968 spin_unlock(&encode_manager.event.sem_lock);
3969 enc_pr(LOG_ERROR,
3970 "stop encode monitor thread error, active wq (%d) is not 0.\n",
3971 count);
3972 return -1;
3973 }
3974 spin_unlock(&encode_manager.event.sem_lock);
3975 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3976 send_sig(SIGTERM, encode_manager.encode_thread, 1);
3977 complete(&encode_manager.event.request_in_com);
3978 kthread_stop(encode_manager.encode_thread);
3979 encode_manager.encode_thread = NULL;
3980 kfree(mc_addr);
3981 mc_addr = NULL;
3982 }
3983 return 0;
3984}
3985
3986static s32 encode_wq_init(void)
3987{
3988 u32 i = 0;
3989 struct encode_queue_item_s *pitem = NULL;
3990
3991 enc_pr(LOG_DEBUG, "encode_wq_init.\n");
3992 encode_manager.irq_requested = false;
3993
3994 spin_lock_init(&encode_manager.event.sem_lock);
3995 init_completion(&encode_manager.event.request_in_com);
3996 init_waitqueue_head(&encode_manager.event.hw_complete);
3997 init_completion(&encode_manager.event.process_complete);
3998 INIT_LIST_HEAD(&encode_manager.process_queue);
3999 INIT_LIST_HEAD(&encode_manager.free_queue);
4000 INIT_LIST_HEAD(&encode_manager.wq);
4001
4002 tasklet_init(&encode_manager.encode_tasklet,
4003 encode_isr_tasklet,
4004 (ulong)&encode_manager);
4005
4006 for (i = 0; i < MAX_ENCODE_REQUEST; i++) {
4007 pitem = kcalloc(1,
4008 sizeof(struct encode_queue_item_s),
4009 GFP_KERNEL);
4010 if (IS_ERR(pitem)) {
4011 enc_pr(LOG_ERROR, "can't request queue item memory.\n");
4012 return -1;
4013 }
4014 pitem->request.parent = NULL;
4015 list_add_tail(&pitem->list, &encode_manager.free_queue);
4016 }
4017 encode_manager.current_wq = NULL;
4018 encode_manager.last_wq = NULL;
4019 encode_manager.encode_thread = NULL;
4020 encode_manager.current_item = NULL;
4021 encode_manager.wq_count = 0;
4022 encode_manager.remove_flag = false;
4023 InitEncodeWeight();
4024 if (encode_start_monitor()) {
4025 enc_pr(LOG_ERROR, "encode create thread error.\n");
4026 return -1;
4027 }
4028 return 0;
4029}
4030
4031static s32 encode_wq_uninit(void)
4032{
4033 struct encode_queue_item_s *pitem, *tmp;
4034 struct list_head *head;
4035 u32 count = 0;
4036 s32 r = -1;
4037
4038 enc_pr(LOG_DEBUG, "uninit encode wq.\n");
4039 if (encode_stop_monitor() == 0) {
4040 if ((encode_manager.irq_num >= 0) &&
4041 (encode_manager.irq_requested == true)) {
4042 free_irq(encode_manager.irq_num, &encode_manager);
4043 encode_manager.irq_requested = false;
4044 }
4045 spin_lock(&encode_manager.event.sem_lock);
4046 head = &encode_manager.process_queue;
4047 list_for_each_entry_safe(pitem, tmp, head, list) {
4048 if (pitem) {
4049 list_del(&pitem->list);
4050 kfree(pitem);
4051 count++;
4052 }
4053 }
4054 head = &encode_manager.free_queue;
4055 list_for_each_entry_safe(pitem, tmp, head, list) {
4056 if (pitem) {
4057 list_del(&pitem->list);
4058 kfree(pitem);
4059 count++;
4060 }
4061 }
4062 spin_unlock(&encode_manager.event.sem_lock);
4063 if (count == MAX_ENCODE_REQUEST)
4064 r = 0;
4065 else {
4066 enc_pr(LOG_ERROR, "lost some request item %d.\n",
4067 MAX_ENCODE_REQUEST - count);
4068 }
4069 }
4070 return r;
4071}
4072
4073static ssize_t encode_status_show(struct class *cla,
4074 struct class_attribute *attr, char *buf)
4075{
4076 u32 process_count = 0;
4077 u32 free_count = 0;
4078 struct encode_queue_item_s *pitem = NULL;
4079 struct encode_wq_s *current_wq = NULL;
4080 struct encode_wq_s *last_wq = NULL;
4081 struct list_head *head = NULL;
4082 s32 irq_num = 0;
4083 u32 hw_status = 0;
4084 u32 process_queue_state = 0;
4085 u32 wq_count = 0;
4086 u32 ucode_index;
4087 bool need_reset;
4088 bool process_irq;
4089 bool inited;
4090 bool use_reserve;
4091 struct Buff_s reserve_mem;
4092 u32 max_instance;
4093#ifdef CONFIG_CMA
4094 bool check_cma = false;
4095#endif
4096
4097 spin_lock(&encode_manager.event.sem_lock);
4098 head = &encode_manager.free_queue;
4099 list_for_each_entry(pitem, head, list) {
4100 free_count++;
4101 if (free_count > MAX_ENCODE_REQUEST)
4102 break;
4103 }
4104
4105 head = &encode_manager.process_queue;
4106 list_for_each_entry(pitem, head, list) {
4107 process_count++;
4108 if (free_count > MAX_ENCODE_REQUEST)
4109 break;
4110 }
4111
4112 current_wq = encode_manager.current_wq;
4113 last_wq = encode_manager.last_wq;
4114 pitem = encode_manager.current_item;
4115 irq_num = encode_manager.irq_num;
4116 hw_status = encode_manager.encode_hw_status;
4117 process_queue_state = encode_manager.process_queue_state;
4118 wq_count = encode_manager.wq_count;
4119 ucode_index = encode_manager.ucode_index;
4120 need_reset = encode_manager.need_reset;
4121 process_irq = encode_manager.process_irq;
4122 inited = encode_manager.inited;
4123 use_reserve = encode_manager.use_reserve;
4124 reserve_mem.buf_start = encode_manager.reserve_mem.buf_start;
4125 reserve_mem.buf_size = encode_manager.reserve_mem.buf_size;
4126
4127 max_instance = encode_manager.max_instance;
4128#ifdef CONFIG_CMA
4129 check_cma = encode_manager.check_cma;
4130#endif
4131
4132 spin_unlock(&encode_manager.event.sem_lock);
4133
4134 enc_pr(LOG_DEBUG,
4135 "encode process queue count: %d, free queue count: %d.\n",
4136 process_count, free_count);
4137 enc_pr(LOG_DEBUG,
4138 "encode curent wq: %p, last wq: %p, wq count: %d, max_instance: %d.\n",
4139 current_wq, last_wq, wq_count, max_instance);
4140 if (current_wq)
4141 enc_pr(LOG_DEBUG,
4142 "encode curent wq -- encode width: %d, encode height: %d.\n",
4143 current_wq->pic.encoder_width,
4144 current_wq->pic.encoder_height);
4145 enc_pr(LOG_DEBUG,
4146 "encode curent pitem: %p, ucode_index: %d, hw_status: %d, need_reset: %s, process_irq: %s.\n",
4147 pitem, ucode_index, hw_status, need_reset ? "true" : "false",
4148 process_irq ? "true" : "false");
4149 enc_pr(LOG_DEBUG,
4150 "encode irq num: %d, inited: %s, process_queue_state: %d.\n",
4151 irq_num, inited ? "true" : "false", process_queue_state);
4152 if (use_reserve) {
4153 enc_pr(LOG_DEBUG,
4154 "encode use reserve memory, buffer start: 0x%x, size: %d MB.\n",
4155 reserve_mem.buf_start,
4156 reserve_mem.buf_size / SZ_1M);
4157 } else {
4158#ifdef CONFIG_CMA
4159 enc_pr(LOG_DEBUG, "encode check cma: %s.\n",
4160 check_cma ? "true" : "false");
4161#endif
4162 }
4163 return snprintf(buf, 40, "encode max instance: %d\n", max_instance);
4164}
4165
/* Sysfs attributes of the amvenc class: read-only 'encode_status'. */
static struct class_attribute amvenc_class_attrs[] = {
	__ATTR(encode_status,
		S_IRUGO | S_IWUSR,
		encode_status_show,
		NULL),
	__ATTR_NULL
};
4173
/* Device class backing the /dev node and the sysfs attributes above. */
static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_attrs = amvenc_class_attrs,
};
4178
4179s32 init_avc_device(void)
4180{
4181 s32 r = 0;
4182
4183 r = register_chrdev(0, DEVICE_NAME, &amvenc_avc_fops);
4184 if (r <= 0) {
4185 enc_pr(LOG_ERROR, "register amvenc_avc device error.\n");
4186 return r;
4187 }
4188 avc_device_major = r;
4189
4190 r = class_register(&amvenc_avc_class);
4191 if (r < 0) {
4192 enc_pr(LOG_ERROR, "error create amvenc_avc class.\n");
4193 return r;
4194 }
4195
4196 amvenc_avc_dev = device_create(&amvenc_avc_class, NULL,
4197 MKDEV(avc_device_major, 0), NULL,
4198 DEVICE_NAME);
4199
4200 if (IS_ERR(amvenc_avc_dev)) {
4201 enc_pr(LOG_ERROR, "create amvenc_avc device error.\n");
4202 class_unregister(&amvenc_avc_class);
4203 return -1;
4204 }
4205 return r;
4206}
4207
4208s32 uninit_avc_device(void)
4209{
4210 if (amvenc_avc_dev)
4211 device_destroy(&amvenc_avc_class, MKDEV(avc_device_major, 0));
4212
4213 class_destroy(&amvenc_avc_class);
4214
4215 unregister_chrdev(avc_device_major, DEVICE_NAME);
4216 return 0;
4217}
4218
4219static s32 avc_mem_device_init(struct reserved_mem *rmem, struct device *dev)
4220{
4221 s32 r;
4222 struct resource res;
4223
4224 if (!rmem) {
4225 enc_pr(LOG_ERROR,
4226 "Can not obtain I/O memory, and will allocate avc buffer!\n");
4227 r = -EFAULT;
4228 return r;
4229 }
4230 res.start = (phys_addr_t)rmem->base;
4231 res.end = res.start + (phys_addr_t)rmem->size - 1;
4232 encode_manager.reserve_mem.buf_start = res.start;
4233 encode_manager.reserve_mem.buf_size = res.end - res.start + 1;
4234
4235 if (encode_manager.reserve_mem.buf_size >=
4236 amvenc_buffspec[0].min_buffsize) {
4237 encode_manager.max_instance =
4238 encode_manager.reserve_mem.buf_size /
4239 amvenc_buffspec[0].min_buffsize;
4240 if (encode_manager.max_instance > MAX_ENCODE_INSTANCE)
4241 encode_manager.max_instance = MAX_ENCODE_INSTANCE;
4242 encode_manager.reserve_buff = kzalloc(
4243 encode_manager.max_instance *
4244 sizeof(struct Buff_s), GFP_KERNEL);
4245 if (encode_manager.reserve_buff) {
4246 u32 i;
4247 struct Buff_s *reserve_buff;
4248 u32 max_instance = encode_manager.max_instance;
4249
4250 for (i = 0; i < max_instance; i++) {
4251 reserve_buff = &encode_manager.reserve_buff[i];
4252 reserve_buff->buf_start =
4253 i *
4254 amvenc_buffspec[0]
4255 .min_buffsize +
4256 encode_manager.reserve_mem.buf_start;
4257 reserve_buff->buf_size =
4258 encode_manager.reserve_mem.buf_start;
4259 reserve_buff->used = false;
4260 }
4261 encode_manager.use_reserve = true;
4262 r = 0;
4263 enc_pr(LOG_DEBUG,
4264 "amvenc_avc use reserve memory, buff start: 0x%x, size: 0x%x, max instance is %d\n",
4265 encode_manager.reserve_mem.buf_start,
4266 encode_manager.reserve_mem.buf_size,
4267 encode_manager.max_instance);
4268 } else {
4269 enc_pr(LOG_ERROR,
4270 "amvenc_avc alloc reserve buffer pointer fail. max instance is %d.\n",
4271 encode_manager.max_instance);
4272 encode_manager.max_instance = 0;
4273 encode_manager.reserve_mem.buf_start = 0;
4274 encode_manager.reserve_mem.buf_size = 0;
4275 r = -ENOMEM;
4276 }
4277 } else {
4278 enc_pr(LOG_ERROR,
4279 "amvenc_avc memory resource too small, size is 0x%x. Need 0x%x bytes at least.\n",
4280 encode_manager.reserve_mem.buf_size,
4281 amvenc_buffspec[0]
4282 .min_buffsize);
4283 encode_manager.reserve_mem.buf_start = 0;
4284 encode_manager.reserve_mem.buf_size = 0;
4285 r = -ENOMEM;
4286 }
4287 return r;
4288}
4289
/*
 * Platform-driver probe: reset encoder-manager state, claim the
 * reserved memory region (or fall back to a CMA pool), fetch the
 * hardware interrupt, start the encode work queue and create the
 * character device.
 *
 * Returns 0 on success; a negative errno aborts the probe.
 */
static s32 amvenc_avc_probe(struct platform_device *pdev)
{
	/* struct resource mem; */
	s32 res_irq;
	s32 idx;
	s32 r;

	enc_pr(LOG_INFO, "amvenc_avc probe start.\n");

	encode_manager.this_pdev = pdev;
#ifdef CONFIG_CMA
	encode_manager.check_cma = false;
#endif
	encode_manager.reserve_mem.buf_start = 0;
	encode_manager.reserve_mem.buf_size = 0;
	encode_manager.use_reserve = false;
	encode_manager.max_instance = 0;
	encode_manager.reserve_buff = NULL;

	/*
	 * Runs avc_mem_device_init() through the reserved_mem ops; on
	 * success that callback sets encode_manager.use_reserve = true.
	 * Failure is not fatal here because the CMA fallback below may
	 * still provide memory.
	 */
	idx = of_reserved_mem_device_init(&pdev->dev);
	if (idx != 0) {
		enc_pr(LOG_DEBUG,
			"amvenc_avc_probe -- reserved memory config fail.\n");
	}

	if (encode_manager.use_reserve == false) {
#ifndef CONFIG_CMA
		enc_pr(LOG_ERROR,
			"amvenc_avc memory is invaild, probe fail!\n");
		return -EFAULT;
#else
		/* Fall back to a CMA pool, capped at MIN_SIZE * 3 bytes. */
		encode_manager.cma_pool_size =
			(codec_mm_get_total_size() > (MIN_SIZE * 3)) ?
			(MIN_SIZE * 3) : codec_mm_get_total_size();
		enc_pr(LOG_DEBUG,
			"amvenc_avc - cma memory pool size: %d MB\n",
			(u32)encode_manager.cma_pool_size / SZ_1M);
#endif
	}

	res_irq = platform_get_irq(pdev, 0);
	if (res_irq < 0) {
		enc_pr(LOG_ERROR, "[%s] get irq error!", __func__);
		return -EINVAL;
	}

	encode_manager.irq_num = res_irq;
	if (encode_wq_init()) {
		/* undo the table allocated by avc_mem_device_init() */
		kfree(encode_manager.reserve_buff);
		encode_manager.reserve_buff = NULL;
		enc_pr(LOG_ERROR, "encode work queue init error.\n");
		return -EFAULT;
	}

	r = init_avc_device();
	enc_pr(LOG_INFO, "amvenc_avc probe end.\n");
	return r;
}
4348
4349static s32 amvenc_avc_remove(struct platform_device *pdev)
4350{
4351 kfree(encode_manager.reserve_buff);
4352 encode_manager.reserve_buff = NULL;
4353 if (encode_wq_uninit())
4354 enc_pr(LOG_ERROR, "encode work queue uninit error.\n");
4355 uninit_avc_device();
4356 enc_pr(LOG_INFO, "amvenc_avc remove.\n");
4357 return 0;
4358}
4359
/* Device-tree match table; the compatible string binds this driver. */
static const struct of_device_id amlogic_avcenc_dt_match[] = {
	{
		.compatible = "amlogic, amvenc_avc",
	},
	{},
};
4366
/* Platform driver glue: probe/remove plus the OF match table above. */
static struct platform_driver amvenc_avc_driver = {
	.probe = amvenc_avc_probe,
	.remove = amvenc_avc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = amlogic_avcenc_dt_match,
	}
};
4375
/* Codec profile advertised through vcodec_profile_register(). */
static struct codec_profile_t amvenc_avc_profile = {
	.name = "avc",
	.profile = ""
};
4380
4381static s32 __init amvenc_avc_driver_init_module(void)
4382{
4383 enc_pr(LOG_INFO, "amvenc_avc module init\n");
4384
4385 if (platform_driver_register(&amvenc_avc_driver)) {
4386 enc_pr(LOG_ERROR,
4387 "failed to register amvenc_avc driver\n");
4388 return -ENODEV;
4389 }
4390 vcodec_profile_register(&amvenc_avc_profile);
4391 return 0;
4392}
4393
/* Module exit point: unregister the platform driver. */
static void __exit amvenc_avc_driver_remove_module(void)
{
	enc_pr(LOG_INFO, "amvenc_avc module remove.\n");

	platform_driver_unregister(&amvenc_avc_driver);
}
4400
/* Reserved-memory ops; device_init fires when probe attaches the region. */
static const struct reserved_mem_ops rmem_avc_ops = {
	.device_init = avc_mem_device_init,
};
4404
4405static s32 __init avc_mem_setup(struct reserved_mem *rmem)
4406{
4407 rmem->ops = &rmem_avc_ops;
4408 enc_pr(LOG_DEBUG, "amvenc_avc reserved mem setup.\n");
4409 return 0;
4410}
4411
4412static int enc_dma_buf_map(struct enc_dma_cfg *cfg)
4413{
4414 long ret = -1;
4415 int fd = -1;
4416 struct dma_buf *dbuf = NULL;
4417 struct dma_buf_attachment *d_att = NULL;
4418 struct sg_table *sg = NULL;
4419 void *vaddr = NULL;
4420 struct device *dev = NULL;
4421 enum dma_data_direction dir;
4422
4423 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
4424 enc_pr(LOG_ERROR, "error input param\n");
4425 return -EINVAL;
4426 }
4427 enc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd);
4428
4429 fd = cfg->fd;
4430 dev = cfg->dev;
4431 dir = cfg->dir;
4432 enc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd);
4433
4434 dbuf = dma_buf_get(fd);
4435 if (dbuf == NULL) {
4436 enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd);
4437 return -EINVAL;
4438 }
4439
4440 d_att = dma_buf_attach(dbuf, dev);
4441 if (d_att == NULL) {
4442 enc_pr(LOG_ERROR, "failed to set dma attach\n");
4443 goto attach_err;
4444 }
4445
4446 sg = dma_buf_map_attachment(d_att, dir);
4447 if (sg == NULL) {
4448 enc_pr(LOG_ERROR, "failed to get dma sg\n");
4449 goto map_attach_err;
4450 }
4451
4452 ret = dma_buf_begin_cpu_access(dbuf, dir);
4453 if (ret != 0) {
4454 enc_pr(LOG_ERROR, "failed to access dma buff\n");
4455 goto access_err;
4456 }
4457
4458 vaddr = dma_buf_vmap(dbuf);
4459 if (vaddr == NULL) {
4460 enc_pr(LOG_ERROR, "failed to vmap dma buf\n");
4461 goto vmap_err;
4462 }
4463 cfg->dbuf = dbuf;
4464 cfg->attach = d_att;
4465 cfg->vaddr = vaddr;
4466 cfg->sg = sg;
4467
4468 return ret;
4469
4470vmap_err:
4471 dma_buf_end_cpu_access(dbuf, dir);
4472
4473access_err:
4474 dma_buf_unmap_attachment(d_att, sg, dir);
4475
4476map_attach_err:
4477 dma_buf_detach(dbuf, d_att);
4478
4479attach_err:
4480 dma_buf_put(dbuf);
4481
4482 return ret;
4483}
4484
4485static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr)
4486{
4487 struct sg_table *sg_table;
4488 struct page *page;
4489 int ret;
4490 enc_pr(LOG_INFO, "enc_dma_buf_get_phys in\n");
4491
4492 ret = enc_dma_buf_map(cfg);
4493 if (ret < 0) {
4494 enc_pr(LOG_ERROR, "gdc_dma_buf_map failed\n");
4495 return ret;
4496 }
4497 if (cfg->sg) {
4498 sg_table = cfg->sg;
4499 page = sg_page(sg_table->sgl);
4500 *addr = PFN_PHYS(page_to_pfn(page));
4501 ret = 0;
4502 }
4503 enc_pr(LOG_INFO, "enc_dma_buf_get_phys 0x%lx\n", *addr);
4504 return ret;
4505}
4506
4507static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg)
4508{
4509 int fd = -1;
4510 struct dma_buf *dbuf = NULL;
4511 struct dma_buf_attachment *d_att = NULL;
4512 struct sg_table *sg = NULL;
4513 void *vaddr = NULL;
4514 struct device *dev = NULL;
4515 enum dma_data_direction dir;
4516
4517 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
4518 || cfg->dbuf == NULL || cfg->vaddr == NULL
4519 || cfg->attach == NULL || cfg->sg == NULL) {
4520 enc_pr(LOG_ERROR, "Error input param\n");
4521 return;
4522 }
4523
4524 fd = cfg->fd;
4525 dev = cfg->dev;
4526 dir = cfg->dir;
4527 dbuf = cfg->dbuf;
4528 vaddr = cfg->vaddr;
4529 d_att = cfg->attach;
4530 sg = cfg->sg;
4531
4532 dma_buf_vunmap(dbuf, vaddr);
4533
4534 dma_buf_end_cpu_access(dbuf, dir);
4535
4536 dma_buf_unmap_attachment(d_att, sg, dir);
4537
4538 dma_buf_detach(dbuf, d_att);
4539
4540 dma_buf_put(dbuf);
4541 enc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr);
4542}
4543
4544
/* Core tuning knobs exposed as writable module parameters (0664). */
module_param(fixed_slice_cfg, uint, 0664);
MODULE_PARM_DESC(fixed_slice_cfg, "\n fixed_slice_cfg\n");

module_param(clock_level, uint, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

module_param(encode_print_level, uint, 0664);
MODULE_PARM_DESC(encode_print_level, "\n encode_print_level\n");

module_param(no_timeout, uint, 0664);
MODULE_PARM_DESC(no_timeout, "\n no_timeout flag for process request\n");

module_param(nr_mode, int, 0664);
MODULE_PARM_DESC(nr_mode, "\n nr_mode option\n");

module_param(qp_table_debug, uint, 0664);
MODULE_PARM_DESC(qp_table_debug, "\n print qp table\n");

/* Scalable-video-coding knobs, compiled in only with H264_ENC_SVC. */
#ifdef H264_ENC_SVC
module_param(svc_enable, uint, 0664);
MODULE_PARM_DESC(svc_enable, "\n svc enable\n");
module_param(svc_ref_conf, uint, 0664);
MODULE_PARM_DESC(svc_ref_conf, "\n svc reference duration config\n");
#endif
4569
/*
 * Extended low-level tuning parameters, compiled in only with
 * MORE_MODULE_PARAM: motion-estimation (me_*), temporal noise
 * reduction for luma/chroma (y_tnr_* / c_tnr_*) and spatial noise
 * reduction plus blending (y_snr_*, y_bld_* / c_snr_*, c_bld_*).
 */
#ifdef MORE_MODULE_PARAM
/* --- motion estimation --- */
module_param(me_mv_merge_ctl, uint, 0664);
MODULE_PARM_DESC(me_mv_merge_ctl, "\n me_mv_merge_ctl\n");

module_param(me_step0_close_mv, uint, 0664);
MODULE_PARM_DESC(me_step0_close_mv, "\n me_step0_close_mv\n");

module_param(me_f_skip_sad, uint, 0664);
MODULE_PARM_DESC(me_f_skip_sad, "\n me_f_skip_sad\n");

module_param(me_f_skip_weight, uint, 0664);
MODULE_PARM_DESC(me_f_skip_weight, "\n me_f_skip_weight\n");

module_param(me_mv_weight_01, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_01, "\n me_mv_weight_01\n");

module_param(me_mv_weight_23, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_23, "\n me_mv_weight_23\n");

module_param(me_sad_range_inc, uint, 0664);
MODULE_PARM_DESC(me_sad_range_inc, "\n me_sad_range_inc\n");

module_param(me_sad_enough_01, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_01, "\n me_sad_enough_01\n");

module_param(me_sad_enough_23, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_23, "\n me_sad_enough_23\n");

/* --- temporal noise reduction, luma --- */
module_param(y_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(y_tnr_mc_en, "\n y_tnr_mc_en option\n");
module_param(y_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(y_tnr_txt_mode, "\n y_tnr_txt_mode option\n");
module_param(y_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_sad_margin, "\n y_tnr_mot_sad_margin option\n");
module_param(y_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_cortxt_rate, "\n y_tnr_mot_cortxt_rate option\n");
module_param(y_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_ofst, "\n y_tnr_mot_distxt_ofst option\n");
module_param(y_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_rate, "\n y_tnr_mot_distxt_rate option\n");
module_param(y_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_dismot_ofst, "\n y_tnr_mot_dismot_ofst option\n");
module_param(y_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_frcsad_lock, "\n y_tnr_mot_frcsad_lock option\n");
module_param(y_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_frc_gain, "\n y_tnr_mot2alp_frc_gain option\n");
module_param(y_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_nrm_gain, "\n y_tnr_mot2alp_nrm_gain option\n");
module_param(y_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_gain, "\n y_tnr_mot2alp_dis_gain option\n");
module_param(y_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_ofst, "\n y_tnr_mot2alp_dis_ofst option\n");
module_param(y_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_min, "\n y_tnr_alpha_min option\n");
module_param(y_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_max, "\n y_tnr_alpha_max option\n");
module_param(y_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(y_tnr_deghost_os, "\n y_tnr_deghost_os option\n");

/* --- temporal noise reduction, chroma --- */
module_param(c_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(c_tnr_mc_en, "\n c_tnr_mc_en option\n");
module_param(c_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(c_tnr_txt_mode, "\n c_tnr_txt_mode option\n");
module_param(c_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_sad_margin, "\n c_tnr_mot_sad_margin option\n");
module_param(c_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_cortxt_rate, "\n c_tnr_mot_cortxt_rate option\n");
module_param(c_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_ofst, "\n c_tnr_mot_distxt_ofst option\n");
module_param(c_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_rate, "\n c_tnr_mot_distxt_rate option\n");
module_param(c_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_dismot_ofst, "\n c_tnr_mot_dismot_ofst option\n");
module_param(c_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_frcsad_lock, "\n c_tnr_mot_frcsad_lock option\n");
module_param(c_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_frc_gain, "\n c_tnr_mot2alp_frc_gain option\n");
module_param(c_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_nrm_gain, "\n c_tnr_mot2alp_nrm_gain option\n");
module_param(c_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_gain, "\n c_tnr_mot2alp_dis_gain option\n");
module_param(c_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_ofst, "\n c_tnr_mot2alp_dis_ofst option\n");
module_param(c_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_min, "\n c_tnr_alpha_min option\n");
module_param(c_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_max, "\n c_tnr_alpha_max option\n");
module_param(c_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(c_tnr_deghost_os, "\n c_tnr_deghost_os option\n");

/* --- spatial noise reduction and blending, luma --- */
module_param(y_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(y_snr_err_norm, "\n y_snr_err_norm option\n");
module_param(y_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_core, "\n y_snr_gau_bld_core option\n");
module_param(y_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_ofst, "\n y_snr_gau_bld_ofst option\n");
module_param(y_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_rate, "\n y_snr_gau_bld_rate option\n");
module_param(y_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_min, "\n y_snr_gau_alp0_min option\n");
module_param(y_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_max, "\n y_snr_gau_alp0_max option\n");
module_param(y_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(y_bld_beta2alp_rate, "\n y_bld_beta2alp_rate option\n");
module_param(y_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_min, "\n y_bld_beta_min option\n");
module_param(y_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_max, "\n y_bld_beta_max option\n");

/* --- spatial noise reduction and blending, chroma --- */
module_param(c_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(c_snr_err_norm, "\n c_snr_err_norm option\n");
module_param(c_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_core, "\n c_snr_gau_bld_core option\n");
module_param(c_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_ofst, "\n c_snr_gau_bld_ofst option\n");
module_param(c_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_rate, "\n c_snr_gau_bld_rate option\n");
module_param(c_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_min, "\n c_snr_gau_alp0_min option\n");
module_param(c_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_max, "\n c_snr_gau_alp0_max option\n");
module_param(c_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(c_bld_beta2alp_rate, "\n c_bld_beta2alp_rate option\n");
module_param(c_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_min, "\n c_bld_beta_min option\n");
module_param(c_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_max, "\n c_bld_beta_max option\n");
#endif
4698
/* Module entry/exit hookup and early reserved-memory binding. */
module_init(amvenc_avc_driver_init_module);
module_exit(amvenc_avc_driver_remove_module);
RESERVEDMEM_OF_DECLARE(amvenc_avc, "amlogic, amvenc-memory", avc_mem_setup);

MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");
4706