summaryrefslogtreecommitdiff
path: root/drivers/frame_sink/encoder/h264/encoder.c (plain)
blob: dbc3dc78b45c5d07eb5d79e07de9f458531b37cb
1/*
2 * drivers/amlogic/amports/encoder.c
3 *
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16*/
17
18#include <linux/kernel.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/timer.h>
23#include <linux/fs.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spinlock.h>
29#include <linux/ctype.h>
30#include <linux/amlogic/media/frame_sync/ptsserv.h>
31#include <linux/amlogic/media/utils/amstream.h>
32#include <linux/amlogic/media/canvas/canvas.h>
33#include <linux/amlogic/media/canvas/canvas_mgr.h>
34#include <linux/amlogic/media/codec_mm/codec_mm.h>
35
36#include <linux/amlogic/media/utils/vdec_reg.h>
37#include "../../../frame_provider/decoder/utils/vdec.h"
38#include <linux/delay.h>
39#include <linux/poll.h>
40#include <linux/of.h>
41#include <linux/of_fdt.h>
42#include <linux/dma-contiguous.h>
43#include <linux/kthread.h>
44#include <linux/sched/rt.h>
45#include <linux/amlogic/media/utils/amports_config.h>
46#include "encoder.h"
47#include "../../../frame_provider/decoder/utils/amvdec.h"
48#include <linux/amlogic/media/utils/amlog.h>
49#include "../../../stream_input/amports/amports_priv.h"
50#include "../../../frame_provider/decoder/utils/firmware.h"
51#include <linux/of_reserved_mem.h>
52
53
54#ifdef CONFIG_AM_JPEG_ENCODER
55#include "jpegenc.h"
56#endif
57
#define ENCODE_NAME "encoder"
/* First and last hardware canvas indices reserved for the encoder. */
#define AMVENC_CANVAS_INDEX 0xE4
#define AMVENC_CANVAS_MAX_INDEX 0xEF

/* Minimum working-buffer size, taken from the level-0 buffer spec. */
#define MIN_SIZE amvenc_buffspec[0].min_buffsize
/* Bytes of per-macroblock dump info the firmware writes (CBR path). */
#define DUMP_INFO_BYTES_PER_MB 80

#define ADJUSTED_QP_FLAG 64

/* char-device major number, allocated at probe time */
static s32 avc_device_major;
static struct device *amvenc_avc_dev;
#define DRIVER_NAME "amvenc_avc"
#define CLASS_NAME "amvenc_avc"
#define DEVICE_NAME "amvenc_avc"

/* Global encoder state shared by all encode work queues. */
static struct encode_manager_s encode_manager;

#define MULTI_SLICE_MC
#define H264_ENC_CBR
/* #define MORE_MODULE_PARAM */

/* Encoder canvases start at the reserved index above. */
#define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX

#define UCODE_MODE_FULL 0

/* #define ENABLE_IGNORE_FUNCTION */

/* Intra/inter estimation defaults later programmed into the hardware. */
static u32 ie_me_mb_type;
static u32 ie_me_mode;
static u32 ie_pippeline_block = 3;
static u32 ie_cur_ref_sel;
/* static u32 avc_endian = 6; */
static u32 clock_level = 5;

static u32 encode_print_level = LOG_DEBUG;
static u32 no_timeout;
/* noise-reduction mode: -1 = use driver default (see mfdin setup) */
static int nr_mode = -1;
static u32 qp_table_debug;

#ifdef H264_ENC_SVC
static u32 svc_enable = 0; /* Enable sac feature or not */
static u32 svc_ref_conf = 0; /* Continuous no reference numbers */
#endif
101
/*
 * Motion-estimation tuning words. Defaults below are compile-time
 * values; InitEncodeWeight() recomputes most of them at runtime
 * from the ME_* macros further down.
 */
static u32 me_mv_merge_ctl =
	(0x1 << 31) | /* [31] me_merge_mv_en_16 */
	(0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
	(0x1 << 29) | /* [29] me_merge_flex_en_16 */
	(0x1 << 28) | /* [28] me_merge_sad_en_16 */
	(0x1 << 27) | /* [27] me_merge_mv_en_8 */
	(0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
	(0x1 << 25) | /* [25] me_merge_flex_en_8 */
	(0x1 << 24) | /* [24] me_merge_sad_en_8 */
	/* [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged */
	(0x12 << 18) |
	/* [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged */
	(0x2b << 12) |
	/* [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV */
	(0x80 << 0);
	/* ( 0x4 << 18) |
	 * // [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged
	 */
	/* ( 0x3f << 12) |
	 * // [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged
	 */
	/* ( 0xc0 << 0);
	 * // [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV
	 */

/* step/pre MV weights, packed one byte per field */
static u32 me_mv_weight_01 = (0x40 << 24) | (0x30 << 16) | (0x20 << 8) | 0x30;
static u32 me_mv_weight_23 = (0x40 << 8) | 0x30;
static u32 me_sad_range_inc = 0x03030303;
static u32 me_step0_close_mv = 0x003ffc21;
static u32 me_f_skip_sad;
static u32 me_f_skip_weight;
static u32 me_sad_enough_01;/* 0x00018010; */
static u32 me_sad_enough_23;/* 0x00000020; */

/* [31:0] NUM_ROWS_PER_SLICE_P */
/* [15:0] NUM_ROWS_PER_SLICE_I */
static u32 fixed_slice_cfg;
139
/*
 * Noise-reduction tuning parameters programmed into the MFDIN NR
 * registers (see mfdin_basic). TNR = temporal NR, SNR = spatial NR;
 * y_* applies to luma, c_* to chroma.
 */
/* y tnr */
static unsigned int y_tnr_mc_en = 1;
static unsigned int y_tnr_txt_mode;
static unsigned int y_tnr_mot_sad_margin = 1;
static unsigned int y_tnr_mot_cortxt_rate = 1;
static unsigned int y_tnr_mot_distxt_ofst = 5;
static unsigned int y_tnr_mot_distxt_rate = 4;
static unsigned int y_tnr_mot_dismot_ofst = 4;
static unsigned int y_tnr_mot_frcsad_lock = 8;
static unsigned int y_tnr_mot2alp_frc_gain = 10;
static unsigned int y_tnr_mot2alp_nrm_gain = 216;
static unsigned int y_tnr_mot2alp_dis_gain = 128;
static unsigned int y_tnr_mot2alp_dis_ofst = 32;
static unsigned int y_tnr_alpha_min = 32;
static unsigned int y_tnr_alpha_max = 63;
static unsigned int y_tnr_deghost_os;
/* c tnr */
static unsigned int c_tnr_mc_en = 1;
static unsigned int c_tnr_txt_mode;
static unsigned int c_tnr_mot_sad_margin = 1;
static unsigned int c_tnr_mot_cortxt_rate = 1;
static unsigned int c_tnr_mot_distxt_ofst = 5;
static unsigned int c_tnr_mot_distxt_rate = 4;
static unsigned int c_tnr_mot_dismot_ofst = 4;
static unsigned int c_tnr_mot_frcsad_lock = 8;
static unsigned int c_tnr_mot2alp_frc_gain = 10;
static unsigned int c_tnr_mot2alp_nrm_gain = 216;
static unsigned int c_tnr_mot2alp_dis_gain = 128;
static unsigned int c_tnr_mot2alp_dis_ofst = 32;
static unsigned int c_tnr_alpha_min = 32;
static unsigned int c_tnr_alpha_max = 63;
static unsigned int c_tnr_deghost_os;
/* y snr */
static unsigned int y_snr_err_norm = 1;
static unsigned int y_snr_gau_bld_core = 1;
static int y_snr_gau_bld_ofst = -1;
static unsigned int y_snr_gau_bld_rate = 48;
static unsigned int y_snr_gau_alp0_min;
static unsigned int y_snr_gau_alp0_max = 63;
static unsigned int y_bld_beta2alp_rate = 16;
static unsigned int y_bld_beta_min;
static unsigned int y_bld_beta_max = 63;
/* c snr */
static unsigned int c_snr_err_norm = 1;
static unsigned int c_snr_gau_bld_core = 1;
static int c_snr_gau_bld_ofst = -1;
static unsigned int c_snr_gau_bld_rate = 48;
static unsigned int c_snr_gau_alp0_min;
static unsigned int c_snr_gau_alp0_max = 63;
static unsigned int c_bld_beta2alp_rate = 16;
static unsigned int c_bld_beta_min;
static unsigned int c_bld_beta_max = 63;
static unsigned int qp_mode;

/* protects encoder hardware/ISR shared state */
static DEFINE_SPINLOCK(lock);
195
#define ADV_MV_LARGE_16x8 1
#define ADV_MV_LARGE_8x16 1
#define ADV_MV_LARGE_16x16 1

/* me weight offset should not very small, it used by v1 me module. */
/* the min real sad for me is 16 by hardware. */
#define ME_WEIGHT_OFFSET 0x520
#define I4MB_WEIGHT_OFFSET 0x655
#define I16MB_WEIGHT_OFFSET 0x560

/* per-partition advanced-MV weights (cost added before mode decision) */
#define ADV_MV_16x16_WEIGHT 0x080
#define ADV_MV_16_8_WEIGHT 0x0e0
#define ADV_MV_8x8_WEIGHT 0x240
#define ADV_MV_4x4x4_WEIGHT 0x3000

#define IE_SAD_SHIFT_I16 0x001
#define IE_SAD_SHIFT_I4 0x001
#define ME_SAD_SHIFT_INTER 0x001

/* force-skip SAD/weight per search step; 0 = disabled */
#define STEP_2_SKIP_SAD 0
#define STEP_1_SKIP_SAD 0
#define STEP_0_SKIP_SAD 0
#define STEP_2_SKIP_WEIGHT 0
#define STEP_1_SKIP_WEIGHT 0
#define STEP_0_SKIP_WEIGHT 0

#define ME_SAD_RANGE_0 0x1 /* 0x0 */
#define ME_SAD_RANGE_1 0x0
#define ME_SAD_RANGE_2 0x0
#define ME_SAD_RANGE_3 0x0

/* use 0 for v3, 0x18 for v2 */
#define ME_MV_PRE_WEIGHT_0 0x18
/* use 0 for v3, 0x18 for v2 */
#define ME_MV_PRE_WEIGHT_1 0x18
#define ME_MV_PRE_WEIGHT_2 0x0
#define ME_MV_PRE_WEIGHT_3 0x0

/* use 0 for v3, 0x18 for v2 */
#define ME_MV_STEP_WEIGHT_0 0x18
/* use 0 for v3, 0x18 for v2 */
#define ME_MV_STEP_WEIGHT_1 0x18
#define ME_MV_STEP_WEIGHT_2 0x0
#define ME_MV_STEP_WEIGHT_3 0x0

#define ME_SAD_ENOUGH_0_DATA 0x00
#define ME_SAD_ENOUGH_1_DATA 0x04
#define ME_SAD_ENOUGH_2_DATA 0x11
#define ADV_MV_8x8_ENOUGH_DATA 0x20

/* V4_COLOR_BLOCK_FIX */
#define V3_FORCE_SKIP_SAD_0 0x10
/* 4 Blocks */
#define V3_FORCE_SKIP_SAD_1 0x60
/* 16 Blocks + V3_SKIP_WEIGHT_2 */
#define V3_FORCE_SKIP_SAD_2 0x250
/* almost disable it -- use t_lac_coeff_2 output to F_ZERO is better */
#define V3_ME_F_ZERO_SAD (ME_WEIGHT_OFFSET + 0x10)

#define V3_IE_F_ZERO_SAD_I16 (I16MB_WEIGHT_OFFSET + 0x10)
#define V3_IE_F_ZERO_SAD_I4 (I4MB_WEIGHT_OFFSET + 0x20)

#define V3_SKIP_WEIGHT_0 0x10
/* 4 Blocks 8 separate search sad can be very low */
#define V3_SKIP_WEIGHT_1 0x8 /* (4 * ME_MV_STEP_WEIGHT_1 + 0x100) */
#define V3_SKIP_WEIGHT_2 0x3

#define V3_LEVEL_1_F_SKIP_MAX_SAD 0x0
#define V3_LEVEL_1_SKIP_MAX_SAD 0x6

/* intra-prediction bias weights (lower = preferred mode) */
#define I4_ipred_weight_most 0x18
#define I4_ipred_weight_else 0x28

#define C_ipred_weight_V 0x04
#define C_ipred_weight_H 0x08
#define C_ipred_weight_DC 0x0c

#define I16_ipred_weight_V 0x04
#define I16_ipred_weight_H 0x08
#define I16_ipred_weight_DC 0x0c

/* 0x00 same as disable */
#define v3_left_small_max_ie_sad 0x00
#define v3_left_small_max_me_sad 0x40

/* v5 "simple macroblock" fast-decision controls */
#define v5_use_small_diff_cnt 0
#define v5_simple_mb_inter_all_en 1
#define v5_simple_mb_inter_8x8_en 1
#define v5_simple_mb_inter_16_8_en 1
#define v5_simple_mb_inter_16x16_en 1
#define v5_simple_mb_intra_en 1
#define v5_simple_mb_C_en 0
#define v5_simple_mb_Y_en 1
#define v5_small_diff_Y 0x10
#define v5_small_diff_C 0x18
/* shift 8-bits, 2, 1, 0, -1, -2, -3, -4 */
#define v5_simple_dq_setting 0x43210fed
#define v5_simple_me_weight_setting 0

#ifdef H264_ENC_CBR
/* constant-bit-rate control table parameters */
#define CBR_TABLE_SIZE 0x800
#define CBR_SHORT_SHIFT 12 /* same as disable */
#define CBR_LONG_MB_NUM 2
#define START_TABLE_ID 8
#define CBR_LONG_THRESH 4
#endif
302
/*
 * Per-step MV-to-SAD lookup table for the v3 motion-estimation
 * hardware. Each entry packs an index in the upper half-word and the
 * corresponding SAD threshold in the lower half-word; 16 entries per
 * search step.
 */
static u32 v3_mv_sad[64] = {
	/* For step0 */
	0x00000004,
	0x00010008,
	0x00020010,
	0x00030018,
	0x00040020,
	0x00050028,
	0x00060038,
	0x00070048,
	0x00080058,
	0x00090068,
	0x000a0080,
	0x000b0098,
	0x000c00b0,
	0x000d00c8,
	0x000e00e8,
	0x000f0110,
	/* For step1 */
	0x00100002,
	0x00110004,
	0x00120008,
	0x0013000c,
	0x00140010,
	0x00150014,
	0x0016001c,
	0x00170024,
	0x0018002c,
	0x00190034,
	0x001a0044,
	0x001b0054,
	0x001c0064,
	0x001d0074,
	0x001e0094,
	0x001f00b4,
	/* For step2 */
	0x00200006,
	0x0021000c,
	0x0022000c,
	0x00230018,
	0x00240018,
	0x00250018,
	0x00260018,
	0x00270030,
	0x00280030,
	0x00290030,
	0x002a0030,
	0x002b0030,
	0x002c0030,
	0x002d0030,
	0x002e0030,
	0x002f0050,
	/* For step2 4x4-8x8 */
	0x00300001,
	0x00310002,
	0x00320002,
	0x00330004,
	0x00340004,
	0x00350004,
	0x00360004,
	0x00370006,
	0x00380006,
	0x00390006,
	0x003a0006,
	0x003b0006,
	0x003c0006,
	0x003d0006,
	0x003e0006,
	0x003f0006
};
373
/*
 * Static memory layout for the encoder working buffer, level 0
 * (up to 1920x1088). All buf_start values are byte offsets from the
 * base of the reserved/CMA region; the sub-buffers are laid out
 * back-to-back inside min_buffsize bytes.
 */
static struct BuffInfo_s amvenc_buffspec[] = {
	{
		.lev_id = 0,
		.max_width = 1920,
		.max_height = 1088,
		.min_buffsize = 0x1400000,
		.dct = {
			.buf_start = 0,
			.buf_size = 0x800000, /* 1920x1088x4 */
		},
		.dec0_y = {
			.buf_start = 0x800000,
			.buf_size = 0x300000,
		},
		.dec1_y = {
			.buf_start = 0xb00000,
			.buf_size = 0x300000,
		},
		.assit = {
			.buf_start = 0xe10000,
			.buf_size = 0xc0000,
		},
		.bitstream = {
			.buf_start = 0xf00000,
			.buf_size = 0x100000,
		},
		.scale_buff = {
			.buf_start = 0x1000000,
			.buf_size = 0x300000,
		},
		.dump_info = {
			.buf_start = 0x1300000,
			.buf_size = 0xa0000, /* (1920x1088/256)x80 */
		},
		.cbr_info = {
			.buf_start = 0x13b0000,
			.buf_size = 0x2000,
		}
	}
};
414
/* Microcode flavor selected from the SoC generation. */
enum ucode_type_e {
	UCODE_GXL,
	UCODE_TXL,
	UCODE_G12A,
	UCODE_MAX
};

/*
 * Firmware image name for each ucode type, indexed by enum
 * ucode_type_e. Designated initializers keep each name tied to its
 * enum value, and the explicit UCODE_MAX bound guarantees the table
 * cannot silently get out of step with the enum.
 */
const char *ucode_name[UCODE_MAX] = {
	[UCODE_GXL] = "gxl_h264_enc",
	[UCODE_TXL] = "txl_h264_enc_cavlc",
	[UCODE_G12A] = "ga_h264_enc_cabac",
};
427
/* Forward declarations for helpers defined later in this file. */
static void dma_flush(u32 buf_start, u32 buf_size);
static void cache_flush(u32 buf_start, u32 buf_size);
static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr);
static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg);
432
433static const char *select_ucode(u32 ucode_index)
434{
435 enum ucode_type_e ucode = UCODE_GXL;
436
437 switch (ucode_index) {
438 case UCODE_MODE_FULL:
439 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A)
440 ucode = UCODE_G12A;
441 else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL)
442 ucode = UCODE_TXL;
443 else /* (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) */
444 ucode = UCODE_GXL;
445 break;
446 break;
447 default:
448 break;
449 }
450 return (const char *)ucode_name[ucode];
451}
452
453static void hcodec_prog_qtbl(struct encode_wq_s *wq)
454{
455 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
456 (0 << 23) | /* quant_table_addr */
457 (1 << 22)); /* quant_table_addr_update */
458
459 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
460 wq->quant_tbl_i4[0]);
461 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
462 wq->quant_tbl_i4[1]);
463 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
464 wq->quant_tbl_i4[2]);
465 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
466 wq->quant_tbl_i4[3]);
467 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
468 wq->quant_tbl_i4[4]);
469 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
470 wq->quant_tbl_i4[5]);
471 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
472 wq->quant_tbl_i4[6]);
473 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
474 wq->quant_tbl_i4[7]);
475
476 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
477 (8 << 23) | /* quant_table_addr */
478 (1 << 22)); /* quant_table_addr_update */
479
480 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
481 wq->quant_tbl_i16[0]);
482 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
483 wq->quant_tbl_i16[1]);
484 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
485 wq->quant_tbl_i16[2]);
486 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
487 wq->quant_tbl_i16[3]);
488 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
489 wq->quant_tbl_i16[4]);
490 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
491 wq->quant_tbl_i16[5]);
492 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
493 wq->quant_tbl_i16[6]);
494 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
495 wq->quant_tbl_i16[7]);
496
497 WRITE_HREG(HCODEC_Q_QUANT_CONTROL,
498 (16 << 23) | /* quant_table_addr */
499 (1 << 22)); /* quant_table_addr_update */
500
501 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
502 wq->quant_tbl_me[0]);
503 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
504 wq->quant_tbl_me[1]);
505 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
506 wq->quant_tbl_me[2]);
507 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
508 wq->quant_tbl_me[3]);
509 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
510 wq->quant_tbl_me[4]);
511 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
512 wq->quant_tbl_me[5]);
513 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
514 wq->quant_tbl_me[6]);
515 WRITE_HREG(HCODEC_QUANT_TABLE_DATA,
516 wq->quant_tbl_me[7]);
517}
518
519static void InitEncodeWeight(void)
520{
521 me_mv_merge_ctl =
522 (0x1 << 31) | /* [31] me_merge_mv_en_16 */
523 (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */
524 (0x1 << 29) | /* [29] me_merge_flex_en_16 */
525 (0x1 << 28) | /* [28] me_merge_sad_en_16 */
526 (0x1 << 27) | /* [27] me_merge_mv_en_8 */
527 (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */
528 (0x1 << 25) | /* [25] me_merge_flex_en_8 */
529 (0x1 << 24) | /* [24] me_merge_sad_en_8 */
530 (0x12 << 18) |
531 /* [23:18] me_merge_mv_diff_16 - MV diff
532 * <= n pixel can be merged
533 */
534 (0x2b << 12) |
535 /* [17:12] me_merge_mv_diff_8 - MV diff
536 * <= n pixel can be merged
537 */
538 (0x80 << 0);
539 /* [11:0] me_merge_min_sad - SAD
540 * >= 0x180 can be merged with other MV
541 */
542
543 me_mv_weight_01 = (ME_MV_STEP_WEIGHT_1 << 24) |
544 (ME_MV_PRE_WEIGHT_1 << 16) |
545 (ME_MV_STEP_WEIGHT_0 << 8) |
546 (ME_MV_PRE_WEIGHT_0 << 0);
547
548 me_mv_weight_23 = (ME_MV_STEP_WEIGHT_3 << 24) |
549 (ME_MV_PRE_WEIGHT_3 << 16) |
550 (ME_MV_STEP_WEIGHT_2 << 8) |
551 (ME_MV_PRE_WEIGHT_2 << 0);
552
553 me_sad_range_inc = (ME_SAD_RANGE_3 << 24) |
554 (ME_SAD_RANGE_2 << 16) |
555 (ME_SAD_RANGE_1 << 8) |
556 (ME_SAD_RANGE_0 << 0);
557
558 me_step0_close_mv = (0x100 << 10) |
559 /* me_step0_big_sad -- two MV sad
560 * diff bigger will use use 1
561 */
562 (2 << 5) | /* me_step0_close_mv_y */
563 (2 << 0); /* me_step0_close_mv_x */
564
565 me_f_skip_sad = (0x00 << 24) | /* force_skip_sad_3 */
566 (STEP_2_SKIP_SAD << 16) | /* force_skip_sad_2 */
567 (STEP_1_SKIP_SAD << 8) | /* force_skip_sad_1 */
568 (STEP_0_SKIP_SAD << 0); /* force_skip_sad_0 */
569
570 me_f_skip_weight = (0x00 << 24) | /* force_skip_weight_3 */
571 /* force_skip_weight_2 */
572 (STEP_2_SKIP_WEIGHT << 16) |
573 /* force_skip_weight_1 */
574 (STEP_1_SKIP_WEIGHT << 8) |
575 /* force_skip_weight_0 */
576 (STEP_0_SKIP_WEIGHT << 0);
577
578 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
579 me_f_skip_sad = 0;
580 me_f_skip_weight = 0;
581 me_mv_weight_01 = 0;
582 me_mv_weight_23 = 0;
583 }
584
585 me_sad_enough_01 = (ME_SAD_ENOUGH_1_DATA << 12) |
586 /* me_sad_enough_1 */
587 (ME_SAD_ENOUGH_0_DATA << 0) |
588 /* me_sad_enough_0 */
589 (0 << 12) | /* me_sad_enough_1 */
590 (0 << 0); /* me_sad_enough_0 */
591
592 me_sad_enough_23 = (ADV_MV_8x8_ENOUGH_DATA << 12) |
593 /* adv_mv_8x8_enough */
594 (ME_SAD_ENOUGH_2_DATA << 0) |
595 /* me_sad_enough_2 */
596 (0 << 12) | /* me_sad_enough_3 */
597 (0 << 0); /* me_sad_enough_2 */
598}
599
/*
 * Program the VLC output (bitstream) buffer registers: memory
 * control, start/write/read/end pointers, then enable the buffer.
 * Register write order follows the hardware bring-up sequence.
 */
/*output stream buffer setting*/
static void avc_init_output_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_VLC_VB_MEM_CTL,
		((1 << 31) | (0x3f << 24) |
		(0x20 << 16) | (2 << 0)));
	WRITE_HREG(HCODEC_VLC_VB_START_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_WR_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_END_PTR,
		wq->mem.BitstreamEnd);
	/* reset, then configure and enable the VB */
	WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1);
	WRITE_HREG(HCODEC_VLC_VB_CONTROL,
		((0 << 14) | (7 << 3) |
		(1 << 1) | (0 << 0)));
}
619
/*
 * Program the QDCT macroblock input buffer: start/end of the DCT
 * buffer and both pointers reset to the start.
 */
/*input dct buffer setting*/
static void avc_init_input_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_QDCT_MB_START_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_END_PTR,
		wq->mem.dct_buff_end_addr);
	WRITE_HREG(HCODEC_QDCT_MB_WR_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_RD_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_BUFF, 0);
}
633
/*
 * Point the reference-picture canvas (ANC0) at @canvas and clear
 * the VLC HCMD configuration.
 */
/*input reference buffer setting*/
static void avc_init_reference_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_ANC0_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_VLC_HCMD_CONFIG, 0);
}
640
/* Tell the firmware where the assist (scratch) buffer lives. */
static void avc_init_assit_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(MEM_OFFSET_REG, wq->mem.assit_buffer_offset);
}
645
/*
 * Use the same canvas for reconstruction and both deblock
 * read/write targets.
 */
/*deblock buffer setting, same as INI_CANVAS*/
static void avc_init_dblk_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_REC_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKR_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKW_CANVAS_ADDR, canvas);
}
653
/*
 * Program the per-picture encoder registers.
 * @wq:  encode work queue carrying the current picture parameters
 * @idr: true for an IDR picture, in which case frame_number and
 *       pic_order_cnt_lsb are forced to 0
 */
static void avc_init_encoder(struct encode_wq_s *wq, bool idr)
{
	WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0);
	WRITE_HREG(HCODEC_VLC_CONFIG, 0x07);
	WRITE_HREG(HCODEC_VLC_INT_CONTROL, 0);

	/* assist AMR interrupt mailbox setup */
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14);

	WRITE_HREG(IDR_PIC_ID, wq->pic.idr_pic_id);
	WRITE_HREG(FRAME_NUMBER,
		(idr == true) ? 0 : wq->pic.frame_number);
	WRITE_HREG(PIC_ORDER_CNT_LSB,
		(idr == true) ? 0 : wq->pic.pic_order_cnt_lsb);

	WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB,
		wq->pic.log2_max_pic_order_cnt_lsb);
	WRITE_HREG(LOG2_MAX_FRAME_NUM,
		wq->pic.log2_max_frame_num);
	WRITE_HREG(ANC0_BUFFER_ID, 0);
	WRITE_HREG(QPPICTURE, wq->pic.init_qppicture);
}
677
/*
 * Configure the six encoder canvases: indices 0-2 map the dec0
 * reference frame (Y plane + UV plane twice, since the "third
 * plane" reuses the UV address), indices 3-5 do the same for dec1.
 * Width is rounded up to 32, height to 16; chroma canvases are
 * half-height (NV12-style layout).
 */
static void avc_canvas_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	canvas_config(ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(1 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(2 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec0_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);

	canvas_config(3 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_y.buf_start,
		canvas_width, canvas_height,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	canvas_config(4 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
	/*here the third plane use the same address as the second plane*/
	canvas_config(5 + ENC_CANVAS_OFFSET,
		start_addr + wq->mem.bufspec.dec1_uv.buf_start,
		canvas_width, canvas_height / 2,
		CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
}
714
/*
 * Translate the static buffer-offset spec into absolute addresses
 * for this work queue: DCT input, reference-frame UV planes,
 * assist, bitstream, scaler, dump-info and CBR-info regions, plus
 * the packed canvas-index words for the deblock and reference
 * buffers.
 */
static void avc_buffspec_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;
	u32 mb_w = (wq->pic.encoder_width + 15) >> 4;
	u32 mb_h = (wq->pic.encoder_height + 15) >> 4;
	u32 mbs = mb_w * mb_h;

	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	wq->mem.dct_buff_start_addr = start_addr +
		wq->mem.bufspec.dct.buf_start;
	wq->mem.dct_buff_end_addr =
		wq->mem.dct_buff_start_addr +
		wq->mem.bufspec.dct.buf_size - 1;
	enc_pr(LOG_INFO, "dct_buff_start_addr is 0x%x, wq:%p.\n",
		wq->mem.dct_buff_start_addr, (void *)wq);

	/* UV planes sit immediately after their Y plane */
	wq->mem.bufspec.dec0_uv.buf_start =
		wq->mem.bufspec.dec0_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec0_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.bufspec.dec1_uv.buf_start =
		wq->mem.bufspec.dec1_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec1_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.assit_buffer_offset = start_addr +
		wq->mem.bufspec.assit.buf_start;
	enc_pr(LOG_INFO, "assit_buffer_offset is 0x%x, wq: %p.\n",
		wq->mem.assit_buffer_offset, (void *)wq);
	/*output stream buffer config*/
	wq->mem.BitstreamStart = start_addr +
		wq->mem.bufspec.bitstream.buf_start;
	wq->mem.BitstreamEnd =
		wq->mem.BitstreamStart +
		wq->mem.bufspec.bitstream.buf_size - 1;
	enc_pr(LOG_INFO, "BitstreamStart is 0x%x, wq: %p.\n",
		wq->mem.BitstreamStart, (void *)wq);

	wq->mem.scaler_buff_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.scale_buff.buf_start;
	wq->mem.dump_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.dump_info.buf_start;
	enc_pr(LOG_INFO,
		"CBR: dump_info_ddr_start_addr:%x.\n",
		wq->mem.dump_info_ddr_start_addr);
	enc_pr(LOG_INFO, "CBR: buf_start :%d.\n",
		wq->mem.buf_start);
	enc_pr(LOG_INFO, "CBR: dump_info.buf_start :%d.\n",
		wq->mem.bufspec.dump_info.buf_start);
	/* dump-info size: 80 bytes per MB, rounded up to a page */
	wq->mem.dump_info_ddr_size =
		DUMP_INFO_BYTES_PER_MB * mbs;
	wq->mem.dump_info_ddr_size =
		(wq->mem.dump_info_ddr_size + PAGE_SIZE - 1)
		& ~(PAGE_SIZE - 1);
	wq->mem.cbr_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.cbr_info.buf_start;
	wq->mem.cbr_info_ddr_size =
		wq->mem.bufspec.cbr_info.buf_size;
	wq->mem.cbr_info_ddr_virt_addr =
		codec_mm_vmap(wq->mem.cbr_info_ddr_start_addr,
			wq->mem.bufspec.cbr_info.buf_size);

	/* pack the three plane canvas indices into one word each */
	wq->mem.dblk_buf_canvas =
		((ENC_CANVAS_OFFSET + 2) << 16) |
		((ENC_CANVAS_OFFSET + 1) << 8) |
		(ENC_CANVAS_OFFSET);
	wq->mem.ref_buf_canvas =
		((ENC_CANVAS_OFFSET + 5) << 16) |
		((ENC_CANVAS_OFFSET + 4) << 8) |
		(ENC_CANVAS_OFFSET + 3);
}
788
/*
 * Program the intra/inter estimation mode registers and the fixed
 * slice configuration. @quant is currently unused here (kept for
 * interface compatibility with callers).
 */
static void avc_init_ie_me_parameter(struct encode_wq_s *wq, u32 quant)
{
	ie_cur_ref_sel = 0;
	ie_pippeline_block = 12;
	/* currently disable half and sub pixel */
	ie_me_mode =
		(ie_pippeline_block & IE_PIPPELINE_BLOCK_MASK) <<
		IE_PIPPELINE_BLOCK_SHIFT;

	WRITE_HREG(IE_ME_MODE, ie_me_mode);
	WRITE_HREG(IE_REF_SEL, ie_cur_ref_sel);
	WRITE_HREG(IE_ME_MB_TYPE, ie_me_mb_type);
#ifdef MULTI_SLICE_MC
	/* module-param override wins; otherwise derive MBs per slice
	 * from rows_per_slice, or 0 for one slice per picture
	 */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
}
816
/*
 * MFDIN noise-reduction register aliases not yet in the register
 * headers; addressed relative to HCODEC_MFDIN_REGB_AMPC.
 */
/* for temp */
#define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1)
#define HCODEC_MFDIN_REG0D (HCODEC_MFDIN_REGB_AMPC + 0x2)
#define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3)
#define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4)
#define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5)
#define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6)
#define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7)
#define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8)
#define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9)
#define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa)
#define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb)
829
830static void mfdin_basic(u32 input, u8 iformat,
831 u8 oformat, u32 picsize_x, u32 picsize_y,
832 u8 r2y_en, u8 nr, u8 ifmt_extra)
833{
834 u8 dsample_en; /* Downsample Enable */
835 u8 interp_en; /* Interpolation Enable */
836 u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */
837 u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */
838 /* mfdin_reg3_canv[25:24];
839 * // bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3
840 */
841 u8 canv_idx0_bppx;
842 /* mfdin_reg3_canv[27:26];
843 * // bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3
844 */
845 u8 canv_idx1_bppx;
846 /* mfdin_reg3_canv[29:28];
847 * // bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3
848 */
849 u8 canv_idx0_bppy;
850 /* mfdin_reg3_canv[31:30];
851 * // bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3
852 */
853 u8 canv_idx1_bppy;
854 u8 ifmt444, ifmt422, ifmt420, linear_bytes4p;
855 u8 nr_enable;
856 u8 cfg_y_snr_en;
857 u8 cfg_y_tnr_en;
858 u8 cfg_c_snr_en;
859 u8 cfg_c_tnr_en;
860 u32 linear_bytesperline;
861 s32 reg_offset;
862 bool linear_enable = false;
863 bool format_err = false;
864
865 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
866 if ((iformat == 7) && (ifmt_extra > 2))
867 format_err = true;
868 } else if (iformat == 7)
869 format_err = true;
870
871 if (format_err) {
872 enc_pr(LOG_ERROR,
873 "mfdin format err, iformat:%d, ifmt_extra:%d\n",
874 iformat, ifmt_extra);
875 return;
876 }
877 if (iformat != 7)
878 ifmt_extra = 0;
879
880 ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) ||
881 (iformat == 9) || (iformat == 12)) ? 1 : 0;
882 if (iformat == 7 && ifmt_extra == 1)
883 ifmt444 = 1;
884 ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0;
885 if (iformat == 7 && ifmt_extra != 1)
886 ifmt422 = 1;
887 ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) ||
888 (iformat == 11)) ? 1 : 0;
889 dsample_en = ((ifmt444 && (oformat != 2)) ||
890 (ifmt422 && (oformat == 0))) ? 1 : 0;
891 interp_en = ((ifmt422 && (oformat == 2)) ||
892 (ifmt420 && (oformat != 0))) ? 1 : 0;
893 y_size = (oformat != 0) ? 1 : 0;
894 if (iformat == 12)
895 y_size = 0;
896 r2y_mode = (r2y_en == 1) ? 1 : 0; /* Fixed to 1 (TODO) */
897 canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 2 : 1;
898 canv_idx1_bppx = (iformat == 4) ? 0 : 1;
899 canv_idx0_bppy = 1;
900 canv_idx1_bppy = (iformat == 5) ? 1 : 0;
901
902 if ((iformat == 8) || (iformat == 9) || (iformat == 12))
903 linear_bytes4p = 3;
904 else if (iformat == 10)
905 linear_bytes4p = 2;
906 else if (iformat == 11)
907 linear_bytes4p = 1;
908 else
909 linear_bytes4p = 0;
910 if (iformat == 12)
911 linear_bytesperline = picsize_x * 4;
912 else
913 linear_bytesperline = picsize_x * linear_bytes4p;
914
915 if (iformat < 8)
916 linear_enable = false;
917 else
918 linear_enable = true;
919
920 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXBB) {
921 reg_offset = -8;
922 /* nr_mode: 0:Disabled 1:SNR Only 2:TNR Only 3:3DNR */
923 nr_enable = (nr) ? 1 : 0;
924 cfg_y_snr_en = ((nr == 1) || (nr == 3)) ? 1 : 0;
925 cfg_y_tnr_en = ((nr == 2) || (nr == 3)) ? 1 : 0;
926 cfg_c_snr_en = cfg_y_snr_en;
927 /* cfg_c_tnr_en = cfg_y_tnr_en; */
928 cfg_c_tnr_en = 0;
929
930 /* NR For Y */
931 WRITE_HREG((HCODEC_MFDIN_REG0D + reg_offset),
932 ((cfg_y_snr_en << 0) |
933 (y_snr_err_norm << 1) |
934 (y_snr_gau_bld_core << 2) |
935 (((y_snr_gau_bld_ofst) & 0xff) << 6) |
936 (y_snr_gau_bld_rate << 14) |
937 (y_snr_gau_alp0_min << 20) |
938 (y_snr_gau_alp0_max << 26)));
939 WRITE_HREG((HCODEC_MFDIN_REG0E + reg_offset),
940 ((cfg_y_tnr_en << 0) |
941 (y_tnr_mc_en << 1) |
942 (y_tnr_txt_mode << 2) |
943 (y_tnr_mot_sad_margin << 3) |
944 (y_tnr_alpha_min << 7) |
945 (y_tnr_alpha_max << 13) |
946 (y_tnr_deghost_os << 19)));
947 WRITE_HREG((HCODEC_MFDIN_REG0F + reg_offset),
948 ((y_tnr_mot_cortxt_rate << 0) |
949 (y_tnr_mot_distxt_ofst << 8) |
950 (y_tnr_mot_distxt_rate << 4) |
951 (y_tnr_mot_dismot_ofst << 16) |
952 (y_tnr_mot_frcsad_lock << 24)));
953 WRITE_HREG((HCODEC_MFDIN_REG10 + reg_offset),
954 ((y_tnr_mot2alp_frc_gain << 0) |
955 (y_tnr_mot2alp_nrm_gain << 8) |
956 (y_tnr_mot2alp_dis_gain << 16) |
957 (y_tnr_mot2alp_dis_ofst << 24)));
958 WRITE_HREG((HCODEC_MFDIN_REG11 + reg_offset),
959 ((y_bld_beta2alp_rate << 0) |
960 (y_bld_beta_min << 8) |
961 (y_bld_beta_max << 14)));
962
963 /* NR For C */
964 WRITE_HREG((HCODEC_MFDIN_REG12 + reg_offset),
965 ((cfg_y_snr_en << 0) |
966 (c_snr_err_norm << 1) |
967 (c_snr_gau_bld_core << 2) |
968 (((c_snr_gau_bld_ofst) & 0xff) << 6) |
969 (c_snr_gau_bld_rate << 14) |
970 (c_snr_gau_alp0_min << 20) |
971 (c_snr_gau_alp0_max << 26)));
972
973 WRITE_HREG((HCODEC_MFDIN_REG13 + reg_offset),
974 ((cfg_c_tnr_en << 0) |
975 (c_tnr_mc_en << 1) |
976 (c_tnr_txt_mode << 2) |
977 (c_tnr_mot_sad_margin << 3) |
978 (c_tnr_alpha_min << 7) |
979 (c_tnr_alpha_max << 13) |
980 (c_tnr_deghost_os << 19)));
981 WRITE_HREG((HCODEC_MFDIN_REG14 + reg_offset),
982 ((c_tnr_mot_cortxt_rate << 0) |
983 (c_tnr_mot_distxt_ofst << 8) |
984 (c_tnr_mot_distxt_rate << 4) |
985 (c_tnr_mot_dismot_ofst << 16) |
986 (c_tnr_mot_frcsad_lock << 24)));
987 WRITE_HREG((HCODEC_MFDIN_REG15 + reg_offset),
988 ((c_tnr_mot2alp_frc_gain << 0) |
989 (c_tnr_mot2alp_nrm_gain << 8) |
990 (c_tnr_mot2alp_dis_gain << 16) |
991 (c_tnr_mot2alp_dis_ofst << 24)));
992
993 WRITE_HREG((HCODEC_MFDIN_REG16 + reg_offset),
994 ((c_bld_beta2alp_rate << 0) |
995 (c_bld_beta_min << 8) |
996 (c_bld_beta_max << 14)));
997
998 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
999 (iformat << 0) | (oformat << 4) |
1000 (dsample_en << 6) | (y_size << 8) |
1001 (interp_en << 9) | (r2y_en << 12) |
1002 (r2y_mode << 13) | (ifmt_extra << 16) |
1003 (nr_enable << 19));
1004 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1005 (picsize_x << 14) | (picsize_y << 0));
1006 } else {
1007 reg_offset = 0;
1008 WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset),
1009 (iformat << 0) | (oformat << 4) |
1010 (dsample_en << 6) | (y_size << 8) |
1011 (interp_en << 9) | (r2y_en << 12) |
1012 (r2y_mode << 13));
1013 WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset),
1014 (picsize_x << 12) | (picsize_y << 0));
1015 }
1016
1017 if (linear_enable == false) {
1018 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1019 (input & 0xffffff) |
1020 (canv_idx1_bppy << 30) |
1021 (canv_idx0_bppy << 28) |
1022 (canv_idx1_bppx << 26) |
1023 (canv_idx0_bppx << 24));
1024 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1025 (0 << 16) | (0 << 0));
1026 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0);
1027 } else {
1028 WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset),
1029 (canv_idx1_bppy << 30) |
1030 (canv_idx0_bppy << 28) |
1031 (canv_idx1_bppx << 26) |
1032 (canv_idx0_bppx << 24));
1033 WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset),
1034 (linear_bytes4p << 16) | (linear_bytesperline << 0));
1035 WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input);
1036 }
1037
1038 if (iformat == 12)
1039 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1040 (2 << 0) | (1 << 3) | (0 << 6) |
1041 (3 << 9) | (6 << 12) | (5 << 15) |
1042 (4 << 18) | (7 << 21));
1043 else
1044 WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset),
1045 (7 << 0) | (6 << 3) | (5 << 6) |
1046 (4 << 9) | (3 << 12) | (2 << 15) |
1047 (1 << 18) | (0 << 21));
1048}
1049
1050#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
1051static int scale_frame(struct encode_wq_s *wq,
1052 struct encode_request_s *request,
1053 struct config_para_ex_s *ge2d_config,
1054 u32 src_addr, bool canvas)
1055{
1056 struct ge2d_context_s *context = encode_manager.context;
1057 int src_top, src_left, src_width, src_height;
1058 struct canvas_s cs0, cs1, cs2, cd;
1059 u32 src_canvas, dst_canvas;
1060 u32 src_canvas_w, dst_canvas_w;
1061 u32 src_h = request->src_h;
1062 u32 dst_w = ((wq->pic.encoder_width + 15) >> 4) << 4;
1063 u32 dst_h = ((wq->pic.encoder_height + 15) >> 4) << 4;
1064 int input_format = GE2D_FORMAT_M24_NV21;
1065
1066 src_top = request->crop_top;
1067 src_left = request->crop_left;
1068 src_width = request->src_w - src_left - request->crop_right;
1069 src_height = request->src_h - src_top - request->crop_bottom;
1070 pr_err("request->fmt=%d, %d %d, canvas=%d\n", request->fmt, FMT_NV21, FMT_BGR888, canvas);
1071
1072 if (canvas) {
1073 if ((request->fmt == FMT_NV21)
1074 || (request->fmt == FMT_NV12)) {
1075 src_canvas = src_addr & 0xffff;
1076 input_format = GE2D_FORMAT_M24_NV21;
1077 } else if (request->fmt == FMT_BGR888) {
1078 src_canvas = src_addr & 0xffffff;
1079 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1080 } else if (request->fmt == FMT_RGBA8888) {
1081 src_canvas = src_addr & 0xffffff;
1082 input_format = GE2D_FORMAT_S32_ABGR;
1083 } else {
1084 src_canvas = src_addr & 0xffffff;
1085 input_format = GE2D_FORMAT_M24_YUV420;
1086 }
1087 } else {
1088 if ((request->fmt == FMT_NV21)
1089 || (request->fmt == FMT_NV12)) {
1090 src_canvas_w =
1091 ((request->src_w + 31) >> 5) << 5;
1092 canvas_config(ENC_CANVAS_OFFSET + 9,
1093 src_addr,
1094 src_canvas_w, src_h,
1095 CANVAS_ADDR_NOWRAP,
1096 CANVAS_BLKMODE_LINEAR);
1097 canvas_config(ENC_CANVAS_OFFSET + 10,
1098 src_addr + src_canvas_w * src_h,
1099 src_canvas_w, src_h / 2,
1100 CANVAS_ADDR_NOWRAP,
1101 CANVAS_BLKMODE_LINEAR);
1102 src_canvas =
1103 ((ENC_CANVAS_OFFSET + 10) << 8)
1104 | (ENC_CANVAS_OFFSET + 9);
1105 input_format = GE2D_FORMAT_M24_NV21;
1106 } else if (request->fmt == FMT_BGR888) {
1107 src_canvas_w =
1108 ((request->src_w + 31) >> 5) << 5;
1109
1110 canvas_config(ENC_CANVAS_OFFSET + 9,
1111 src_addr,
1112 src_canvas_w * 3, src_h,
1113 CANVAS_ADDR_NOWRAP,
1114 CANVAS_BLKMODE_LINEAR);
1115 src_canvas = ENC_CANVAS_OFFSET + 9;
1116 input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d
1117 } else if (request->fmt == FMT_RGBA8888) {
1118 src_canvas_w =
1119 ((request->src_w + 31) >> 5) << 5;
1120 canvas_config(
1121 ENC_CANVAS_OFFSET + 9,
1122 src_addr,
1123 src_canvas_w * 4,
1124 src_h,
1125 CANVAS_ADDR_NOWRAP,
1126 CANVAS_BLKMODE_LINEAR);
1127 src_canvas = ENC_CANVAS_OFFSET + 9;
1128 input_format = GE2D_FORMAT_S32_ABGR; //Opposite color after ge2d
1129 } else {
1130 src_canvas_w =
1131 ((request->src_w + 63) >> 6) << 6;
1132 canvas_config(ENC_CANVAS_OFFSET + 9,
1133 src_addr,
1134 src_canvas_w, src_h,
1135 CANVAS_ADDR_NOWRAP,
1136 CANVAS_BLKMODE_LINEAR);
1137 canvas_config(ENC_CANVAS_OFFSET + 10,
1138 src_addr + src_canvas_w * src_h,
1139 src_canvas_w / 2, src_h / 2,
1140 CANVAS_ADDR_NOWRAP,
1141 CANVAS_BLKMODE_LINEAR);
1142 canvas_config(ENC_CANVAS_OFFSET + 11,
1143 src_addr + src_canvas_w * src_h * 5 / 4,
1144 src_canvas_w / 2, src_h / 2,
1145 CANVAS_ADDR_NOWRAP,
1146 CANVAS_BLKMODE_LINEAR);
1147 src_canvas =
1148 ((ENC_CANVAS_OFFSET + 11) << 16) |
1149 ((ENC_CANVAS_OFFSET + 10) << 8) |
1150 (ENC_CANVAS_OFFSET + 9);
1151 input_format = GE2D_FORMAT_M24_YUV420;
1152 }
1153 }
1154
1155 dst_canvas_w = ((dst_w + 31) >> 5) << 5;
1156
1157 canvas_config(ENC_CANVAS_OFFSET + 6,
1158 wq->mem.scaler_buff_start_addr,
1159 dst_canvas_w, dst_h,
1160 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1161
1162 canvas_config(ENC_CANVAS_OFFSET + 7,
1163 wq->mem.scaler_buff_start_addr + dst_canvas_w * dst_h,
1164 dst_canvas_w, dst_h / 2,
1165 CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR);
1166
1167 dst_canvas = ((ENC_CANVAS_OFFSET + 7) << 8) |
1168 (ENC_CANVAS_OFFSET + 6);
1169
1170 ge2d_config->alu_const_color = 0;
1171 ge2d_config->bitmask_en = 0;
1172 ge2d_config->src1_gb_alpha = 0;
1173 ge2d_config->dst_xy_swap = 0;
1174 canvas_read(src_canvas & 0xff, &cs0);
1175 canvas_read((src_canvas >> 8) & 0xff, &cs1);
1176 canvas_read((src_canvas >> 16) & 0xff, &cs2);
1177 ge2d_config->src_planes[0].addr = cs0.addr;
1178 ge2d_config->src_planes[0].w = dst_w * 4;//cs0.width;
1179 ge2d_config->src_planes[0].h = dst_h;//cs0.height;
1180 ge2d_config->src_planes[1].addr = cs1.addr;
1181 ge2d_config->src_planes[1].w = cs1.width;
1182 ge2d_config->src_planes[1].h = cs1.height;
1183 ge2d_config->src_planes[2].addr = cs2.addr;
1184 ge2d_config->src_planes[2].w = cs2.width;
1185 ge2d_config->src_planes[2].h = cs2.height;
1186
1187 canvas_read(dst_canvas & 0xff, &cd);
1188
1189 ge2d_config->dst_planes[0].addr = cd.addr;
1190 ge2d_config->dst_planes[0].w = dst_w * 4;//cd.width;
1191 ge2d_config->dst_planes[0].h = dst_h;//cd.height;
1192 ge2d_config->src_key.key_enable = 0;
1193 ge2d_config->src_key.key_mask = 0;
1194 ge2d_config->src_key.key_mode = 0;
1195 ge2d_config->src_para.canvas_index = src_canvas;
1196 ge2d_config->src_para.mem_type = CANVAS_TYPE_INVALID;
1197 ge2d_config->src_para.format = input_format | GE2D_LITTLE_ENDIAN;
1198 ge2d_config->src_para.fill_color_en = 0;
1199 ge2d_config->src_para.fill_mode = 0;
1200 ge2d_config->src_para.x_rev = 0;
1201 ge2d_config->src_para.y_rev = 0;
1202 ge2d_config->src_para.color = 0xffffffff;
1203 ge2d_config->src_para.top = 0;
1204 ge2d_config->src_para.left = 0;
1205 ge2d_config->src_para.width = dst_w;//request->src_w;
1206 ge2d_config->src_para.height = dst_h;//request->src_h;
1207 ge2d_config->src2_para.mem_type = CANVAS_TYPE_INVALID;
1208 ge2d_config->dst_para.canvas_index = dst_canvas;
1209 ge2d_config->dst_para.mem_type = CANVAS_TYPE_INVALID;
1210 ge2d_config->dst_para.format =
1211 GE2D_FORMAT_M24_NV21 | GE2D_LITTLE_ENDIAN;
1212
1213 if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720) {
1214 ge2d_config->dst_para.format |= GE2D_FORMAT_BT_STANDARD;
1215 }
1216
1217 ge2d_config->dst_para.fill_color_en = 0;
1218 ge2d_config->dst_para.fill_mode = 0;
1219 ge2d_config->dst_para.x_rev = 0;
1220 ge2d_config->dst_para.y_rev = 0;
1221 ge2d_config->dst_para.color = 0;
1222 ge2d_config->dst_para.top = 0;
1223 ge2d_config->dst_para.left = 0;
1224 ge2d_config->dst_para.width = dst_w;
1225 ge2d_config->dst_para.height = dst_h;
1226 ge2d_config->dst_para.x_rev = 0;
1227 ge2d_config->dst_para.y_rev = 0;
1228
1229
1230 if (ge2d_context_config_ex(context, ge2d_config) < 0) {
1231 pr_err("++ge2d configing error.\n");
1232 return -1;
1233 }
1234 stretchblt_noalpha(context, src_left, src_top, src_width, src_height,
1235 0, 0, wq->pic.encoder_width, wq->pic.encoder_height);
1236 return dst_canvas_w*dst_h * 3 / 2;
1237}
1238#endif
1239
/*
 * set_input_format() - program the MFDIN input path for one frame.
 *
 * Translates the request's buffer type (LOCAL/PHYSICAL/DMA/CANVAS) and
 * pixel format into an MFDIN input format code (iformat), an optional
 * RGB->YUV conversion flag (r2y_en) and a packed canvas/linear source
 * descriptor (input), then hands everything to mfdin_basic().
 *
 * @wq:      encoder work queue (target size, internal buffer addresses).
 * @request: encode request describing the source frame.
 *
 * Return: 0 on success, -1 for unsupported formats/configurations.
 */
static s32 set_input_format(struct encode_wq_s *wq,
	struct encode_request_s *request)
{
	s32 ret = 0;
	u8 iformat = MAX_FRAME_FMT, oformat = MAX_FRAME_FMT, r2y_en = 0;
	u32 picsize_x, picsize_y, src_addr;
	u32 canvas_w = 0;
	u32 input = request->src;
	u32 input_y = 0;
	u32 input_u = 0;
	u32 input_v = 0;
	u8 ifmt_extra = 0;

	/* RGB565 and out-of-range format codes are not supported */
	if ((request->fmt == FMT_RGB565) || (request->fmt >= MAX_FRAME_FMT))
		return -1;

	/* round the encode size up to whole 16x16 macroblocks */
	picsize_x = ((wq->pic.encoder_width + 15) >> 4) << 4;
	picsize_y = ((wq->pic.encoder_height + 15) >> 4) << 4;
	oformat = 0;

	if ((request->type == LOCAL_BUFF)
		|| (request->type == PHYSICAL_BUFF)
		|| (request->type == DMA_BUFF)) {
		/* flush CPU writes before the HW reads the local buffer */
		if ((request->type == LOCAL_BUFF) &&
			(request->flush_flag & AMVENC_FLUSH_FLAG_INPUT))
			dma_flush(wq->mem.dct_buff_start_addr,
				request->framesize);
		if (request->type == LOCAL_BUFF) {
			input = wq->mem.dct_buff_start_addr;
			src_addr =
				wq->mem.dct_buff_start_addr;
		} else if (request->type == DMA_BUFF) {
			/*
			 * Resolve per-plane physical addresses.  With a
			 * single plane, chroma offsets are derived from
			 * the macroblock-aligned luma size.
			 */
			if (request->plane_num == 3) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				input_u = (unsigned long)request->dma_cfg[1].paddr;
				input_v = (unsigned long)request->dma_cfg[2].paddr;
			} else if (request->plane_num == 2) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				input_u = (unsigned long)request->dma_cfg[1].paddr;
				input_v = input_u;
			} else if (request->plane_num == 1) {
				input_y = (unsigned long)request->dma_cfg[0].paddr;
				if (request->fmt == FMT_NV21
					|| request->fmt == FMT_NV12) {
					input_u = input_y + picsize_x * picsize_y;
					input_v = input_u;
				}
				if (request->fmt == FMT_YUV420) {
					input_u = input_y + picsize_x * picsize_y;
					input_v = input_u + picsize_x * picsize_y / 4;
				}
			}
			src_addr = input_y;
			/* DMA path keeps the exact (unaligned) height */
			picsize_y = wq->pic.encoder_height;
			enc_pr(LOG_INFO, "dma addr[0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx]\n",
				(unsigned long)request->dma_cfg[0].vaddr,
				(unsigned long)request->dma_cfg[0].paddr,
				(unsigned long)request->dma_cfg[1].vaddr,
				(unsigned long)request->dma_cfg[1].paddr,
				(unsigned long)request->dma_cfg[2].vaddr,
				(unsigned long)request->dma_cfg[2].paddr);
		} else {
			src_addr = input;
			picsize_y = wq->pic.encoder_height;
		}
		if (request->scale_enable) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			struct config_para_ex_s ge2d_config;

			memset(&ge2d_config, 0,
				sizeof(struct config_para_ex_s));
			/* GE2D scales into canvases 6/7 as NV21 */
			scale_frame(
				wq, request,
				&ge2d_config,
				src_addr,
				false);
			iformat = 2;
			r2y_en = 0;
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
			ret = 0;
			goto MFDIN;
#else
			enc_pr(LOG_ERROR,
				"Warning: need enable ge2d for scale frame!\n");
			return -1;
#endif
		}
		/* RGB-family formats between the two bounds need r2y */
		if ((request->fmt <= FMT_YUV444_PLANE) ||
			(request->fmt >= FMT_YUV422_12BIT))
			r2y_en = 0;
		else
			r2y_en = 1;

		if (request->fmt >= FMT_YUV422_12BIT) {
			/* high-bit-depth single-canvas formats */
			iformat = 7;
			ifmt_extra = request->fmt - FMT_YUV422_12BIT;
			if (request->fmt == FMT_YUV422_12BIT)
				canvas_w = picsize_x * 24 / 8;
			else if (request->fmt == FMT_YUV444_10BIT)
				canvas_w = picsize_x * 32 / 8;
			else
				canvas_w = (picsize_x * 20 + 7) / 8;
			canvas_w = ((canvas_w + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ENC_CANVAS_OFFSET + 6;
			input = input & 0xff;
		} else if (request->fmt == FMT_YUV422_SINGLE)
			iformat = 10;
		else if ((request->fmt == FMT_YUV444_SINGLE)
			|| (request->fmt == FMT_RGB888)) {
			iformat = 1;
			if (request->fmt == FMT_RGB888)
				r2y_en = 1;
			canvas_w = picsize_x * 3;
			canvas_w = ((canvas_w + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ENC_CANVAS_OFFSET + 6;
		} else if ((request->fmt == FMT_NV21)
			|| (request->fmt == FMT_NV12)) {
			/* two-plane: luma on canvas 6, chroma on canvas 7 */
			canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
			iformat = (request->fmt == FMT_NV21) ? 2 : 3;
			if (request->type == DMA_BUFF) {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input_y,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input_u,
					canvas_w, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			} else {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input + canvas_w * picsize_y,
					canvas_w, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			}
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if (request->fmt == FMT_YUV420) {
			/* three-plane 4:2:0: canvases 6/7/8, 64-aligned luma */
			iformat = 4;
			canvas_w = ((wq->pic.encoder_width + 63) >> 6) << 6;
			if (request->type == DMA_BUFF) {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input_y,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input_u,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 8,
					input_v,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
			} else {
				canvas_config(ENC_CANVAS_OFFSET + 6,
					input,
					canvas_w, picsize_y,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 7,
					input + canvas_w * picsize_y,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);
				canvas_config(ENC_CANVAS_OFFSET + 8,
					input + canvas_w * picsize_y * 5 / 4,
					canvas_w / 2, picsize_y / 2,
					CANVAS_ADDR_NOWRAP,
					CANVAS_BLKMODE_LINEAR);

			}
			input = ((ENC_CANVAS_OFFSET + 8) << 16) |
				((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if ((request->fmt == FMT_YUV444_PLANE)
			|| (request->fmt == FMT_RGB888_PLANE)) {
			/* three full-resolution planes on canvases 6/7/8 */
			if (request->fmt == FMT_RGB888_PLANE)
				r2y_en = 1;
			iformat = 5;
			canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5;
			canvas_config(ENC_CANVAS_OFFSET + 6,
				input,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			canvas_config(ENC_CANVAS_OFFSET + 7,
				input + canvas_w * picsize_y,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			canvas_config(ENC_CANVAS_OFFSET + 8,
				input + canvas_w * picsize_y * 2,
				canvas_w, picsize_y,
				CANVAS_ADDR_NOWRAP,
				CANVAS_BLKMODE_LINEAR);
			input = ((ENC_CANVAS_OFFSET + 8) << 16) |
				((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
		} else if (request->fmt == FMT_RGBA8888) {
			r2y_en = 1;
			iformat = 12;
		}
		ret = 0;
	} else if (request->type == CANVAS_BUFF) {
		/* caller supplied canvas indices packed into 'input' */
		r2y_en = 0;
		if (request->scale_enable) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			struct config_para_ex_s ge2d_config;
			memset(&ge2d_config, 0,
				sizeof(struct config_para_ex_s));
			/* GE2D scales into canvases 6/7 as NV21 */
			scale_frame(
				wq, request,
				&ge2d_config,
				input, true);
			iformat = 2;
			r2y_en = 0;
			input = ((ENC_CANVAS_OFFSET + 7) << 8) |
				(ENC_CANVAS_OFFSET + 6);
			ret = 0;
			goto MFDIN;
#else
			enc_pr(LOG_ERROR,
				"Warning: need enable ge2d for scale frame!\n");
			return -1;
#endif
		}
		/* mask 'input' to the number of canvas indices used */
		if (request->fmt == FMT_YUV422_SINGLE) {
			iformat = 0;
			input = input & 0xff;
		} else if (request->fmt == FMT_YUV444_SINGLE) {
			iformat = 1;
			input = input & 0xff;
		} else if ((request->fmt == FMT_NV21)
			|| (request->fmt == FMT_NV12)) {
			iformat = (request->fmt == FMT_NV21) ? 2 : 3;
			input = input & 0xffff;
		} else if (request->fmt == FMT_YUV420) {
			iformat = 4;
			input = input & 0xffffff;
		} else if ((request->fmt == FMT_YUV444_PLANE)
			|| (request->fmt == FMT_RGB888_PLANE)) {
			if (request->fmt == FMT_RGB888_PLANE)
				r2y_en = 1;
			iformat = 5;
			input = input & 0xffffff;
		} else if ((request->fmt == FMT_YUV422_12BIT)
			|| (request->fmt == FMT_YUV444_10BIT)
			|| (request->fmt == FMT_YUV422_10BIT)) {
			iformat = 7;
			ifmt_extra = request->fmt - FMT_YUV422_12BIT;
			input = input & 0xff;
		} else
			ret = -1;
	}
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
MFDIN:
#endif
	if (ret == 0)
		mfdin_basic(input, iformat, oformat,
			picsize_x, picsize_y, r2y_en,
			request->nr_mode, ifmt_extra);
	return ret;
}
1524
1525#ifdef H264_ENC_CBR
1526static void ConvertTable2Risc(void *table, u32 len)
1527{
1528 u32 i, j;
1529 u16 temp;
1530 u16 *tbl = (u16 *)table;
1531
1532 if ((len < 8) || (len % 8) || (!table)) {
1533 enc_pr(LOG_ERROR, "ConvertTable2Risc tbl %p, len %d error\n",
1534 table, len);
1535 return;
1536 }
1537 for (i = 0; i < len / 8; i++) {
1538 j = i << 2;
1539 temp = tbl[j];
1540 tbl[j] = tbl[j + 3];
1541 tbl[j + 3] = temp;
1542
1543 temp = tbl[j + 1];
1544 tbl[j + 1] = tbl[j + 2];
1545 tbl[j + 2] = temp;
1546 }
1547
1548}
1549#endif
1550
1551static void avc_prot_init(struct encode_wq_s *wq,
1552 struct encode_request_s *request, u32 quant, bool IDR)
1553{
1554 u32 data32;
1555 u32 pic_width, pic_height;
1556 u32 pic_mb_nr;
1557 u32 pic_mbx, pic_mby;
1558 u32 i_pic_qp, p_pic_qp;
1559 u32 i_pic_qp_c, p_pic_qp_c;
1560 u32 pic_width_in_mb;
1561 u32 slice_qp;
1562
1563 pic_width = wq->pic.encoder_width;
1564 pic_height = wq->pic.encoder_height;
1565 pic_mb_nr = 0;
1566 pic_mbx = 0;
1567 pic_mby = 0;
1568 i_pic_qp = quant;
1569 p_pic_qp = quant;
1570
1571 pic_width_in_mb = (pic_width + 15) / 16;
1572 WRITE_HREG(HCODEC_HDEC_MC_OMEM_AUTO,
1573 (1 << 31) | /* use_omem_mb_xy */
1574 ((pic_width_in_mb - 1) << 16)); /* omem_max_mb_x */
1575
1576 WRITE_HREG(HCODEC_VLC_ADV_CONFIG,
1577 /* early_mix_mc_hcmd -- will enable in P Picture */
1578 (0 << 10) |
1579 (1 << 9) | /* update_top_left_mix */
1580 (1 << 8) | /* p_top_left_mix */
1581 /* mv_cal_mixed_type -- will enable in P Picture */
1582 (0 << 7) |
1583 /* mc_hcmd_mixed_type -- will enable in P Picture */
1584 (0 << 6) |
1585 (1 << 5) | /* use_separate_int_control */
1586 (1 << 4) | /* hcmd_intra_use_q_info */
1587 (1 << 3) | /* hcmd_left_use_prev_info */
1588 (1 << 2) | /* hcmd_use_q_info */
1589 (1 << 1) | /* use_q_delta_quant */
1590 /* detect_I16_from_I4 use qdct detected mb_type */
1591 (0 << 0));
1592
1593 WRITE_HREG(HCODEC_QDCT_ADV_CONFIG,
1594 (1 << 29) | /* mb_info_latch_no_I16_pred_mode */
1595 (1 << 28) | /* ie_dma_mbxy_use_i_pred */
1596 (1 << 27) | /* ie_dma_read_write_use_ip_idx */
1597 (1 << 26) | /* ie_start_use_top_dma_count */
1598 (1 << 25) | /* i_pred_top_dma_rd_mbbot */
1599 (1 << 24) | /* i_pred_top_dma_wr_disable */
1600 /* i_pred_mix -- will enable in P Picture */
1601 (0 << 23) |
1602 (1 << 22) | /* me_ab_rd_when_intra_in_p */
1603 (1 << 21) | /* force_mb_skip_run_when_intra */
1604 /* mc_out_mixed_type -- will enable in P Picture */
1605 (0 << 20) |
1606 (1 << 19) | /* ie_start_when_quant_not_full */
1607 (1 << 18) | /* mb_info_state_mix */
1608 /* mb_type_use_mix_result -- will enable in P Picture */
1609 (0 << 17) |
1610 /* me_cb_ie_read_enable -- will enable in P Picture */
1611 (0 << 16) |
1612 /* ie_cur_data_from_me -- will enable in P Picture */
1613 (0 << 15) |
1614 (1 << 14) | /* rem_per_use_table */
1615 (0 << 13) | /* q_latch_int_enable */
1616 (1 << 12) | /* q_use_table */
1617 (0 << 11) | /* q_start_wait */
1618 (1 << 10) | /* LUMA_16_LEFT_use_cur */
1619 (1 << 9) | /* DC_16_LEFT_SUM_use_cur */
1620 (1 << 8) | /* c_ref_ie_sel_cur */
1621 (0 << 7) | /* c_ipred_perfect_mode */
1622 (1 << 6) | /* ref_ie_ul_sel */
1623 (1 << 5) | /* mb_type_use_ie_result */
1624 (1 << 4) | /* detect_I16_from_I4 */
1625 (1 << 3) | /* ie_not_wait_ref_busy */
1626 (1 << 2) | /* ie_I16_enable */
1627 (3 << 0)); /* ie_done_sel // fastest when waiting */
1628
1629 if (request != NULL) {
1630 WRITE_HREG(HCODEC_IE_WEIGHT,
1631 (request->i16_weight << 16) |
1632 (request->i4_weight << 0));
1633 WRITE_HREG(HCODEC_ME_WEIGHT,
1634 (request->me_weight << 0));
1635 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1636 /* ie_sad_offset_I16 */
1637 (request->i16_weight << 16) |
1638 /* ie_sad_offset_I4 */
1639 (request->i4_weight << 0));
1640 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1641 /* ie_sad_shift_I16 */
1642 (IE_SAD_SHIFT_I16 << 24) |
1643 /* ie_sad_shift_I4 */
1644 (IE_SAD_SHIFT_I4 << 20) |
1645 /* me_sad_shift_INTER */
1646 (ME_SAD_SHIFT_INTER << 16) |
1647 /* me_sad_offset_INTER */
1648 (request->me_weight << 0));
1649 wq->me_weight = request->me_weight;
1650 wq->i4_weight = request->i4_weight;
1651 wq->i16_weight = request->i16_weight;
1652 } else {
1653 WRITE_HREG(HCODEC_IE_WEIGHT,
1654 (I16MB_WEIGHT_OFFSET << 16) |
1655 (I4MB_WEIGHT_OFFSET << 0));
1656 WRITE_HREG(HCODEC_ME_WEIGHT,
1657 (ME_WEIGHT_OFFSET << 0));
1658 WRITE_HREG(HCODEC_SAD_CONTROL_0,
1659 /* ie_sad_offset_I16 */
1660 (I16MB_WEIGHT_OFFSET << 16) |
1661 /* ie_sad_offset_I4 */
1662 (I4MB_WEIGHT_OFFSET << 0));
1663 WRITE_HREG(HCODEC_SAD_CONTROL_1,
1664 /* ie_sad_shift_I16 */
1665 (IE_SAD_SHIFT_I16 << 24) |
1666 /* ie_sad_shift_I4 */
1667 (IE_SAD_SHIFT_I4 << 20) |
1668 /* me_sad_shift_INTER */
1669 (ME_SAD_SHIFT_INTER << 16) |
1670 /* me_sad_offset_INTER */
1671 (ME_WEIGHT_OFFSET << 0));
1672 }
1673
1674 WRITE_HREG(HCODEC_ADV_MV_CTL0,
1675 (ADV_MV_LARGE_16x8 << 31) |
1676 (ADV_MV_LARGE_8x16 << 30) |
1677 (ADV_MV_8x8_WEIGHT << 16) | /* adv_mv_8x8_weight */
1678 /* adv_mv_4x4x4_weight should be set bigger */
1679 (ADV_MV_4x4x4_WEIGHT << 0));
1680 WRITE_HREG(HCODEC_ADV_MV_CTL1,
1681 /* adv_mv_16x16_weight */
1682 (ADV_MV_16x16_WEIGHT << 16) |
1683 (ADV_MV_LARGE_16x16 << 15) |
1684 (ADV_MV_16_8_WEIGHT << 0)); /* adv_mv_16_8_weight */
1685
1686 hcodec_prog_qtbl(wq);
1687 if (IDR) {
1688 i_pic_qp =
1689 wq->quant_tbl_i4[0] & 0xff;
1690 i_pic_qp +=
1691 wq->quant_tbl_i16[0] & 0xff;
1692 i_pic_qp /= 2;
1693 p_pic_qp = i_pic_qp;
1694 } else {
1695 i_pic_qp =
1696 wq->quant_tbl_i4[0] & 0xff;
1697 i_pic_qp +=
1698 wq->quant_tbl_i16[0] & 0xff;
1699 p_pic_qp = wq->quant_tbl_me[0] & 0xff;
1700 slice_qp = (i_pic_qp + p_pic_qp) / 3;
1701 i_pic_qp = slice_qp;
1702 p_pic_qp = i_pic_qp;
1703 }
1704#ifdef H264_ENC_CBR
1705 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
1706 data32 = READ_HREG(HCODEC_SAD_CONTROL_1);
1707 data32 = data32 & 0xffff; /* remove sad shift */
1708 WRITE_HREG(HCODEC_SAD_CONTROL_1, data32);
1709 WRITE_HREG(H264_ENC_CBR_TABLE_ADDR,
1710 wq->mem.cbr_info_ddr_start_addr);
1711 WRITE_HREG(H264_ENC_CBR_MB_SIZE_ADDR,
1712 wq->mem.cbr_info_ddr_start_addr
1713 + CBR_TABLE_SIZE);
1714 WRITE_HREG(H264_ENC_CBR_CTL,
1715 (wq->cbr_info.start_tbl_id << 28) |
1716 (wq->cbr_info.short_shift << 24) |
1717 (wq->cbr_info.long_mb_num << 16) |
1718 (wq->cbr_info.long_th << 0));
1719 WRITE_HREG(H264_ENC_CBR_REGION_SIZE,
1720 (wq->cbr_info.block_w << 16) |
1721 (wq->cbr_info.block_h << 0));
1722 }
1723#endif
1724 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_0,
1725 (0 << 19) | /* vlc_delta_quant_1 */
1726 (i_pic_qp << 13) | /* vlc_quant_1 */
1727 (0 << 6) | /* vlc_delta_quant_0 */
1728 (i_pic_qp << 0)); /* vlc_quant_0 */
1729 WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_1,
1730 (14 << 6) | /* vlc_max_delta_q_neg */
1731 (13 << 0)); /* vlc_max_delta_q_pos */
1732 WRITE_HREG(HCODEC_VLC_PIC_SIZE,
1733 pic_width | (pic_height << 16));
1734 WRITE_HREG(HCODEC_VLC_PIC_POSITION,
1735 (pic_mb_nr << 16) |
1736 (pic_mby << 8) |
1737 (pic_mbx << 0));
1738
1739 /* synopsys parallel_case full_case */
1740 switch (i_pic_qp) {
1741 case 0:
1742 i_pic_qp_c = 0;
1743 break;
1744 case 1:
1745 i_pic_qp_c = 1;
1746 break;
1747 case 2:
1748 i_pic_qp_c = 2;
1749 break;
1750 case 3:
1751 i_pic_qp_c = 3;
1752 break;
1753 case 4:
1754 i_pic_qp_c = 4;
1755 break;
1756 case 5:
1757 i_pic_qp_c = 5;
1758 break;
1759 case 6:
1760 i_pic_qp_c = 6;
1761 break;
1762 case 7:
1763 i_pic_qp_c = 7;
1764 break;
1765 case 8:
1766 i_pic_qp_c = 8;
1767 break;
1768 case 9:
1769 i_pic_qp_c = 9;
1770 break;
1771 case 10:
1772 i_pic_qp_c = 10;
1773 break;
1774 case 11:
1775 i_pic_qp_c = 11;
1776 break;
1777 case 12:
1778 i_pic_qp_c = 12;
1779 break;
1780 case 13:
1781 i_pic_qp_c = 13;
1782 break;
1783 case 14:
1784 i_pic_qp_c = 14;
1785 break;
1786 case 15:
1787 i_pic_qp_c = 15;
1788 break;
1789 case 16:
1790 i_pic_qp_c = 16;
1791 break;
1792 case 17:
1793 i_pic_qp_c = 17;
1794 break;
1795 case 18:
1796 i_pic_qp_c = 18;
1797 break;
1798 case 19:
1799 i_pic_qp_c = 19;
1800 break;
1801 case 20:
1802 i_pic_qp_c = 20;
1803 break;
1804 case 21:
1805 i_pic_qp_c = 21;
1806 break;
1807 case 22:
1808 i_pic_qp_c = 22;
1809 break;
1810 case 23:
1811 i_pic_qp_c = 23;
1812 break;
1813 case 24:
1814 i_pic_qp_c = 24;
1815 break;
1816 case 25:
1817 i_pic_qp_c = 25;
1818 break;
1819 case 26:
1820 i_pic_qp_c = 26;
1821 break;
1822 case 27:
1823 i_pic_qp_c = 27;
1824 break;
1825 case 28:
1826 i_pic_qp_c = 28;
1827 break;
1828 case 29:
1829 i_pic_qp_c = 29;
1830 break;
1831 case 30:
1832 i_pic_qp_c = 29;
1833 break;
1834 case 31:
1835 i_pic_qp_c = 30;
1836 break;
1837 case 32:
1838 i_pic_qp_c = 31;
1839 break;
1840 case 33:
1841 i_pic_qp_c = 32;
1842 break;
1843 case 34:
1844 i_pic_qp_c = 32;
1845 break;
1846 case 35:
1847 i_pic_qp_c = 33;
1848 break;
1849 case 36:
1850 i_pic_qp_c = 34;
1851 break;
1852 case 37:
1853 i_pic_qp_c = 34;
1854 break;
1855 case 38:
1856 i_pic_qp_c = 35;
1857 break;
1858 case 39:
1859 i_pic_qp_c = 35;
1860 break;
1861 case 40:
1862 i_pic_qp_c = 36;
1863 break;
1864 case 41:
1865 i_pic_qp_c = 36;
1866 break;
1867 case 42:
1868 i_pic_qp_c = 37;
1869 break;
1870 case 43:
1871 i_pic_qp_c = 37;
1872 break;
1873 case 44:
1874 i_pic_qp_c = 37;
1875 break;
1876 case 45:
1877 i_pic_qp_c = 38;
1878 break;
1879 case 46:
1880 i_pic_qp_c = 38;
1881 break;
1882 case 47:
1883 i_pic_qp_c = 38;
1884 break;
1885 case 48:
1886 i_pic_qp_c = 39;
1887 break;
1888 case 49:
1889 i_pic_qp_c = 39;
1890 break;
1891 case 50:
1892 i_pic_qp_c = 39;
1893 break;
1894 default:
1895 i_pic_qp_c = 39;
1896 break;
1897 }
1898
1899 /* synopsys parallel_case full_case */
1900 switch (p_pic_qp) {
1901 case 0:
1902 p_pic_qp_c = 0;
1903 break;
1904 case 1:
1905 p_pic_qp_c = 1;
1906 break;
1907 case 2:
1908 p_pic_qp_c = 2;
1909 break;
1910 case 3:
1911 p_pic_qp_c = 3;
1912 break;
1913 case 4:
1914 p_pic_qp_c = 4;
1915 break;
1916 case 5:
1917 p_pic_qp_c = 5;
1918 break;
1919 case 6:
1920 p_pic_qp_c = 6;
1921 break;
1922 case 7:
1923 p_pic_qp_c = 7;
1924 break;
1925 case 8:
1926 p_pic_qp_c = 8;
1927 break;
1928 case 9:
1929 p_pic_qp_c = 9;
1930 break;
1931 case 10:
1932 p_pic_qp_c = 10;
1933 break;
1934 case 11:
1935 p_pic_qp_c = 11;
1936 break;
1937 case 12:
1938 p_pic_qp_c = 12;
1939 break;
1940 case 13:
1941 p_pic_qp_c = 13;
1942 break;
1943 case 14:
1944 p_pic_qp_c = 14;
1945 break;
1946 case 15:
1947 p_pic_qp_c = 15;
1948 break;
1949 case 16:
1950 p_pic_qp_c = 16;
1951 break;
1952 case 17:
1953 p_pic_qp_c = 17;
1954 break;
1955 case 18:
1956 p_pic_qp_c = 18;
1957 break;
1958 case 19:
1959 p_pic_qp_c = 19;
1960 break;
1961 case 20:
1962 p_pic_qp_c = 20;
1963 break;
1964 case 21:
1965 p_pic_qp_c = 21;
1966 break;
1967 case 22:
1968 p_pic_qp_c = 22;
1969 break;
1970 case 23:
1971 p_pic_qp_c = 23;
1972 break;
1973 case 24:
1974 p_pic_qp_c = 24;
1975 break;
1976 case 25:
1977 p_pic_qp_c = 25;
1978 break;
1979 case 26:
1980 p_pic_qp_c = 26;
1981 break;
1982 case 27:
1983 p_pic_qp_c = 27;
1984 break;
1985 case 28:
1986 p_pic_qp_c = 28;
1987 break;
1988 case 29:
1989 p_pic_qp_c = 29;
1990 break;
1991 case 30:
1992 p_pic_qp_c = 29;
1993 break;
1994 case 31:
1995 p_pic_qp_c = 30;
1996 break;
1997 case 32:
1998 p_pic_qp_c = 31;
1999 break;
2000 case 33:
2001 p_pic_qp_c = 32;
2002 break;
2003 case 34:
2004 p_pic_qp_c = 32;
2005 break;
2006 case 35:
2007 p_pic_qp_c = 33;
2008 break;
2009 case 36:
2010 p_pic_qp_c = 34;
2011 break;
2012 case 37:
2013 p_pic_qp_c = 34;
2014 break;
2015 case 38:
2016 p_pic_qp_c = 35;
2017 break;
2018 case 39:
2019 p_pic_qp_c = 35;
2020 break;
2021 case 40:
2022 p_pic_qp_c = 36;
2023 break;
2024 case 41:
2025 p_pic_qp_c = 36;
2026 break;
2027 case 42:
2028 p_pic_qp_c = 37;
2029 break;
2030 case 43:
2031 p_pic_qp_c = 37;
2032 break;
2033 case 44:
2034 p_pic_qp_c = 37;
2035 break;
2036 case 45:
2037 p_pic_qp_c = 38;
2038 break;
2039 case 46:
2040 p_pic_qp_c = 38;
2041 break;
2042 case 47:
2043 p_pic_qp_c = 38;
2044 break;
2045 case 48:
2046 p_pic_qp_c = 39;
2047 break;
2048 case 49:
2049 p_pic_qp_c = 39;
2050 break;
2051 case 50:
2052 p_pic_qp_c = 39;
2053 break;
2054 default:
2055 p_pic_qp_c = 39;
2056 break;
2057 }
2058 WRITE_HREG(HCODEC_QDCT_Q_QUANT_I,
2059 (i_pic_qp_c << 22) |
2060 (i_pic_qp << 16) |
2061 ((i_pic_qp_c % 6) << 12) |
2062 ((i_pic_qp_c / 6) << 8) |
2063 ((i_pic_qp % 6) << 4) |
2064 ((i_pic_qp / 6) << 0));
2065
2066 WRITE_HREG(HCODEC_QDCT_Q_QUANT_P,
2067 (p_pic_qp_c << 22) |
2068 (p_pic_qp << 16) |
2069 ((p_pic_qp_c % 6) << 12) |
2070 ((p_pic_qp_c / 6) << 8) |
2071 ((p_pic_qp % 6) << 4) |
2072 ((p_pic_qp / 6) << 0));
2073
2074#ifdef ENABLE_IGNORE_FUNCTION
2075 WRITE_HREG(HCODEC_IGNORE_CONFIG,
2076 (1 << 31) | /* ignore_lac_coeff_en */
2077 (1 << 26) | /* ignore_lac_coeff_else (<1) */
2078 (1 << 21) | /* ignore_lac_coeff_2 (<1) */
2079 (2 << 16) | /* ignore_lac_coeff_1 (<2) */
2080 (1 << 15) | /* ignore_cac_coeff_en */
2081 (1 << 10) | /* ignore_cac_coeff_else (<1) */
2082 (1 << 5) | /* ignore_cac_coeff_2 (<1) */
2083 (3 << 0)); /* ignore_cac_coeff_1 (<2) */
2084
2085 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB)
2086 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2087 (1 << 31) | /* ignore_t_lac_coeff_en */
2088 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2089 (2 << 21) | /* ignore_t_lac_coeff_2 (<2) */
2090 (6 << 16) | /* ignore_t_lac_coeff_1 (<6) */
2091 (1<<15) | /* ignore_cdc_coeff_en */
2092 (0<<14) | /* ignore_t_lac_coeff_else_le_3 */
2093 (1<<13) | /* ignore_t_lac_coeff_else_le_4 */
2094 (1<<12) | /* ignore_cdc_only_when_empty_cac_inter */
2095 (1<<11) | /* ignore_cdc_only_when_one_empty_inter */
2096 /* ignore_cdc_range_max_inter 0-0, 1-1, 2-2, 3-3 */
2097 (2<<9) |
2098 /* ignore_cdc_abs_max_inter 0-1, 1-2, 2-3, 3-4 */
2099 (0<<7) |
2100 /* ignore_cdc_only_when_empty_cac_intra */
2101 (1<<5) |
2102 /* ignore_cdc_only_when_one_empty_intra */
2103 (1<<4) |
2104 /* ignore_cdc_range_max_intra 0-0, 1-1, 2-2, 3-3 */
2105 (1<<2) |
2106 /* ignore_cdc_abs_max_intra 0-1, 1-2, 2-3, 3-4 */
2107 (0<<0));
2108 else
2109 WRITE_HREG(HCODEC_IGNORE_CONFIG_2,
2110 (1 << 31) | /* ignore_t_lac_coeff_en */
2111 (1 << 26) | /* ignore_t_lac_coeff_else (<1) */
2112 (1 << 21) | /* ignore_t_lac_coeff_2 (<1) */
2113 (5 << 16) | /* ignore_t_lac_coeff_1 (<5) */
2114 (0 << 0));
2115#else
2116 WRITE_HREG(HCODEC_IGNORE_CONFIG, 0);
2117 WRITE_HREG(HCODEC_IGNORE_CONFIG_2, 0);
2118#endif
2119
2120 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2121 (1 << 9) | /* mb_info_soft_reset */
2122 (1 << 0)); /* mb read buffer soft reset */
2123
2124 WRITE_HREG(HCODEC_QDCT_MB_CONTROL,
2125 (1 << 28) | /* ignore_t_p8x8 */
2126 (0 << 27) | /* zero_mc_out_null_non_skipped_mb */
2127 (0 << 26) | /* no_mc_out_null_non_skipped_mb */
2128 (0 << 25) | /* mc_out_even_skipped_mb */
2129 (0 << 24) | /* mc_out_wait_cbp_ready */
2130 (0 << 23) | /* mc_out_wait_mb_type_ready */
2131 (1 << 29) | /* ie_start_int_enable */
2132 (1 << 19) | /* i_pred_enable */
2133 (1 << 20) | /* ie_sub_enable */
2134 (1 << 18) | /* iq_enable */
2135 (1 << 17) | /* idct_enable */
2136 (1 << 14) | /* mb_pause_enable */
2137 (1 << 13) | /* q_enable */
2138 (1 << 12) | /* dct_enable */
2139 (1 << 10) | /* mb_info_en */
2140 (0 << 3) | /* endian */
2141 (0 << 1) | /* mb_read_en */
2142 (0 << 0)); /* soft reset */
2143
2144 WRITE_HREG(HCODEC_SAD_CONTROL,
2145 (0 << 3) | /* ie_result_buff_enable */
2146 (1 << 2) | /* ie_result_buff_soft_reset */
2147 (0 << 1) | /* sad_enable */
2148 (1 << 0)); /* sad soft reset */
2149 WRITE_HREG(HCODEC_IE_RESULT_BUFFER, 0);
2150
2151 WRITE_HREG(HCODEC_SAD_CONTROL,
2152 (1 << 3) | /* ie_result_buff_enable */
2153 (0 << 2) | /* ie_result_buff_soft_reset */
2154 (1 << 1) | /* sad_enable */
2155 (0 << 0)); /* sad soft reset */
2156
2157 WRITE_HREG(HCODEC_IE_CONTROL,
2158 (1 << 30) | /* active_ul_block */
2159 (0 << 1) | /* ie_enable */
2160 (1 << 0)); /* ie soft reset */
2161
2162 WRITE_HREG(HCODEC_IE_CONTROL,
2163 (1 << 30) | /* active_ul_block */
2164 (0 << 1) | /* ie_enable */
2165 (0 << 0)); /* ie soft reset */
2166
2167 WRITE_HREG(HCODEC_ME_SKIP_LINE,
2168 (8 << 24) | /* step_3_skip_line */
2169 (8 << 18) | /* step_2_skip_line */
2170 (2 << 12) | /* step_1_skip_line */
2171 (0 << 6) | /* step_0_skip_line */
2172 (0 << 0));
2173
2174 WRITE_HREG(HCODEC_ME_MV_MERGE_CTL, me_mv_merge_ctl);
2175 WRITE_HREG(HCODEC_ME_STEP0_CLOSE_MV, me_step0_close_mv);
2176 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_01, me_sad_enough_01);
2177 WRITE_HREG(HCODEC_ME_SAD_ENOUGH_23, me_sad_enough_23);
2178 WRITE_HREG(HCODEC_ME_F_SKIP_SAD, me_f_skip_sad);
2179 WRITE_HREG(HCODEC_ME_F_SKIP_WEIGHT, me_f_skip_weight);
2180 WRITE_HREG(HCODEC_ME_MV_WEIGHT_01, me_mv_weight_01);
2181 WRITE_HREG(HCODEC_ME_MV_WEIGHT_23, me_mv_weight_23);
2182 WRITE_HREG(HCODEC_ME_SAD_RANGE_INC, me_sad_range_inc);
2183
2184 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) {
2185 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, 0);
2186 WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL,
2187 (v5_use_small_diff_cnt << 7) |
2188 (v5_simple_mb_inter_all_en << 6) |
2189 (v5_simple_mb_inter_8x8_en << 5) |
2190 (v5_simple_mb_inter_16_8_en << 4) |
2191 (v5_simple_mb_inter_16x16_en << 3) |
2192 (v5_simple_mb_intra_en << 2) |
2193 (v5_simple_mb_C_en << 1) |
2194 (v5_simple_mb_Y_en << 0));
2195 WRITE_HREG(HCODEC_V5_MB_DIFF_SUM, 0);
2196 WRITE_HREG(HCODEC_V5_SMALL_DIFF_CNT,
2197 (v5_small_diff_C<<16) |
2198 (v5_small_diff_Y<<0));
2199 if (qp_mode == 1) {
2200 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2201 0);
2202 } else {
2203 WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT,
2204 v5_simple_dq_setting);
2205 }
2206 WRITE_HREG(HCODEC_V5_SIMPLE_MB_ME_WEIGHT,
2207 v5_simple_me_weight_setting);
2208 /* txlx can remove it */
2209 WRITE_HREG(HCODEC_QDCT_CONFIG, 1 << 0);
2210 }
2211
2212 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
2213 WRITE_HREG(HCODEC_V4_FORCE_SKIP_CFG,
2214 (i_pic_qp << 26) | /* v4_force_q_r_intra */
2215 (i_pic_qp << 20) | /* v4_force_q_r_inter */
2216 (0 << 19) | /* v4_force_q_y_enable */
2217 (5 << 16) | /* v4_force_qr_y */
2218 (6 << 12) | /* v4_force_qp_y */
2219 (0 << 0)); /* v4_force_skip_sad */
2220
2221 /* V3 Force skip */
2222 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2223 (1 << 31) | /* v3_skip_enable */
2224 (0 << 30) | /* v3_step_1_weight_enable */
2225 (1 << 28) | /* v3_mv_sad_weight_enable */
2226 (1 << 27) | /* v3_ipred_type_enable */
2227 (V3_FORCE_SKIP_SAD_1 << 12) |
2228 (V3_FORCE_SKIP_SAD_0 << 0));
2229 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2230 (V3_SKIP_WEIGHT_1 << 16) |
2231 (V3_SKIP_WEIGHT_0 << 0));
2232 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2233 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2234 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2235 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2236 (V3_FORCE_SKIP_SAD_2 << 16) |
2237 (V3_SKIP_WEIGHT_2 << 0));
2238 if (request != NULL) {
2239 unsigned int off1, off2;
2240
2241 off1 = V3_IE_F_ZERO_SAD_I4 - I4MB_WEIGHT_OFFSET;
2242 off2 = V3_IE_F_ZERO_SAD_I16
2243 - I16MB_WEIGHT_OFFSET;
2244 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2245 ((request->i16_weight + off2) << 16) |
2246 ((request->i4_weight + off1) << 0));
2247 off1 = V3_ME_F_ZERO_SAD - ME_WEIGHT_OFFSET;
2248 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2249 (0 << 25) |
2250 /* v3_no_ver_when_top_zero_en */
2251 (0 << 24) |
2252 /* v3_no_hor_when_left_zero_en */
2253 (3 << 16) | /* type_hor break */
2254 ((request->me_weight + off1) << 0));
2255 } else {
2256 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2257 (V3_IE_F_ZERO_SAD_I16 << 16) |
2258 (V3_IE_F_ZERO_SAD_I4 << 0));
2259 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2260 (0 << 25) |
2261 /* v3_no_ver_when_top_zero_en */
2262 (0 << 24) |
2263 /* v3_no_hor_when_left_zero_en */
2264 (3 << 16) | /* type_hor break */
2265 (V3_ME_F_ZERO_SAD << 0));
2266 }
2267 } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2268 /* V3 Force skip */
2269 WRITE_HREG(HCODEC_V3_SKIP_CONTROL,
2270 (1 << 31) | /* v3_skip_enable */
2271 (0 << 30) | /* v3_step_1_weight_enable */
2272 (1 << 28) | /* v3_mv_sad_weight_enable */
2273 (1 << 27) | /* v3_ipred_type_enable */
2274 (0 << 12) | /* V3_FORCE_SKIP_SAD_1 */
2275 (0 << 0)); /* V3_FORCE_SKIP_SAD_0 */
2276 WRITE_HREG(HCODEC_V3_SKIP_WEIGHT,
2277 (V3_SKIP_WEIGHT_1 << 16) |
2278 (V3_SKIP_WEIGHT_0 << 0));
2279 WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD,
2280 (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) |
2281 (V3_LEVEL_1_SKIP_MAX_SAD << 0));
2282 WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT,
2283 (0 << 16) | /* V3_FORCE_SKIP_SAD_2 */
2284 (V3_SKIP_WEIGHT_2 << 0));
2285 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0,
2286 (0 << 16) | /* V3_IE_F_ZERO_SAD_I16 */
2287 (0 << 0)); /* V3_IE_F_ZERO_SAD_I4 */
2288 WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1,
2289 (0 << 25) | /* v3_no_ver_when_top_zero_en */
2290 (0 << 24) | /* v3_no_hor_when_left_zero_en */
2291 (3 << 16) | /* type_hor break */
2292 (0 << 0)); /* V3_ME_F_ZERO_SAD */
2293 }
2294 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
2295 int i;
2296 /* MV SAD Table */
2297 for (i = 0; i < 64; i++)
2298 WRITE_HREG(HCODEC_V3_MV_SAD_TABLE,
2299 v3_mv_sad[i]);
2300
2301 /* IE PRED SAD Table*/
2302 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_0,
2303 (C_ipred_weight_H << 24) |
2304 (C_ipred_weight_V << 16) |
2305 (I4_ipred_weight_else << 8) |
2306 (I4_ipred_weight_most << 0));
2307 WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_1,
2308 (I16_ipred_weight_DC << 24) |
2309 (I16_ipred_weight_H << 16) |
2310 (I16_ipred_weight_V << 8) |
2311 (C_ipred_weight_DC << 0));
2312 WRITE_HREG(HCODEC_V3_LEFT_SMALL_MAX_SAD,
2313 (v3_left_small_max_me_sad << 16) |
2314 (v3_left_small_max_ie_sad << 0));
2315 }
2316 WRITE_HREG(HCODEC_IE_DATA_FEED_BUFF_INFO, 0);
2317 WRITE_HREG(HCODEC_CURR_CANVAS_CTRL, 0);
2318 data32 = READ_HREG(HCODEC_VLC_CONFIG);
2319 data32 = data32 | (1 << 0); /* set pop_coeff_even_all_zero */
2320 WRITE_HREG(HCODEC_VLC_CONFIG, data32);
2321
2322 WRITE_HREG(INFO_DUMP_START_ADDR,
2323 wq->mem.dump_info_ddr_start_addr);
2324
2325 /* clear mailbox interrupt */
2326 WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);
2327
2328 /* enable mailbox interrupt */
2329 WRITE_HREG(HCODEC_IRQ_MBOX_MASK, 1);
2330}
2331
void amvenc_reset(void)
{
	/*
	 * Soft-reset the HCODEC encoder sub-blocks via DOS_SW_RESET1.
	 * The triple dummy reads before and after the write appear to be
	 * a HW settling requirement (same pattern is used in amvenc_start
	 * and amvenc_stop) -- presumably to flush pending CBUS
	 * transactions; TODO confirm against the SoC datasheet.
	 */
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	/* Assert reset on bits 2,6,7,8,14,16,17, then release. */
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));
	WRITE_VREG(DOS_SW_RESET1, 0);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2347
void amvenc_start(void)
{
	/*
	 * Pulse reset bits 11 and 12 of DOS_SW_RESET1, then kick the
	 * HCODEC microcontroller by writing 1 to HCODEC_MPSR.
	 * Dummy reads around the reset follow the same settling pattern
	 * as amvenc_reset() -- TODO confirm exact HW requirement.
	 */
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11));
	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	/* Start the encoder MCU. */
	WRITE_HREG(HCODEC_MPSR, 0x0001);
}
2363
void amvenc_stop(void)
{
	ulong timeout = jiffies + HZ;

	/* Halt the MCU and coprocessor. */
	WRITE_HREG(HCODEC_MPSR, 0);
	WRITE_HREG(HCODEC_CPSR, 0);
	/*
	 * Wait (up to 1s) for any in-flight IMEM DMA to finish; bit 15 of
	 * HCODEC_IMEM_DMA_CTRL is the busy flag (same bit polled in
	 * amvenc_loadmc()). On timeout we proceed with the reset anyway.
	 */
	while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
		if (time_after(jiffies, timeout))
			break;
	}
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	/* Reset MCU (11,12) plus the encoder sub-blocks reset in
	 * amvenc_reset() (2,6,7,8,14,16,17). */
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11) |
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));

	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2391
/*
 * Staging buffer for encoder microcode, allocated once in amvenc_loadmc()
 * and reused for every subsequent load; mc_addr_map holds its DMA handle
 * while a transfer to HCODEC instruction memory is in flight.
 * NOTE(review): buffer comes from kmalloc(), so the __iomem annotation
 * looks wrong (it is dereferenced directly below) -- verify with sparse.
 */
static void __iomem *mc_addr;
static u32 mc_addr_map;
#define MC_SIZE (4096 * 8)
2395s32 amvenc_loadmc(const char *p, struct encode_wq_s *wq)
2396{
2397 ulong timeout;
2398 s32 ret = 0;
2399
2400 /* use static mempry*/
2401 if (mc_addr == NULL) {
2402 mc_addr = kmalloc(MC_SIZE, GFP_KERNEL);
2403 if (!mc_addr) {
2404 enc_pr(LOG_ERROR, "avc loadmc iomap mc addr error.\n");
2405 return -ENOMEM;
2406 }
2407 }
2408
2409 enc_pr(LOG_ALL, "avc encode ucode name is %s\n", p);
2410 ret = get_data_from_name(p, (u8 *)mc_addr);
2411 if (ret < 0) {
2412 enc_pr(LOG_ERROR,
2413 "avc microcode fail ret=%d, name: %s, wq:%p.\n",
2414 ret, p, (void *)wq);
2415 }
2416
2417 mc_addr_map = dma_map_single(
2418 &encode_manager.this_pdev->dev,
2419 mc_addr, MC_SIZE, DMA_TO_DEVICE);
2420
2421 /* mc_addr_map = wq->mem.assit_buffer_offset; */
2422 /* mc_addr = ioremap_wc(mc_addr_map, MC_SIZE); */
2423 /* memcpy(mc_addr, p, MC_SIZE); */
2424 enc_pr(LOG_ALL, "address 0 is 0x%x\n", *((u32 *)mc_addr));
2425 enc_pr(LOG_ALL, "address 1 is 0x%x\n", *((u32 *)mc_addr + 1));
2426 enc_pr(LOG_ALL, "address 2 is 0x%x\n", *((u32 *)mc_addr + 2));
2427 enc_pr(LOG_ALL, "address 3 is 0x%x\n", *((u32 *)mc_addr + 3));
2428 WRITE_HREG(HCODEC_MPSR, 0);
2429 WRITE_HREG(HCODEC_CPSR, 0);
2430
2431 /* Read CBUS register for timing */
2432 timeout = READ_HREG(HCODEC_MPSR);
2433 timeout = READ_HREG(HCODEC_MPSR);
2434
2435 timeout = jiffies + HZ;
2436
2437 WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map);
2438 WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000);
2439 WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16)));
2440
2441 while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) {
2442 if (time_before(jiffies, timeout))
2443 schedule();
2444 else {
2445 enc_pr(LOG_ERROR, "hcodec load mc error\n");
2446 ret = -EBUSY;
2447 break;
2448 }
2449 }
2450 dma_unmap_single(
2451 &encode_manager.this_pdev->dev,
2452 mc_addr_map, MC_SIZE, DMA_TO_DEVICE);
2453 return ret;
2454}
2455
/* Tiny microcode stub executed by amvenc_dos_top_reg_fix() below. */
const u32 fix_mc[] __aligned(8) = {
	0x0809c05a, 0x06696000, 0x0c780000, 0x00000000
};
2459
2460
2461/*
2462 * DOS top level register access fix.
2463 * When hcodec is running, a protocol register HCODEC_CCPU_INTR_MSK
2464 * is set to make hcodec access one CBUS out of DOS domain once
2465 * to work around a HW bug for 4k2k dual decoder implementation.
2466 * If hcodec is not running, then a ucode is loaded and executed
2467 * instead.
2468 */
void amvenc_dos_top_reg_fix(void)
{
	bool hcodec_on;
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	hcodec_on = vdec_on(VDEC_HCODEC);

	/*
	 * If hcodec is powered and its MCU is running (HCODEC_MPSR bit 0),
	 * it is enough to set the protocol mask register; see the comment
	 * block above this function.
	 */
	if ((hcodec_on) && (READ_VREG(HCODEC_MPSR) & 1)) {
		WRITE_HREG(HCODEC_CCPU_INTR_MSK, 1);
		spin_unlock_irqrestore(&lock, flags);
		return;
	}

	/* Otherwise run the fix_mc stub once, powering hcodec on/off
	 * around it if it was not already on. */
	if (!hcodec_on)
		vdec_poweron(VDEC_HCODEC);

	amhcodec_loadmc(fix_mc);

	amhcodec_start();

	/*
	 * NOTE(review): 1ms busy-wait while holding a spinlock with IRQs
	 * disabled -- long for atomic context, but presumably required to
	 * let the stub execute; confirm before changing.
	 */
	udelay(1000);

	amhcodec_stop();

	if (!hcodec_on)
		vdec_poweroff(VDEC_HCODEC);

	spin_unlock_irqrestore(&lock, flags);
}
2500
2501bool amvenc_avc_on(void)
2502{
2503 bool hcodec_on;
2504 ulong flags;
2505
2506 spin_lock_irqsave(&lock, flags);
2507
2508 hcodec_on = vdec_on(VDEC_HCODEC);
2509 hcodec_on &= (encode_manager.wq_count > 0);
2510
2511 spin_unlock_irqrestore(&lock, flags);
2512 return hcodec_on;
2513}
2514
/*
 * Power-up sequence for the HCODEC domain: ungate clk81, lift the power
 * sleep bits, pulse a full soft reset, enable clocks at the requested
 * level, power the internal memories, and finally drop the isolation
 * cells. The ordering and udelay() spacing follow the SoC power-up
 * protocol -- do not reorder. SM1/TM2+ use narrower sleep/iso masks.
 * Always returns 0.
 */
static s32 avc_poweron(u32 clock)
{
	ulong flags;
	u32 data32;	/* NOTE(review): written but never used */

	data32 = 0;

	amports_switch_gate("vdec", 1);

	spin_lock_irqsave(&lock, flags);

	WRITE_AOREG(AO_RTI_PWR_CNTL_REG0,
		(READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18)));
	udelay(10);
	/* Powerup HCODEC */
	/* [1:0] HCODEC */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) &
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? ~0x1 : ~0x3));

	udelay(10);

	/* Full soft reset pulse of all DOS sub-blocks. */
	WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
	WRITE_VREG(DOS_SW_RESET1, 0);

	/* Enable Dos internal clock gating */
	hvdec_clock_enable(clock);

	/* Powerup HCODEC memories */
	WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0);

	/* Remove HCODEC ISO */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) &
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? ~0x1 : ~0x30));

	udelay(10);
	/* Disable auto-clock gate */
	WRITE_VREG(DOS_GEN_CTRL0,
		(READ_VREG(DOS_GEN_CTRL0) | 0x1));
	WRITE_VREG(DOS_GEN_CTRL0,
		(READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE));

	spin_unlock_irqrestore(&lock, flags);

	mdelay(10);
	return 0;
}
2567
/*
 * Power-down sequence for the HCODEC domain -- exact reverse of
 * avc_poweron(): raise isolation, power down memories, gate clocks,
 * set the power sleep bits, then release the clk81 gate. SM1/TM2+ use
 * narrower iso/sleep masks. Always returns 0.
 */
static s32 avc_poweroff(void)
{
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	/* enable HCODEC isolation */
	WRITE_AOREG(AO_RTI_GEN_PWR_ISO0,
		READ_AOREG(AO_RTI_GEN_PWR_ISO0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x30));

	/* power off HCODEC memories */
	WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL);

	/* disable HCODEC clock */
	hvdec_clock_disable();

	/* HCODEC power off */
	WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0,
		READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) |
		((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 ||
		get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2)
		? 0x1 : 0x3));

	spin_unlock_irqrestore(&lock, flags);

	/* release DOS clk81 clock gating */
	amports_switch_gate("vdec", 0);
	return 0;
}
2600
/*
 * Stop the encoder, reset the DOS block, and reload the microcode
 * selected by encode_manager.ucode_index. Sequence (stop -> reset ->
 * delay -> MMC ctrl -> load) is order-critical.
 * Returns 0 on success, -EBUSY if the microcode load fails.
 */
static s32 reload_mc(struct encode_wq_s *wq)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	amvenc_stop();

	/* Full soft reset pulse before reloading. */
	WRITE_VREG(DOS_SW_RESET1, 0xffffffff);
	WRITE_VREG(DOS_SW_RESET1, 0);

	udelay(10);

	/* 0x32: assist MMC control -- same value used in
	 * amvenc_avc_start(); meaning not documented here. */
	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);
	enc_pr(LOG_INFO, "reload microcode\n");

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;
	return 0;
}
2619
2620static void encode_isr_tasklet(ulong data)
2621{
2622 struct encode_manager_s *manager = (struct encode_manager_s *)data;
2623
2624 enc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status);
2625 if (((manager->encode_hw_status == ENCODER_IDR_DONE)
2626 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2627 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2628 || (manager->encode_hw_status == ENCODER_PICTURE_DONE))
2629 && (manager->process_irq)) {
2630 wake_up_interruptible(&manager->event.hw_complete);
2631 }
2632}
2633
2634/* irq function */
2635static irqreturn_t enc_isr(s32 irq_number, void *para)
2636{
2637 struct encode_manager_s *manager = (struct encode_manager_s *)para;
2638
2639 WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1);
2640
2641 manager->encode_hw_status = READ_HREG(ENCODER_STATUS);
2642 if ((manager->encode_hw_status == ENCODER_IDR_DONE)
2643 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2644 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2645 || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
2646 enc_pr(LOG_ALL, "encoder stage is %d\n",
2647 manager->encode_hw_status);
2648 }
2649
2650 if (((manager->encode_hw_status == ENCODER_IDR_DONE)
2651 || (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
2652 || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
2653 || (manager->encode_hw_status == ENCODER_PICTURE_DONE))
2654 && (!manager->process_irq)) {
2655 manager->process_irq = true;
2656 if (manager->encode_hw_status != ENCODER_SEQUENCE_DONE)
2657 manager->need_reset = true;
2658 tasklet_schedule(&manager->encode_tasklet);
2659 }
2660 return IRQ_HANDLED;
2661}
2662
/*
 * Decode a userspace ioctl command buffer (cmd_info, from
 * AMVENC_AVC_IOC_NEW_CMD) into wq->request.
 *
 * Layout (u32 words): [0]=cmd; for SEQUENCE: [1..4]=ucode_mode, quant,
 * flush_flag, timeout; for IDR/NON_IDR: [1..16]=frame parameters,
 * optionally followed by three 32-byte quant tables + three weights
 * (when quant == ADJUSTED_QP_FLAG), CBR words, and DMA plane fds.
 * The data_offset arithmetic must match the userspace packer exactly.
 *
 * Returns 0 on success, -1 on bad cmd / missing wq / DMA import failure.
 */
static s32 convert_request(struct encode_wq_s *wq, u32 *cmd_info)
{
	int i = 0;
	u8 *ptr;
	u32 data_offset;
	u32 cmd = cmd_info[0];
	unsigned long paddr = 0;
	struct enc_dma_cfg *cfg = NULL;
	s32 ret = 0;
	struct platform_device *pdev;

	if (!wq)
		return -1;
	/* Start from a clean request with default weights. */
	memset(&wq->request, 0, sizeof(struct encode_request_s));
	wq->request.me_weight = ME_WEIGHT_OFFSET;
	wq->request.i4_weight = I4MB_WEIGHT_OFFSET;
	wq->request.i16_weight = I16MB_WEIGHT_OFFSET;

	if (cmd == ENCODER_SEQUENCE) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.quant = cmd_info[2];
		wq->request.flush_flag = cmd_info[3];
		/* NOTE(review): the user-supplied timeout is immediately
		 * overridden with a fixed 5s -- presumably intentional. */
		wq->request.timeout = cmd_info[4];
		wq->request.timeout = 5000; /* 5000 ms */
	} else if ((cmd == ENCODER_IDR) || (cmd == ENCODER_NON_IDR)) {
		wq->request.cmd = cmd;
		wq->request.ucode_mode = cmd_info[1];
		wq->request.type = cmd_info[2];
		wq->request.fmt = cmd_info[3];
		wq->request.src = cmd_info[4];
		wq->request.framesize = cmd_info[5];
		wq->request.quant = cmd_info[6];
		wq->request.flush_flag = cmd_info[7];
		wq->request.timeout = cmd_info[8];
		wq->request.crop_top = cmd_info[9];
		wq->request.crop_bottom = cmd_info[10];
		wq->request.crop_left = cmd_info[11];
		wq->request.crop_right = cmd_info[12];
		wq->request.src_w = cmd_info[13];
		wq->request.src_h = cmd_info[14];
		wq->request.scale_enable = cmd_info[15];

		/* NOTE(review): pr_err used for informational logging. */
		pr_err("hwenc: wq->pic.encoder_width:%d, wq->pic.encoder_height:%d, request fmt=%d\n",
			wq->pic.encoder_width, wq->pic.encoder_height, wq->request.fmt);

		/* Force scaling for >=720p RGBA input regardless of what
		 * userspace requested. */
		if (wq->pic.encoder_width >= 1280 && wq->pic.encoder_height >= 720 && wq->request.fmt == FMT_RGBA8888) {
			wq->request.scale_enable = 1;
			wq->request.src_w = wq->pic.encoder_width;
			wq->request.src_h = wq->pic.encoder_height;
			pr_err("hwenc: force wq->request.scale_enable=%d\n", wq->request.scale_enable);
		}

		/* Module parameter nr_mode overrides the request value;
		 * noise reduction is disabled on IDR frames. */
		wq->request.nr_mode =
			(nr_mode > 0) ? nr_mode : cmd_info[16];
		if (cmd == ENCODER_IDR)
			wq->request.nr_mode = 0;

		/* Word index just past the three quant tables. */
		data_offset = 17 +
				(sizeof(wq->quant_tbl_i4)
				+ sizeof(wq->quant_tbl_i16)
				+ sizeof(wq->quant_tbl_me)) / 4;

		if (wq->request.quant == ADJUSTED_QP_FLAG) {
			/* Caller supplied per-table QP data followed by
			 * three weight adjustments (subtracted from the
			 * defaults set above). */
			ptr = (u8 *) &cmd_info[17];
			memcpy(wq->quant_tbl_i4, ptr,
				sizeof(wq->quant_tbl_i4));
			ptr += sizeof(wq->quant_tbl_i4);
			memcpy(wq->quant_tbl_i16, ptr,
				sizeof(wq->quant_tbl_i16));
			ptr += sizeof(wq->quant_tbl_i16);
			memcpy(wq->quant_tbl_me, ptr,
				sizeof(wq->quant_tbl_me));
			wq->request.i4_weight -=
				cmd_info[data_offset++];
			wq->request.i16_weight -=
				cmd_info[data_offset++];
			wq->request.me_weight -=
				cmd_info[data_offset++];
			if (qp_table_debug) {
				/* Dump all three 32-byte QP tables. */
				u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]);

				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_i16[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");

				qp_tb = (u8 *)(&wq->quant_tbl_me[0]);
				for (i = 0; i < 32; i++) {
					enc_pr(LOG_INFO, "%d ", *qp_tb);
					qp_tb++;
				}
				enc_pr(LOG_INFO, "\n");
			}
		} else {
			/* Flat QP: fill every table with the single
			 * requested quant value; skip the 3 weight words. */
			memset(wq->quant_tbl_me, wq->request.quant,
				sizeof(wq->quant_tbl_me));
			memset(wq->quant_tbl_i4, wq->request.quant,
				sizeof(wq->quant_tbl_i4));
			memset(wq->quant_tbl_i16, wq->request.quant,
				sizeof(wq->quant_tbl_i16));
			data_offset += 3;
		}
#ifdef H264_ENC_CBR
		/* Four CBR config words follow the weights. */
		wq->cbr_info.block_w = cmd_info[data_offset++];
		wq->cbr_info.block_h = cmd_info[data_offset++];
		wq->cbr_info.long_th = cmd_info[data_offset++];
		wq->cbr_info.start_tbl_id = cmd_info[data_offset++];
		wq->cbr_info.short_shift = CBR_SHORT_SHIFT;
		wq->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
#endif
		/* Recompute absolute offset of the DMA section: tables +
		 * 3 weights + 4 CBR words = +7.
		 * NOTE(review): assumes H264_ENC_CBR is enabled; without
		 * it the +7 would skip 4 unconsumed words -- verify. */
		data_offset = 17 +
				(sizeof(wq->quant_tbl_i4)
				+ sizeof(wq->quant_tbl_i16)
				+ sizeof(wq->quant_tbl_me)) / 4 + 7;

		if (wq->request.type == DMA_BUFF) {
			/* Import one dma-buf fd per plane and resolve its
			 * physical address for the hardware. */
			wq->request.plane_num = cmd_info[data_offset++];
			enc_pr(LOG_INFO, "wq->request.plane_num %d\n",
				wq->request.plane_num);
			if (wq->request.fmt == FMT_NV12 ||
				wq->request.fmt == FMT_NV21 ||
				wq->request.fmt == FMT_YUV420) {
				for (i = 0; i < wq->request.plane_num; i++) {
					cfg = &wq->request.dma_cfg[i];
					cfg->dir = DMA_TO_DEVICE;
					cfg->fd = cmd_info[data_offset++];
					pdev = encode_manager.this_pdev;
					cfg->dev = &(pdev->dev);

					ret = enc_dma_buf_get_phys(cfg, &paddr);
					if (ret < 0) {
						enc_pr(LOG_ERROR,
							"import fd %d failed\n",
							cfg->fd);
						cfg->paddr = NULL;
						cfg->vaddr = NULL;
						return -1;
					}
					cfg->paddr = (void *)paddr;
					enc_pr(LOG_INFO, "vaddr %p\n",
						cfg->vaddr);
				}
			} else {
				enc_pr(LOG_ERROR, "error fmt = %d\n",
					wq->request.fmt);
			}
		}

	} else {
		enc_pr(LOG_ERROR, "error cmd = %d, wq: %p.\n",
			cmd, (void *)wq);
		return -1;
	}
	wq->request.parent = wq;
	return 0;
}
2828
/*
 * Kick off one encoder command (sequence/IDR/non-IDR) on the hardware:
 * reload microcode if the ucode mode changed, re-initialize the encoder
 * state when a reset is pending, program per-frame buffers and ME/IE
 * parameters, then hand the command to the MCU via ENCODER_STATUS.
 */
void amvenc_avc_start_cmd(struct encode_wq_s *wq,
			  struct encode_request_s *request)
{
	u32 reload_flag = 0;

	/* Switch microcode if this request targets a different mode. */
	if (request->ucode_mode != encode_manager.ucode_index) {
		encode_manager.ucode_index = request->ucode_mode;
		if (reload_mc(wq)) {
			enc_pr(LOG_ERROR,
				"reload mc fail, wq:%p\n", (void *)wq);
			return;
		}
		reload_flag = 1;
		encode_manager.need_reset = true;
	}

	wq->hw_status = 0;
	wq->output_size = 0;
	wq->ucode_index = encode_manager.ucode_index;

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	/* Full re-init path: reset HW, canvases, encoder, buffers and
	 * protocol registers. Order is significant. */
	if (encode_manager.need_reset) {
		encode_manager.need_reset = false;
		encode_manager.encode_hw_status = ENCODER_IDLE;
		amvenc_reset();
		avc_canvas_init(wq);
		avc_init_encoder(wq,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_input_buffer(wq);
		avc_init_output_buffer(wq);
		avc_prot_init(wq, request, request->quant,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_assit_buffer(wq);
		enc_pr(LOG_INFO,
			"begin to new frame, request->cmd: %d, ucode mode: %d, wq:%p\n",
			request->cmd, request->ucode_mode, (void *)wq);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR)) {
#ifdef H264_ENC_SVC
		/* encode non reference frame or not */
		if (request->cmd == ENCODER_IDR)
			wq->pic.non_ref_cnt = 0; //IDR reset counter
		if (wq->pic.enable_svc && wq->pic.non_ref_cnt) {
			enc_pr(LOG_INFO,
				"PIC is NON REF cmd %d cnt %d value 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_NON_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_NON_REF);
		} else {
			enc_pr(LOG_INFO,
				"PIC is REF cmd %d cnt %d val 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
		}
#else
		/* if FW defined but not defined SVC in driver here*/
		WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
#endif
		/* Deblocking + reference canvases must be set before
		 * every frame. */
		avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
		avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR))
		set_input_format(wq, request);

	/* Macroblock type selection: forced I4MB on IDR, automatic on
	 * non-IDR, none otherwise. */
	if (request->cmd == ENCODER_IDR)
		ie_me_mb_type = HENC_MB_Type_I4MB;
	else if (request->cmd == ENCODER_NON_IDR)
		ie_me_mb_type =
			(HENC_SKIP_RUN_AUTO << 16) |
			(HENC_MB_Type_AUTO << 4) |
			(HENC_MB_Type_AUTO << 0);
	else
		ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, request->quant);

#ifdef MULTI_SLICE_MC
	/* Slice configuration: fixed override, rows-per-slice derived
	 * MB count, or single slice. */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
			(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif

	/* Hand the command to the MCU and clear the IRQ bookkeeping. */
	encode_manager.encode_hw_status = request->cmd;
	wq->hw_status = request->cmd;
	WRITE_HREG(ENCODER_STATUS, request->cmd);
	if ((request->cmd == ENCODER_IDR)
		|| (request->cmd == ENCODER_NON_IDR)
		|| (request->cmd == ENCODER_SEQUENCE)
		|| (request->cmd == ENCODER_PICTURE))
		encode_manager.process_irq = false;

	/* Only restart the MCU when new microcode was loaded. */
	if (reload_flag)
		amvenc_start();
	enc_pr(LOG_ALL, "amvenc_avc_start cmd out, request:%p.\n", (void*)request);
}
2935
2936static void dma_flush(u32 buf_start, u32 buf_size)
2937{
2938 if ((buf_start == 0) || (buf_size == 0))
2939 return;
2940 dma_sync_single_for_device(
2941 &encode_manager.this_pdev->dev, buf_start,
2942 buf_size, DMA_TO_DEVICE);
2943}
2944
2945static void cache_flush(u32 buf_start, u32 buf_size)
2946{
2947 if ((buf_start == 0) || (buf_size == 0))
2948 return;
2949 dma_sync_single_for_cpu(
2950 &encode_manager.this_pdev->dev, buf_start,
2951 buf_size, DMA_FROM_DEVICE);
2952}
2953
2954static u32 getbuffer(struct encode_wq_s *wq, u32 type)
2955{
2956 u32 ret = 0;
2957
2958 switch (type) {
2959 case ENCODER_BUFFER_INPUT:
2960 ret = wq->mem.dct_buff_start_addr;
2961 break;
2962 case ENCODER_BUFFER_REF0:
2963 ret = wq->mem.dct_buff_start_addr +
2964 wq->mem.bufspec.dec0_y.buf_start;
2965 break;
2966 case ENCODER_BUFFER_REF1:
2967 ret = wq->mem.dct_buff_start_addr +
2968 wq->mem.bufspec.dec1_y.buf_start;
2969 break;
2970 case ENCODER_BUFFER_OUTPUT:
2971 ret = wq->mem.BitstreamStart;
2972 break;
2973 case ENCODER_BUFFER_DUMP:
2974 ret = wq->mem.dump_info_ddr_start_addr;
2975 break;
2976 case ENCODER_BUFFER_CBR:
2977 ret = wq->mem.cbr_info_ddr_start_addr;
2978 break;
2979 default:
2980 break;
2981 }
2982 return ret;
2983}
2984
/*
 * Full encoder bring-up for a work queue: power on at the requested
 * clock, load microcode, reset and initialize all encoder state and
 * buffers, install the interrupt handler, and start the MCU.
 * Returns 0 on success, -EBUSY if the microcode load fails.
 */
s32 amvenc_avc_start(struct encode_wq_s *wq, u32 clock)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	avc_poweron(clock);
	avc_canvas_init(wq);

	/* 0x32: assist MMC control -- same value as reload_mc(). */
	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;

	encode_manager.need_reset = true;
	encode_manager.process_irq = false;
	encode_manager.encode_hw_status = ENCODER_IDLE;
	amvenc_reset();
	avc_init_encoder(wq, true);
	avc_init_input_buffer(wq); /* dct buffer setting */
	avc_init_output_buffer(wq); /* output stream buffer */

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	avc_prot_init(wq, NULL, wq->pic.init_qppicture, true);
	/* Failure to get the IRQ is tolerated; only the flag is kept. */
	if (request_irq(encode_manager.irq_num, enc_isr, IRQF_SHARED,
		"enc-irq", (void *)&encode_manager) == 0)
		encode_manager.irq_requested = true;
	else
		encode_manager.irq_requested = false;

	/* decoder buffer , need set before each frame start */
	avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
	/* reference buffer , need set before each frame start */
	avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	avc_init_assit_buffer(wq); /* assitant buffer for microcode */
	ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, wq->pic.init_qppicture);
	WRITE_HREG(ENCODER_STATUS, ENCODER_IDLE);

#ifdef MULTI_SLICE_MC
	/* Slice configuration -- same scheme as amvenc_avc_start_cmd(). */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
			(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
	amvenc_start();
	return 0;
}
3039
3040void amvenc_avc_stop(void)
3041{
3042 if ((encode_manager.irq_num >= 0) &&
3043 (encode_manager.irq_requested == true)) {
3044 free_irq(encode_manager.irq_num, &encode_manager);
3045 encode_manager.irq_requested = false;
3046 }
3047 amvenc_stop();
3048 avc_poweroff();
3049}
3050
3051static s32 avc_init(struct encode_wq_s *wq)
3052{
3053 s32 r = 0;
3054
3055 encode_manager.ucode_index = wq->ucode_index;
3056 r = amvenc_avc_start(wq, clock_level);
3057
3058 enc_pr(LOG_DEBUG,
3059 "init avc encode. microcode %d, ret=%d, wq:%p.\n",
3060 encode_manager.ucode_index, r, (void *)wq);
3061 return 0;
3062}
3063
3064static s32 amvenc_avc_light_reset(struct encode_wq_s *wq, u32 value)
3065{
3066 s32 r = 0;
3067
3068 amvenc_avc_stop();
3069
3070 mdelay(value);
3071
3072 encode_manager.ucode_index = UCODE_MODE_FULL;
3073 r = amvenc_avc_start(wq, clock_level);
3074
3075 enc_pr(LOG_DEBUG,
3076 "amvenc_avc_light_reset finish, wq:%p. ret=%d\n",
3077 (void *)wq, r);
3078 return r;
3079}
3080
3081#ifdef CONFIG_CMA
3082static u32 checkCMA(void)
3083{
3084 u32 ret;
3085
3086 if (encode_manager.cma_pool_size > 0) {
3087 ret = encode_manager.cma_pool_size;
3088 ret = ret / MIN_SIZE;
3089 } else
3090 ret = 0;
3091 return ret;
3092}
3093#endif
3094
3095/* file operation */
/*
 * open() handler for the encoder device node: create a work queue for
 * this fd, back it with CMA memory when no reserved region exists, and
 * stash the queue in file->private_data. Fails with -EBUSY if the
 * JPEG encoder holds the hardware or the queue cannot be created, and
 * -ENOMEM when buffer allocation fails.
 */
static s32 amvenc_avc_open(struct inode *inode, struct file *file)
{
	s32 r = 0;
	struct encode_wq_s *wq = NULL;

	file->private_data = NULL;
	enc_pr(LOG_DEBUG, "avc open\n");
#ifdef CONFIG_AM_JPEG_ENCODER
	/* HCODEC is shared with the JPEG encoder. */
	if (jpegenc_on() == true) {
		enc_pr(LOG_ERROR,
			"hcodec in use for JPEG Encode now.\n");
		return -EBUSY;
	}
#endif

#ifdef CONFIG_CMA
	/* One-time check of the CMA pool capacity (instances = pool size
	 * / MIN_SIZE); a too-small pool is only logged, not fatal here. */
	if ((encode_manager.use_reserve == false) &&
		(encode_manager.check_cma == false)) {
		encode_manager.max_instance = checkCMA();
		if (encode_manager.max_instance > 0) {
			enc_pr(LOG_DEBUG,
				"amvenc_avc check CMA pool success, max instance: %d.\n",
				encode_manager.max_instance);
		} else {
			enc_pr(LOG_ERROR,
				"amvenc_avc CMA pool too small.\n");
		}
		encode_manager.check_cma = true;
	}
#endif

	wq = create_encode_work_queue();
	if (wq == NULL) {
		enc_pr(LOG_ERROR, "amvenc_avc create instance fail.\n");
		return -EBUSY;
	}

#ifdef CONFIG_CMA
	/* Without a reserved region, allocate the per-instance DMA
	 * buffer from the codec_mm CMA pool. */
	if (encode_manager.use_reserve == false) {
		wq->mem.buf_start = codec_mm_alloc_for_dma(ENCODE_NAME,
			MIN_SIZE >> PAGE_SHIFT, 0,
			CODEC_MM_FLAGS_CPU);
		if (wq->mem.buf_start) {
			wq->mem.buf_size = MIN_SIZE;
			enc_pr(LOG_DEBUG,
				"allocating phys 0x%x, size %dk, wq:%p.\n",
				wq->mem.buf_start,
				wq->mem.buf_size >> 10, (void *)wq);
		} else {
			enc_pr(LOG_ERROR,
				"CMA failed to allocate dma buffer for %s, wq:%p.\n",
				encode_manager.this_pdev->name,
				(void *)wq);
			destroy_encode_work_queue(wq);
			return -ENOMEM;
		}
	}
#endif

	/* Sanity-check whatever memory path was used (reserved or CMA). */
	if (wq->mem.buf_start == 0 ||
		wq->mem.buf_size < MIN_SIZE) {
		enc_pr(LOG_ERROR,
			"alloc mem failed, start: 0x%x, size:0x%x, wq:%p.\n",
			wq->mem.buf_start,
			wq->mem.buf_size, (void *)wq);
		destroy_encode_work_queue(wq);
		return -ENOMEM;
	}

	/* Adopt the default buffer layout for this instance. */
	memcpy(&wq->mem.bufspec, &amvenc_buffspec[0],
		sizeof(struct BuffInfo_s));

	enc_pr(LOG_DEBUG,
		"amvenc_avc memory config success, buff start:0x%x, size is 0x%x, wq:%p.\n",
		wq->mem.buf_start, wq->mem.buf_size, (void *)wq);

	file->private_data = (void *) wq;
	return r;
}
3175
3176static s32 amvenc_avc_release(struct inode *inode, struct file *file)
3177{
3178 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3179
3180 if (wq) {
3181 enc_pr(LOG_DEBUG, "avc release, wq:%p\n", (void *)wq);
3182 destroy_encode_work_queue(wq);
3183 }
3184 return 0;
3185}
3186
3187static long amvenc_avc_ioctl(struct file *file, u32 cmd, ulong arg)
3188{
3189 long r = 0;
3190 u32 amrisc_cmd = 0;
3191 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3192#define MAX_ADDR_INFO_SIZE 52
3193 u32 addr_info[MAX_ADDR_INFO_SIZE + 4];
3194 ulong argV;
3195 u32 buf_start;
3196 s32 canvas = -1;
3197 struct canvas_s dst;
3198
3199 switch (cmd) {
3200 case AMVENC_AVC_IOC_GET_ADDR:
3201 if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET))
3202 put_user(1, (u32 *)arg);
3203 else
3204 put_user(2, (u32 *)arg);
3205 break;
3206 case AMVENC_AVC_IOC_INPUT_UPDATE:
3207 break;
3208 case AMVENC_AVC_IOC_NEW_CMD:
3209 if (copy_from_user(addr_info, (void *)arg,
3210 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3211 enc_pr(LOG_ERROR,
3212 "avc get new cmd error, wq:%p.\n", (void *)wq);
3213 return -1;
3214 }
3215 r = convert_request(wq, addr_info);
3216 if (r == 0)
3217 r = encode_wq_add_request(wq);
3218 if (r) {
3219 enc_pr(LOG_ERROR,
3220 "avc add new request error, wq:%p.\n",
3221 (void *)wq);
3222 }
3223 break;
3224 case AMVENC_AVC_IOC_GET_STAGE:
3225 put_user(wq->hw_status, (u32 *)arg);
3226 break;
3227 case AMVENC_AVC_IOC_GET_OUTPUT_SIZE:
3228 addr_info[0] = wq->output_size;
3229 addr_info[1] = wq->me_weight;
3230 addr_info[2] = wq->i4_weight;
3231 addr_info[3] = wq->i16_weight;
3232 r = copy_to_user((u32 *)arg,
3233 addr_info, 4 * sizeof(u32));
3234 break;
3235 case AMVENC_AVC_IOC_CONFIG_INIT:
3236 if (copy_from_user(addr_info, (void *)arg,
3237 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3238 enc_pr(LOG_ERROR,
3239 "avc config init error, wq:%p.\n", (void *)wq);
3240 return -1;
3241 }
3242 wq->ucode_index = UCODE_MODE_FULL;
3243#ifdef MULTI_SLICE_MC
3244 wq->pic.rows_per_slice = addr_info[1];
3245 enc_pr(LOG_DEBUG,
3246 "avc init -- rows_per_slice: %d, wq: %p.\n",
3247 wq->pic.rows_per_slice, (void *)wq);
3248#endif
3249 enc_pr(LOG_DEBUG,
3250 "avc init as mode %d, wq: %p.\n",
3251 wq->ucode_index, (void *)wq);
3252
3253 if (addr_info[2] > wq->mem.bufspec.max_width ||
3254 addr_info[3] > wq->mem.bufspec.max_height) {
3255 enc_pr(LOG_ERROR,
3256 "avc config init- encode size %dx%d is larger than supported (%dx%d). wq:%p.\n",
3257 addr_info[2], addr_info[3],
3258 wq->mem.bufspec.max_width,
3259 wq->mem.bufspec.max_height, (void *)wq);
3260 return -1;
3261 }
3262 pr_err("hwenc: AMVENC_AVC_IOC_CONFIG_INIT: w:%d, h:%d\n", wq->pic.encoder_width, wq->pic.encoder_height);
3263 wq->pic.encoder_width = addr_info[2];
3264 wq->pic.encoder_height = addr_info[3];
3265 if (wq->pic.encoder_width *
3266 wq->pic.encoder_height >= 1280 * 720)
3267 clock_level = 6;
3268 else
3269 clock_level = 5;
3270 avc_buffspec_init(wq);
3271 complete(&encode_manager.event.request_in_com);
3272 addr_info[1] = wq->mem.bufspec.dct.buf_start;
3273 addr_info[2] = wq->mem.bufspec.dct.buf_size;
3274 addr_info[3] = wq->mem.bufspec.bitstream.buf_start;
3275 addr_info[4] = wq->mem.bufspec.bitstream.buf_size;
3276 addr_info[5] = wq->mem.bufspec.scale_buff.buf_start;
3277 addr_info[6] = wq->mem.bufspec.scale_buff.buf_size;
3278 addr_info[7] = wq->mem.bufspec.dump_info.buf_start;
3279 addr_info[8] = wq->mem.bufspec.dump_info.buf_size;
3280 addr_info[9] = wq->mem.bufspec.cbr_info.buf_start;
3281 addr_info[10] = wq->mem.bufspec.cbr_info.buf_size;
3282 r = copy_to_user((u32 *)arg, addr_info, 11*sizeof(u32));
3283 break;
3284 case AMVENC_AVC_IOC_FLUSH_CACHE:
3285 if (copy_from_user(addr_info, (void *)arg,
3286 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3287 enc_pr(LOG_ERROR,
3288 "avc flush cache error, wq: %p.\n", (void *)wq);
3289 return -1;
3290 }
3291 buf_start = getbuffer(wq, addr_info[0]);
3292 dma_flush(buf_start + addr_info[1],
3293 addr_info[2] - addr_info[1]);
3294 break;
3295 case AMVENC_AVC_IOC_FLUSH_DMA:
3296 if (copy_from_user(addr_info, (void *)arg,
3297 MAX_ADDR_INFO_SIZE * sizeof(u32))) {
3298 enc_pr(LOG_ERROR,
3299 "avc flush dma error, wq:%p.\n", (void *)wq);
3300 return -1;
3301 }
3302 buf_start = getbuffer(wq, addr_info[0]);
3303 cache_flush(buf_start + addr_info[1],
3304 addr_info[2] - addr_info[1]);
3305 break;
3306 case AMVENC_AVC_IOC_GET_BUFFINFO:
3307 put_user(wq->mem.buf_size, (u32 *)arg);
3308 break;
3309 case AMVENC_AVC_IOC_GET_DEVINFO:
3310 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) {
3311 /* send the same id as GXTVBB to upper*/
3312 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3313 strlen(AMVENC_DEVINFO_GXTVBB));
3314 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) {
3315 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB,
3316 strlen(AMVENC_DEVINFO_GXTVBB));
3317 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) {
3318 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXBB,
3319 strlen(AMVENC_DEVINFO_GXBB));
3320 } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_MG9TV) {
3321 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_G9,
3322 strlen(AMVENC_DEVINFO_G9));
3323 } else {
3324 r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_M8,
3325 strlen(AMVENC_DEVINFO_M8));
3326 }
3327 break;
3328 case AMVENC_AVC_IOC_SUBMIT:
3329 get_user(amrisc_cmd, ((u32 *)arg));
3330 if (amrisc_cmd == ENCODER_IDR) {
3331 wq->pic.idr_pic_id++;
3332 if (wq->pic.idr_pic_id > 65535)
3333 wq->pic.idr_pic_id = 0;
3334 wq->pic.pic_order_cnt_lsb = 2;
3335 wq->pic.frame_number = 1;
3336 } else if (amrisc_cmd == ENCODER_NON_IDR) {
3337#ifdef H264_ENC_SVC
3338 /* only update when there is reference frame */
3339 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3340 wq->pic.frame_number++;
3341 enc_pr(LOG_INFO, "Increase frame_num to %d\n",
3342 wq->pic.frame_number);
3343 }
3344#else
3345 wq->pic.frame_number++;
3346#endif
3347
3348 wq->pic.pic_order_cnt_lsb += 2;
3349 if (wq->pic.frame_number > 65535)
3350 wq->pic.frame_number = 0;
3351 }
3352#ifdef H264_ENC_SVC
3353 /* only update when there is reference frame */
3354 if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) {
3355 amrisc_cmd = wq->mem.dblk_buf_canvas;
3356 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3357 /* current dblk buffer as next reference buffer */
3358 wq->mem.ref_buf_canvas = amrisc_cmd;
3359 enc_pr(LOG_INFO,
3360 "switch buffer enable %d cnt %d\n",
3361 wq->pic.enable_svc, wq->pic.non_ref_cnt);
3362 }
3363 if (wq->pic.enable_svc) {
3364 wq->pic.non_ref_cnt ++;
3365 if (wq->pic.non_ref_cnt > wq->pic.non_ref_limit) {
3366 enc_pr(LOG_INFO, "Svc clear cnt %d conf %d\n",
3367 wq->pic.non_ref_cnt,
3368 wq->pic.non_ref_limit);
3369 wq->pic.non_ref_cnt = 0;
3370 } else
3371 enc_pr(LOG_INFO,"Svc increase non ref counter to %d\n",
3372 wq->pic.non_ref_cnt );
3373 }
3374#else
3375 amrisc_cmd = wq->mem.dblk_buf_canvas;
3376 wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas;
3377 /* current dblk buffer as next reference buffer */
3378 wq->mem.ref_buf_canvas = amrisc_cmd;
3379#endif
3380 break;
3381 case AMVENC_AVC_IOC_READ_CANVAS:
3382 get_user(argV, ((u32 *)arg));
3383 canvas = argV;
3384 if (canvas & 0xff) {
3385 canvas_read(canvas & 0xff, &dst);
3386 addr_info[0] = dst.addr;
3387 if ((canvas & 0xff00) >> 8)
3388 canvas_read((canvas & 0xff00) >> 8, &dst);
3389 if ((canvas & 0xff0000) >> 16)
3390 canvas_read((canvas & 0xff0000) >> 16, &dst);
3391 addr_info[1] = dst.addr - addr_info[0] +
3392 dst.width * dst.height;
3393 } else {
3394 addr_info[0] = 0;
3395 addr_info[1] = 0;
3396 }
3397 dma_flush(dst.addr, dst.width * dst.height * 3 / 2);
3398 r = copy_to_user((u32 *)arg, addr_info, 2 * sizeof(u32));
3399 break;
3400 case AMVENC_AVC_IOC_MAX_INSTANCE:
3401 put_user(encode_manager.max_instance, (u32 *)arg);
3402 break;
3403 case AMVENC_AVC_IOC_QP_MODE:
3404 get_user(qp_mode, ((u32 *)arg));
3405 pr_info("qp_mode %d\n", qp_mode);
3406 break;
3407 default:
3408 r = -1;
3409 break;
3410 }
3411 return r;
3412}
3413
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat shim: translate the user pointer with compat_ptr()
 * and forward to the regular ioctl handler.
 */
static long amvenc_avc_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	return amvenc_avc_ioctl(filp, cmd,
		(unsigned long)compat_ptr(args));
}
#endif
3425
3426static s32 avc_mmap(struct file *filp, struct vm_area_struct *vma)
3427{
3428 struct encode_wq_s *wq = (struct encode_wq_s *)filp->private_data;
3429 ulong off = vma->vm_pgoff << PAGE_SHIFT;
3430 ulong vma_size = vma->vm_end - vma->vm_start;
3431
3432 if (vma_size == 0) {
3433 enc_pr(LOG_ERROR, "vma_size is 0, wq:%p.\n", (void *)wq);
3434 return -EAGAIN;
3435 }
3436 if (!off)
3437 off += wq->mem.buf_start;
3438 enc_pr(LOG_ALL,
3439 "vma_size is %ld , off is %ld, wq:%p.\n",
3440 vma_size, off, (void *)wq);
3441 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
3442 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
3443 if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
3444 vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
3445 enc_pr(LOG_ERROR,
3446 "set_cached: failed remap_pfn_range, wq:%p.\n",
3447 (void *)wq);
3448 return -EAGAIN;
3449 }
3450 return 0;
3451}
3452
3453static u32 amvenc_avc_poll(struct file *file, poll_table *wait_table)
3454{
3455 struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data;
3456
3457 poll_wait(file, &wq->request_complete, wait_table);
3458
3459 if (atomic_read(&wq->request_ready)) {
3460 atomic_dec(&wq->request_ready);
3461 return POLLIN | POLLRDNORM;
3462 }
3463 return 0;
3464}
3465
/* Character-device entry points for the AVC hardware encoder node. */
static const struct file_operations amvenc_avc_fops = {
	.owner = THIS_MODULE,
	.open = amvenc_avc_open,
	.mmap = avc_mmap,
	.release = amvenc_avc_release,
	.unlocked_ioctl = amvenc_avc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amvenc_avc_compat_ioctl,
#endif
	.poll = amvenc_avc_poll,
};
3477
3478/* work queue function */
/* work queue function */
/*
 * encode_process_request() - run one queued request on the hardware.
 *
 * Kicks the AMRISC firmware with amvenc_avc_start_cmd(), waits for the
 * ISR to report a DONE state (optionally without timeout), then
 * collects sizes and flushes the caches the caller asked for via
 * request->flush_flag. On any non-DONE outcome the hardware is given a
 * light reset. Finally wakes pollers on wq->request_complete.
 */
static s32 encode_process_request(struct encode_manager_s *manager,
	struct encode_queue_item_s *pitem)
{
	s32 ret = 0;
	struct encode_wq_s *wq = pitem->request.parent;
	struct encode_request_s *request = &pitem->request;
	/* timeout 0 means "poll once" (1 jiffy), not "wait forever" */
	u32 timeout = (request->timeout == 0) ?
		1 : msecs_to_jiffies(request->timeout);
	u32 buf_start = 0;
	u32 size = 0;
	/* reference-frame flush size: width rounded up to 32, height to
	 * 16, times 3/2 for the 4:2:0 chroma plane */
	u32 flush_size = ((wq->pic.encoder_width + 31) >> 5 << 5) *
		((wq->pic.encoder_height + 15) >> 4 << 4) * 3 / 2;

	struct enc_dma_cfg *cfg = NULL;
	int i = 0;

#ifdef H264_ENC_CBR
	/* Push the CBR rate-control table to the firmware before an
	 * IDR/P frame, when the caller requested it and the SoC
	 * supports it. */
	if (request->cmd == ENCODER_IDR || request->cmd == ENCODER_NON_IDR) {
		if (request->flush_flag & AMVENC_FLUSH_FLAG_CBR
			&& get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
			void *vaddr = wq->mem.cbr_info_ddr_virt_addr;
			ConvertTable2Risc(vaddr, 0xa00);
			buf_start = getbuffer(wq, ENCODER_BUFFER_CBR);
			codec_mm_dma_flush(vaddr, wq->mem.cbr_info_ddr_size, DMA_TO_DEVICE);
		}
	}
#endif

Again:
	amvenc_avc_start_cmd(wq, request);

	/* Block until the interrupt tasklet records a DONE status. */
	if (no_timeout) {
		wait_event_interruptible(manager->event.hw_complete,
			(manager->encode_hw_status == ENCODER_IDR_DONE
			|| manager->encode_hw_status == ENCODER_NON_IDR_DONE
			|| manager->encode_hw_status == ENCODER_SEQUENCE_DONE
			|| manager->encode_hw_status == ENCODER_PICTURE_DONE));
	} else {
		wait_event_interruptible_timeout(manager->event.hw_complete,
			((manager->encode_hw_status == ENCODER_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
			|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)),
			timeout);
	}

	/* SPS done -> immediately chain the PPS (PICTURE) pass. */
	if ((request->cmd == ENCODER_SEQUENCE) &&
	    (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)) {
		wq->sps_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
		wq->hw_status = manager->encode_hw_status;
		request->cmd = ENCODER_PICTURE;
		goto Again;
	} else if ((request->cmd == ENCODER_PICTURE) &&
		   (manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		/* PPS size = total VLC bytes minus the SPS portion. */
		wq->pps_size =
			READ_HREG(HCODEC_VLC_TOTAL_BYTES) - wq->sps_size;
		wq->hw_status = manager->encode_hw_status;
		if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
			buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
			cache_flush(buf_start,
				wq->sps_size + wq->pps_size);
		}
		/* pack both sizes into one word: SPS high, PPS low */
		wq->output_size = (wq->sps_size << 16) | wq->pps_size;
	} else {
		wq->hw_status = manager->encode_hw_status;
		if ((manager->encode_hw_status == ENCODER_IDR_DONE) ||
		    (manager->encode_hw_status == ENCODER_NON_IDR_DONE)) {
			wq->output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
			if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_OUTPUT);
				cache_flush(buf_start, wq->output_size);
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_DUMP) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_DUMP);
				size = wq->mem.dump_info_ddr_size;
				cache_flush(buf_start, size);
				//enc_pr(LOG_DEBUG, "CBR flush dump_info done");
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_REFERENCE) {
				u32 ref_id = ENCODER_BUFFER_REF0;

				/* pick whichever canvas currently holds
				 * the reference frame */
				if ((wq->mem.ref_buf_canvas & 0xff) ==
					(ENC_CANVAS_OFFSET))
					ref_id = ENCODER_BUFFER_REF0;
				else
					ref_id = ENCODER_BUFFER_REF1;
				buf_start = getbuffer(wq, ref_id);
				cache_flush(buf_start, flush_size);
			}
		} else {
			/* timeout or unexpected status: dump registers
			 * and light-reset the encoder core */
			manager->encode_hw_status = ENCODER_ERROR;
			enc_pr(LOG_DEBUG, "avc encode light reset --- ");
			enc_pr(LOG_DEBUG,
				"frame type: %s, size: %dx%d, wq: %p\n",
				(request->cmd == ENCODER_IDR) ? "IDR" : "P",
				wq->pic.encoder_width,
				wq->pic.encoder_height, (void *)wq);
			enc_pr(LOG_DEBUG,
				"mb info: 0x%x, encode status: 0x%x, dct status: 0x%x ",
				READ_HREG(HCODEC_VLC_MB_INFO),
				READ_HREG(ENCODER_STATUS),
				READ_HREG(HCODEC_QDCT_STATUS_CTRL));
			enc_pr(LOG_DEBUG,
				"vlc status: 0x%x, me status: 0x%x, risc pc:0x%x, debug:0x%x\n",
				READ_HREG(HCODEC_VLC_STATUS_CTRL),
				READ_HREG(HCODEC_ME_STATUS),
				READ_HREG(HCODEC_MPC_E),
				READ_HREG(DEBUG_REG));
			amvenc_avc_light_reset(wq, 30);
		}
		/* release any dma-buf input planes mapped for this frame */
		for (i = 0; i < request->plane_num; i++) {
			cfg = &request->dma_cfg[i];
			enc_pr(LOG_INFO, "request vaddr %p, paddr %p\n",
				cfg->vaddr, cfg->paddr);
			if (cfg->fd >= 0 && cfg->vaddr != NULL)
				enc_dma_buf_unmap(cfg);
		}
	}
	/* signal poll()ers that a result is available */
	atomic_inc(&wq->request_ready);
	wake_up_interruptible(&wq->request_complete);
	return ret;
}
3605
/*
 * encode_wq_add_request() - snapshot wq->request and queue it for the
 * monitor thread.
 *
 * Verifies @wq is registered with the manager, takes a free queue item,
 * copies the pending request into it, resets wq->request for the next
 * command, and moves the item to the process queue. Runs entirely under
 * the manager's sem_lock. Returns 0 on success, -1 on any failure.
 */
s32 encode_wq_add_request(struct encode_wq_s *wq)
{
	struct encode_queue_item_s *pitem = NULL;
	struct list_head *head = NULL;
	struct encode_wq_s *tmp = NULL;
	bool find = false;

	spin_lock(&encode_manager.event.sem_lock);

	/* reject work queues that were never registered (or already
	 * destroyed) */
	head = &encode_manager.wq;
	list_for_each_entry(tmp, head, list) {
		if ((wq == tmp) && (wq != NULL)) {
			find = true;
			break;
		}
	}

	if (find == false) {
		enc_pr(LOG_ERROR, "current wq (%p) doesn't register.\n",
			(void *)wq);
		goto error;
	}

	if (list_empty(&encode_manager.free_queue)) {
		enc_pr(LOG_ERROR, "work queue no space, wq:%p.\n",
			(void *)wq);
		goto error;
	}

	pitem = list_entry(encode_manager.free_queue.next,
		struct encode_queue_item_s, list);
	/* NOTE(review): list_entry() of a non-empty list cannot yield an
	 * ERR_PTR, so this check never fires — dead code, kept as-is. */
	if (IS_ERR(pitem))
		goto error;

	memcpy(&pitem->request, &wq->request, sizeof(struct encode_request_s));

	enc_pr(LOG_INFO, "new work request %p, vaddr %p, paddr %p\n", &pitem->request,
		pitem->request.dma_cfg[0].vaddr,pitem->request.dma_cfg[0].paddr);

	/* reset the staging request; fd = -1 marks each plane as unused */
	memset(&wq->request, 0, sizeof(struct encode_request_s));
	wq->request.dma_cfg[0].fd = -1;
	wq->request.dma_cfg[1].fd = -1;
	wq->request.dma_cfg[2].fd = -1;
	wq->hw_status = 0;
	wq->output_size = 0;
	pitem->request.parent = wq;
	list_move_tail(&pitem->list, &encode_manager.process_queue);
	spin_unlock(&encode_manager.event.sem_lock);

	enc_pr(LOG_INFO,
		"add new work ok, cmd:%d, ucode mode: %d, wq:%p.\n",
		pitem->request.cmd, pitem->request.ucode_mode,
		(void *)wq);
	complete(&encode_manager.event.request_in_com);/* new cmd come in */
	return 0;
error:
	spin_unlock(&encode_manager.event.sem_lock);
	return -1;
}
3665
3666struct encode_wq_s *create_encode_work_queue(void)
3667{
3668 struct encode_wq_s *encode_work_queue = NULL;
3669 bool done = false;
3670 u32 i, max_instance;
3671 struct Buff_s *reserve_buff;
3672
3673 encode_work_queue = kzalloc(sizeof(struct encode_wq_s), GFP_KERNEL);
3674 if (IS_ERR(encode_work_queue)) {
3675 enc_pr(LOG_ERROR, "can't create work queue\n");
3676 return NULL;
3677 }
3678 max_instance = encode_manager.max_instance;
3679 encode_work_queue->pic.init_qppicture = 26;
3680 encode_work_queue->pic.log2_max_frame_num = 4;
3681 encode_work_queue->pic.log2_max_pic_order_cnt_lsb = 4;
3682 encode_work_queue->pic.idr_pic_id = 0;
3683 encode_work_queue->pic.frame_number = 0;
3684 encode_work_queue->pic.pic_order_cnt_lsb = 0;
3685#ifdef H264_ENC_SVC
3686 /* Get settings from the global*/
3687 encode_work_queue->pic.enable_svc = svc_enable;
3688 encode_work_queue->pic.non_ref_limit = svc_ref_conf;
3689 encode_work_queue->pic.non_ref_cnt = 0;
3690 enc_pr(LOG_INFO, "svc conf enable %d, duration %d\n",
3691 encode_work_queue->pic.enable_svc,
3692 encode_work_queue->pic.non_ref_limit);
3693#endif
3694 encode_work_queue->ucode_index = UCODE_MODE_FULL;
3695
3696#ifdef H264_ENC_CBR
3697 encode_work_queue->cbr_info.block_w = 16;
3698 encode_work_queue->cbr_info.block_h = 9;
3699 encode_work_queue->cbr_info.long_th = CBR_LONG_THRESH;
3700 encode_work_queue->cbr_info.start_tbl_id = START_TABLE_ID;
3701 encode_work_queue->cbr_info.short_shift = CBR_SHORT_SHIFT;
3702 encode_work_queue->cbr_info.long_mb_num = CBR_LONG_MB_NUM;
3703#endif
3704 init_waitqueue_head(&encode_work_queue->request_complete);
3705 atomic_set(&encode_work_queue->request_ready, 0);
3706 spin_lock(&encode_manager.event.sem_lock);
3707 if (encode_manager.wq_count < encode_manager.max_instance) {
3708 list_add_tail(&encode_work_queue->list, &encode_manager.wq);
3709 encode_manager.wq_count++;
3710 if (encode_manager.use_reserve == true) {
3711 for (i = 0; i < max_instance; i++) {
3712 reserve_buff = &encode_manager.reserve_buff[i];
3713 if (reserve_buff->used == false) {
3714 encode_work_queue->mem.buf_start =
3715 reserve_buff->buf_start;
3716 encode_work_queue->mem.buf_size =
3717 reserve_buff->buf_size;
3718 reserve_buff->used = true;
3719 done = true;
3720 break;
3721 }
3722 }
3723 } else
3724 done = true;
3725 }
3726 spin_unlock(&encode_manager.event.sem_lock);
3727 if (done == false) {
3728 kfree(encode_work_queue);
3729 encode_work_queue = NULL;
3730 enc_pr(LOG_ERROR, "too many work queue!\n");
3731 }
3732 return encode_work_queue; /* find it */
3733}
3734
/*
 * _destroy_encode_work_queue() - unlink @encode_work_queue from the
 * manager's list.
 *
 * Caller must hold manager->event.sem_lock. On success *find is set and
 * the matching reserve buffer (if reserved memory is in use) is marked
 * free again. *wq is the iteration cursor supplied by the caller; after
 * return it equals @encode_work_queue only when the queue was found.
 */
static void _destroy_encode_work_queue(struct encode_manager_s *manager,
	struct encode_wq_s **wq,
	struct encode_wq_s *encode_work_queue,
	bool *find)
{
	struct list_head *head;
	struct encode_wq_s *wp_tmp = NULL;
	u32 i, max_instance;
	struct Buff_s *reserve_buff;
	u32 buf_start = encode_work_queue->mem.buf_start;

	max_instance = manager->max_instance;
	head = &manager->wq;
	list_for_each_entry_safe((*wq), wp_tmp, head, list) {
		if ((*wq) && (*wq == encode_work_queue)) {
			list_del(&(*wq)->list);
			if (manager->use_reserve == true) {
				/* return the reserve slot matched by its
				 * buffer start address */
				for (i = 0; i < max_instance; i++) {
					reserve_buff =
						&manager->reserve_buff[i];
					if (reserve_buff->used == true &&
						buf_start ==
						reserve_buff->buf_start) {
						reserve_buff->used = false;
						break;
					}
				}
			}
			*find = true;
			manager->wq_count--;
			enc_pr(LOG_DEBUG,
				"remove encode_work_queue %p success, %s line %d.\n",
				(void *)encode_work_queue,
				__func__, __LINE__);
			break;
		}
	}
}
3773
3774s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue)
3775{
3776 struct encode_queue_item_s *pitem, *tmp;
3777 struct encode_wq_s *wq = NULL;
3778 bool find = false;
3779
3780 struct list_head *head;
3781
3782 if (encode_work_queue) {
3783 spin_lock(&encode_manager.event.sem_lock);
3784 if (encode_manager.current_wq == encode_work_queue) {
3785 encode_manager.remove_flag = true;
3786 spin_unlock(&encode_manager.event.sem_lock);
3787 enc_pr(LOG_DEBUG,
3788 "warning--Destroy the running queue, should not be here.\n");
3789 wait_for_completion(
3790 &encode_manager.event.process_complete);
3791 spin_lock(&encode_manager.event.sem_lock);
3792 } /* else we can delete it safely. */
3793
3794 head = &encode_manager.process_queue;
3795 list_for_each_entry_safe(pitem, tmp, head, list) {
3796 if (pitem && pitem->request.parent ==
3797 encode_work_queue) {
3798 pitem->request.parent = NULL;
3799 enc_pr(LOG_DEBUG,
3800 "warning--remove not process request, should not be here.\n");
3801 list_move_tail(&pitem->list,
3802 &encode_manager.free_queue);
3803 }
3804 }
3805
3806 _destroy_encode_work_queue(&encode_manager, &wq,
3807 encode_work_queue, &find);
3808 spin_unlock(&encode_manager.event.sem_lock);
3809#ifdef CONFIG_CMA
3810 if (encode_work_queue->mem.buf_start) {
3811 if (wq->mem.cbr_info_ddr_virt_addr != NULL) {
3812 codec_mm_unmap_phyaddr(wq->mem.cbr_info_ddr_virt_addr);
3813 wq->mem.cbr_info_ddr_virt_addr = NULL;
3814 }
3815 codec_mm_free_for_dma(
3816 ENCODE_NAME,
3817 encode_work_queue->mem.buf_start);
3818 encode_work_queue->mem.buf_start = 0;
3819
3820 }
3821#endif
3822 kfree(encode_work_queue);
3823 complete(&encode_manager.event.request_in_com);
3824 }
3825 return 0;
3826}
3827
/*
 * encode_monitor_thread() - the single worker thread of the encoder.
 *
 * Runs as SCHED_FIFO at the highest RT priority. Each wakeup (via the
 * request_in_com completion) it either (a) performs one-time hardware
 * init on the first registered work queue, (b) powers the encoder off
 * when the last queue is gone, or (c) pops one request from the process
 * queue and runs it through encode_process_request(). The remove_flag
 * handshake lets destroy_encode_work_queue() wait for in-flight work.
 */
static s32 encode_monitor_thread(void *data)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)data;
	struct encode_queue_item_s *pitem = NULL;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
	s32 ret = 0;

	enc_pr(LOG_DEBUG, "encode workqueue monitor start.\n");
	sched_setscheduler(current, SCHED_FIFO, &param);
	allow_signal(SIGTERM);
	/* setup current_wq here. */
	while (manager->process_queue_state != ENCODE_PROCESS_QUEUE_STOP) {
		if (kthread_should_stop())
			break;

		/* sleep until someone queues work or asks us to stop */
		ret = wait_for_completion_interruptible(
			&manager->event.request_in_com);

		if (ret == -ERESTARTSYS)
			break;

		if (kthread_should_stop())
			break;
		if (manager->inited == false) {
			/* first wakeup after a queue registered: bring
			 * up the hardware using that queue's config */
			spin_lock(&manager->event.sem_lock);
			if (!list_empty(&manager->wq)) {
				struct encode_wq_s *first_wq =
					list_entry(manager->wq.next,
					struct encode_wq_s, list);
				manager->current_wq = first_wq;
				spin_unlock(&manager->event.sem_lock);
				if (first_wq) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
					if (!manager->context)
						manager->context =
						create_ge2d_work_queue();
#endif
					avc_init(first_wq);
					manager->inited = true;
				}
				spin_lock(&manager->event.sem_lock);
				manager->current_wq = NULL;
				spin_unlock(&manager->event.sem_lock);
				if (manager->remove_flag) {
					complete(
						&manager
						->event.process_complete);
					manager->remove_flag = false;
				}
			} else
				spin_unlock(&manager->event.sem_lock);
			continue;
		}

		spin_lock(&manager->event.sem_lock);
		pitem = NULL;
		if (list_empty(&manager->wq)) {
			/* no registered queues left: power down */
			spin_unlock(&manager->event.sem_lock);
			manager->inited = false;
			amvenc_avc_stop();
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			if (manager->context) {
				destroy_ge2d_work_queue(manager->context);
				manager->context = NULL;
			}
#endif
			enc_pr(LOG_DEBUG, "power off encode.\n");
			continue;
		} else if (!list_empty(&manager->process_queue)) {
			/* take exactly one pending request */
			pitem = list_entry(manager->process_queue.next,
				struct encode_queue_item_s, list);
			list_del(&pitem->list);
			manager->current_item = pitem;
			manager->current_wq = pitem->request.parent;
		}
		spin_unlock(&manager->event.sem_lock);

		if (pitem) {
			encode_process_request(manager, pitem);
			/* recycle the item and clear "busy" markers */
			spin_lock(&manager->event.sem_lock);
			list_add_tail(&pitem->list, &manager->free_queue);
			manager->current_item = NULL;
			manager->last_wq = manager->current_wq;
			manager->current_wq = NULL;
			spin_unlock(&manager->event.sem_lock);
		}
		if (manager->remove_flag) {
			complete(&manager->event.process_complete);
			manager->remove_flag = false;
		}
	}
	/* idle until kthread_stop() reaps us */
	while (!kthread_should_stop())
		msleep(20);

	enc_pr(LOG_DEBUG, "exit encode_monitor_thread.\n");
	return 0;
}
3925
3926static s32 encode_start_monitor(void)
3927{
3928 s32 ret = 0;
3929
3930 if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
3931 y_tnr_mot2alp_nrm_gain = 216;
3932 y_tnr_mot2alp_dis_gain = 144;
3933 c_tnr_mot2alp_nrm_gain = 216;
3934 c_tnr_mot2alp_dis_gain = 144;
3935 } else {
3936 /* more tnr */
3937 y_tnr_mot2alp_nrm_gain = 144;
3938 y_tnr_mot2alp_dis_gain = 96;
3939 c_tnr_mot2alp_nrm_gain = 144;
3940 c_tnr_mot2alp_dis_gain = 96;
3941 }
3942
3943 enc_pr(LOG_DEBUG, "encode start monitor.\n");
3944 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_START;
3945 encode_manager.encode_thread = kthread_run(encode_monitor_thread,
3946 &encode_manager, "encode_monitor");
3947 if (IS_ERR(encode_manager.encode_thread)) {
3948 ret = PTR_ERR(encode_manager.encode_thread);
3949 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3950 enc_pr(LOG_ERROR,
3951 "encode monitor : failed to start kthread (%d)\n", ret);
3952 }
3953 return ret;
3954}
3955
3956static s32 encode_stop_monitor(void)
3957{
3958 enc_pr(LOG_DEBUG, "stop encode monitor thread\n");
3959 if (encode_manager.encode_thread) {
3960 spin_lock(&encode_manager.event.sem_lock);
3961 if (!list_empty(&encode_manager.wq)) {
3962 u32 count = encode_manager.wq_count;
3963
3964 spin_unlock(&encode_manager.event.sem_lock);
3965 enc_pr(LOG_ERROR,
3966 "stop encode monitor thread error, active wq (%d) is not 0.\n",
3967 count);
3968 return -1;
3969 }
3970 spin_unlock(&encode_manager.event.sem_lock);
3971 encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP;
3972 send_sig(SIGTERM, encode_manager.encode_thread, 1);
3973 complete(&encode_manager.event.request_in_com);
3974 kthread_stop(encode_manager.encode_thread);
3975 encode_manager.encode_thread = NULL;
3976 kfree(mc_addr);
3977 mc_addr = NULL;
3978 }
3979 return 0;
3980}
3981
3982static s32 encode_wq_init(void)
3983{
3984 u32 i = 0;
3985 struct encode_queue_item_s *pitem = NULL;
3986
3987 enc_pr(LOG_DEBUG, "encode_wq_init.\n");
3988 encode_manager.irq_requested = false;
3989
3990 spin_lock_init(&encode_manager.event.sem_lock);
3991 init_completion(&encode_manager.event.request_in_com);
3992 init_waitqueue_head(&encode_manager.event.hw_complete);
3993 init_completion(&encode_manager.event.process_complete);
3994 INIT_LIST_HEAD(&encode_manager.process_queue);
3995 INIT_LIST_HEAD(&encode_manager.free_queue);
3996 INIT_LIST_HEAD(&encode_manager.wq);
3997
3998 tasklet_init(&encode_manager.encode_tasklet,
3999 encode_isr_tasklet,
4000 (ulong)&encode_manager);
4001
4002 for (i = 0; i < MAX_ENCODE_REQUEST; i++) {
4003 pitem = kcalloc(1,
4004 sizeof(struct encode_queue_item_s),
4005 GFP_KERNEL);
4006 if (IS_ERR(pitem)) {
4007 enc_pr(LOG_ERROR, "can't request queue item memory.\n");
4008 return -1;
4009 }
4010 pitem->request.parent = NULL;
4011 list_add_tail(&pitem->list, &encode_manager.free_queue);
4012 }
4013 encode_manager.current_wq = NULL;
4014 encode_manager.last_wq = NULL;
4015 encode_manager.encode_thread = NULL;
4016 encode_manager.current_item = NULL;
4017 encode_manager.wq_count = 0;
4018 encode_manager.remove_flag = false;
4019 InitEncodeWeight();
4020 if (encode_start_monitor()) {
4021 enc_pr(LOG_ERROR, "encode create thread error.\n");
4022 return -1;
4023 }
4024 return 0;
4025}
4026
4027static s32 encode_wq_uninit(void)
4028{
4029 struct encode_queue_item_s *pitem, *tmp;
4030 struct list_head *head;
4031 u32 count = 0;
4032 s32 r = -1;
4033
4034 enc_pr(LOG_DEBUG, "uninit encode wq.\n");
4035 if (encode_stop_monitor() == 0) {
4036 if ((encode_manager.irq_num >= 0) &&
4037 (encode_manager.irq_requested == true)) {
4038 free_irq(encode_manager.irq_num, &encode_manager);
4039 encode_manager.irq_requested = false;
4040 }
4041 spin_lock(&encode_manager.event.sem_lock);
4042 head = &encode_manager.process_queue;
4043 list_for_each_entry_safe(pitem, tmp, head, list) {
4044 if (pitem) {
4045 list_del(&pitem->list);
4046 kfree(pitem);
4047 count++;
4048 }
4049 }
4050 head = &encode_manager.free_queue;
4051 list_for_each_entry_safe(pitem, tmp, head, list) {
4052 if (pitem) {
4053 list_del(&pitem->list);
4054 kfree(pitem);
4055 count++;
4056 }
4057 }
4058 spin_unlock(&encode_manager.event.sem_lock);
4059 if (count == MAX_ENCODE_REQUEST)
4060 r = 0;
4061 else {
4062 enc_pr(LOG_ERROR, "lost some request item %d.\n",
4063 MAX_ENCODE_REQUEST - count);
4064 }
4065 }
4066 return r;
4067}
4068
4069static ssize_t encode_status_show(struct class *cla,
4070 struct class_attribute *attr, char *buf)
4071{
4072 u32 process_count = 0;
4073 u32 free_count = 0;
4074 struct encode_queue_item_s *pitem = NULL;
4075 struct encode_wq_s *current_wq = NULL;
4076 struct encode_wq_s *last_wq = NULL;
4077 struct list_head *head = NULL;
4078 s32 irq_num = 0;
4079 u32 hw_status = 0;
4080 u32 process_queue_state = 0;
4081 u32 wq_count = 0;
4082 u32 ucode_index;
4083 bool need_reset;
4084 bool process_irq;
4085 bool inited;
4086 bool use_reserve;
4087 struct Buff_s reserve_mem;
4088 u32 max_instance;
4089#ifdef CONFIG_CMA
4090 bool check_cma = false;
4091#endif
4092
4093 spin_lock(&encode_manager.event.sem_lock);
4094 head = &encode_manager.free_queue;
4095 list_for_each_entry(pitem, head, list) {
4096 free_count++;
4097 if (free_count > MAX_ENCODE_REQUEST)
4098 break;
4099 }
4100
4101 head = &encode_manager.process_queue;
4102 list_for_each_entry(pitem, head, list) {
4103 process_count++;
4104 if (free_count > MAX_ENCODE_REQUEST)
4105 break;
4106 }
4107
4108 current_wq = encode_manager.current_wq;
4109 last_wq = encode_manager.last_wq;
4110 pitem = encode_manager.current_item;
4111 irq_num = encode_manager.irq_num;
4112 hw_status = encode_manager.encode_hw_status;
4113 process_queue_state = encode_manager.process_queue_state;
4114 wq_count = encode_manager.wq_count;
4115 ucode_index = encode_manager.ucode_index;
4116 need_reset = encode_manager.need_reset;
4117 process_irq = encode_manager.process_irq;
4118 inited = encode_manager.inited;
4119 use_reserve = encode_manager.use_reserve;
4120 reserve_mem.buf_start = encode_manager.reserve_mem.buf_start;
4121 reserve_mem.buf_size = encode_manager.reserve_mem.buf_size;
4122
4123 max_instance = encode_manager.max_instance;
4124#ifdef CONFIG_CMA
4125 check_cma = encode_manager.check_cma;
4126#endif
4127
4128 spin_unlock(&encode_manager.event.sem_lock);
4129
4130 enc_pr(LOG_DEBUG,
4131 "encode process queue count: %d, free queue count: %d.\n",
4132 process_count, free_count);
4133 enc_pr(LOG_DEBUG,
4134 "encode curent wq: %p, last wq: %p, wq count: %d, max_instance: %d.\n",
4135 current_wq, last_wq, wq_count, max_instance);
4136 if (current_wq)
4137 enc_pr(LOG_DEBUG,
4138 "encode curent wq -- encode width: %d, encode height: %d.\n",
4139 current_wq->pic.encoder_width,
4140 current_wq->pic.encoder_height);
4141 enc_pr(LOG_DEBUG,
4142 "encode curent pitem: %p, ucode_index: %d, hw_status: %d, need_reset: %s, process_irq: %s.\n",
4143 pitem, ucode_index, hw_status, need_reset ? "true" : "false",
4144 process_irq ? "true" : "false");
4145 enc_pr(LOG_DEBUG,
4146 "encode irq num: %d, inited: %s, process_queue_state: %d.\n",
4147 irq_num, inited ? "true" : "false", process_queue_state);
4148 if (use_reserve) {
4149 enc_pr(LOG_DEBUG,
4150 "encode use reserve memory, buffer start: 0x%x, size: %d MB.\n",
4151 reserve_mem.buf_start,
4152 reserve_mem.buf_size / SZ_1M);
4153 } else {
4154#ifdef CONFIG_CMA
4155 enc_pr(LOG_DEBUG, "encode check cma: %s.\n",
4156 check_cma ? "true" : "false");
4157#endif
4158 }
4159 return snprintf(buf, 40, "encode max instance: %d\n", max_instance);
4160}
4161
/* sysfs attributes exposed under the encoder's class directory */
static struct class_attribute amvenc_class_attrs[] = {
	__ATTR(encode_status,
	S_IRUGO | S_IWUSR,
	encode_status_show,
	NULL),
	__ATTR_NULL
};

/* device class registered by init_avc_device() */
static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_attrs = amvenc_class_attrs,
};
4174
4175s32 init_avc_device(void)
4176{
4177 s32 r = 0;
4178
4179 r = register_chrdev(0, DEVICE_NAME, &amvenc_avc_fops);
4180 if (r <= 0) {
4181 enc_pr(LOG_ERROR, "register amvenc_avc device error.\n");
4182 return r;
4183 }
4184 avc_device_major = r;
4185
4186 r = class_register(&amvenc_avc_class);
4187 if (r < 0) {
4188 enc_pr(LOG_ERROR, "error create amvenc_avc class.\n");
4189 return r;
4190 }
4191
4192 amvenc_avc_dev = device_create(&amvenc_avc_class, NULL,
4193 MKDEV(avc_device_major, 0), NULL,
4194 DEVICE_NAME);
4195
4196 if (IS_ERR(amvenc_avc_dev)) {
4197 enc_pr(LOG_ERROR, "create amvenc_avc device error.\n");
4198 class_unregister(&amvenc_avc_class);
4199 return -1;
4200 }
4201 return r;
4202}
4203
4204s32 uninit_avc_device(void)
4205{
4206 if (amvenc_avc_dev)
4207 device_destroy(&amvenc_avc_class, MKDEV(avc_device_major, 0));
4208
4209 class_destroy(&amvenc_avc_class);
4210
4211 unregister_chrdev(avc_device_major, DEVICE_NAME);
4212 return 0;
4213}
4214
4215static s32 avc_mem_device_init(struct reserved_mem *rmem, struct device *dev)
4216{
4217 s32 r;
4218 struct resource res;
4219
4220 if (!rmem) {
4221 enc_pr(LOG_ERROR,
4222 "Can not obtain I/O memory, and will allocate avc buffer!\n");
4223 r = -EFAULT;
4224 return r;
4225 }
4226 res.start = (phys_addr_t)rmem->base;
4227 res.end = res.start + (phys_addr_t)rmem->size - 1;
4228 encode_manager.reserve_mem.buf_start = res.start;
4229 encode_manager.reserve_mem.buf_size = res.end - res.start + 1;
4230
4231 if (encode_manager.reserve_mem.buf_size >=
4232 amvenc_buffspec[0].min_buffsize) {
4233 encode_manager.max_instance =
4234 encode_manager.reserve_mem.buf_size /
4235 amvenc_buffspec[0].min_buffsize;
4236 if (encode_manager.max_instance > MAX_ENCODE_INSTANCE)
4237 encode_manager.max_instance = MAX_ENCODE_INSTANCE;
4238 encode_manager.reserve_buff = kzalloc(
4239 encode_manager.max_instance *
4240 sizeof(struct Buff_s), GFP_KERNEL);
4241 if (encode_manager.reserve_buff) {
4242 u32 i;
4243 struct Buff_s *reserve_buff;
4244 u32 max_instance = encode_manager.max_instance;
4245
4246 for (i = 0; i < max_instance; i++) {
4247 reserve_buff = &encode_manager.reserve_buff[i];
4248 reserve_buff->buf_start =
4249 i *
4250 amvenc_buffspec[0]
4251 .min_buffsize +
4252 encode_manager.reserve_mem.buf_start;
4253 reserve_buff->buf_size =
4254 encode_manager.reserve_mem.buf_start;
4255 reserve_buff->used = false;
4256 }
4257 encode_manager.use_reserve = true;
4258 r = 0;
4259 enc_pr(LOG_DEBUG,
4260 "amvenc_avc use reserve memory, buff start: 0x%x, size: 0x%x, max instance is %d\n",
4261 encode_manager.reserve_mem.buf_start,
4262 encode_manager.reserve_mem.buf_size,
4263 encode_manager.max_instance);
4264 } else {
4265 enc_pr(LOG_ERROR,
4266 "amvenc_avc alloc reserve buffer pointer fail. max instance is %d.\n",
4267 encode_manager.max_instance);
4268 encode_manager.max_instance = 0;
4269 encode_manager.reserve_mem.buf_start = 0;
4270 encode_manager.reserve_mem.buf_size = 0;
4271 r = -ENOMEM;
4272 }
4273 } else {
4274 enc_pr(LOG_ERROR,
4275 "amvenc_avc memory resource too small, size is 0x%x. Need 0x%x bytes at least.\n",
4276 encode_manager.reserve_mem.buf_size,
4277 amvenc_buffspec[0]
4278 .min_buffsize);
4279 encode_manager.reserve_mem.buf_start = 0;
4280 encode_manager.reserve_mem.buf_size = 0;
4281 r = -ENOMEM;
4282 }
4283 return r;
4284}
4285
/*
 * Platform-driver probe: resets encode_manager state, tries to bind the
 * reserved-memory region (which, via avc_mem_device_init, may populate
 * the reserve buffers), falls back to a CMA pool when no reserve is
 * available, claims the encoder IRQ, starts the work-queue thread, and
 * finally registers the character device.
 *
 * Returns 0 on success or a negative errno.
 */
static s32 amvenc_avc_probe(struct platform_device *pdev)
{
	/* struct resource mem; */
	s32 res_irq;
	s32 idx;
	s32 r;

	enc_pr(LOG_INFO, "amvenc_avc probe start.\n");

	/* Reset all manager state before any init step can populate it. */
	encode_manager.this_pdev = pdev;
#ifdef CONFIG_CMA
	encode_manager.check_cma = false;
#endif
	encode_manager.reserve_mem.buf_start = 0;
	encode_manager.reserve_mem.buf_size = 0;
	encode_manager.use_reserve = false;
	encode_manager.max_instance = 0;
	encode_manager.reserve_buff = NULL;

	/* May invoke avc_mem_device_init() and set use_reserve = true. */
	idx = of_reserved_mem_device_init(&pdev->dev);
	if (idx != 0) {
		enc_pr(LOG_DEBUG,
			"amvenc_avc_probe -- reserved memory config fail.\n");
	}

	if (encode_manager.use_reserve == false) {
#ifndef CONFIG_CMA
		enc_pr(LOG_ERROR,
			"amvenc_avc memory is invaild, probe fail!\n");
		return -EFAULT;
#else
		/* No reserved region: size a CMA pool, capped at 3*MIN_SIZE. */
		encode_manager.cma_pool_size =
			(codec_mm_get_total_size() > (MIN_SIZE * 3)) ?
			(MIN_SIZE * 3) : codec_mm_get_total_size();
		enc_pr(LOG_DEBUG,
			"amvenc_avc - cma memory pool size: %d MB\n",
			(u32)encode_manager.cma_pool_size / SZ_1M);
#endif
	}

	res_irq = platform_get_irq(pdev, 0);
	if (res_irq < 0) {
		enc_pr(LOG_ERROR, "[%s] get irq error!", __func__);
		return -EINVAL;
	}

	encode_manager.irq_num = res_irq;
	if (encode_wq_init()) {
		/* Undo the reserve-buffer table allocated by rmem init. */
		kfree(encode_manager.reserve_buff);
		encode_manager.reserve_buff = NULL;
		enc_pr(LOG_ERROR, "encode work queue init error.\n");
		return -EFAULT;
	}

	r = init_avc_device();
	enc_pr(LOG_INFO, "amvenc_avc probe end.\n");
	return r;
}
4344
4345static s32 amvenc_avc_remove(struct platform_device *pdev)
4346{
4347 kfree(encode_manager.reserve_buff);
4348 encode_manager.reserve_buff = NULL;
4349 if (encode_wq_uninit())
4350 enc_pr(LOG_ERROR, "encode work queue uninit error.\n");
4351 uninit_avc_device();
4352 enc_pr(LOG_INFO, "amvenc_avc remove.\n");
4353 return 0;
4354}
4355
/* Device-tree match table; note the historical space in the compatible
 * string ("amlogic, amvenc_avc") must match the DTS exactly. */
static const struct of_device_id amlogic_avcenc_dt_match[] = {
	{
		.compatible = "amlogic, amvenc_avc",
	},
	{},
};
4362
/* Platform driver registered from the module init hook below. */
static struct platform_driver amvenc_avc_driver = {
	.probe = amvenc_avc_probe,
	.remove = amvenc_avc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = amlogic_avcenc_dt_match,
	}
};
4371
/* Advertises the "avc" encoder capability to the vcodec profile list. */
static struct codec_profile_t amvenc_avc_profile = {
	.name = "avc",
	.profile = ""
};
4376
4377static s32 __init amvenc_avc_driver_init_module(void)
4378{
4379 enc_pr(LOG_INFO, "amvenc_avc module init\n");
4380
4381 if (platform_driver_register(&amvenc_avc_driver)) {
4382 enc_pr(LOG_ERROR,
4383 "failed to register amvenc_avc driver\n");
4384 return -ENODEV;
4385 }
4386 vcodec_profile_register(&amvenc_avc_profile);
4387 return 0;
4388}
4389
4390static void __exit amvenc_avc_driver_remove_module(void)
4391{
4392 enc_pr(LOG_INFO, "amvenc_avc module remove.\n");
4393
4394 platform_driver_unregister(&amvenc_avc_driver);
4395}
4396
/* Reserved-memory ops: only device_init is needed (no release hook). */
static const struct reserved_mem_ops rmem_avc_ops = {
	.device_init = avc_mem_device_init,
};
4400
4401static s32 __init avc_mem_setup(struct reserved_mem *rmem)
4402{
4403 rmem->ops = &rmem_avc_ops;
4404 enc_pr(LOG_DEBUG, "amvenc_avc reserved mem setup.\n");
4405 return 0;
4406}
4407
4408static int enc_dma_buf_map(struct enc_dma_cfg *cfg)
4409{
4410 long ret = -1;
4411 int fd = -1;
4412 struct dma_buf *dbuf = NULL;
4413 struct dma_buf_attachment *d_att = NULL;
4414 struct sg_table *sg = NULL;
4415 void *vaddr = NULL;
4416 struct device *dev = NULL;
4417 enum dma_data_direction dir;
4418
4419 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
4420 enc_pr(LOG_ERROR, "error input param\n");
4421 return -EINVAL;
4422 }
4423 enc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd);
4424
4425 fd = cfg->fd;
4426 dev = cfg->dev;
4427 dir = cfg->dir;
4428 enc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd);
4429
4430 dbuf = dma_buf_get(fd);
4431 if (dbuf == NULL) {
4432 enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd);
4433 return -EINVAL;
4434 }
4435
4436 d_att = dma_buf_attach(dbuf, dev);
4437 if (d_att == NULL) {
4438 enc_pr(LOG_ERROR, "failed to set dma attach\n");
4439 goto attach_err;
4440 }
4441
4442 sg = dma_buf_map_attachment(d_att, dir);
4443 if (sg == NULL) {
4444 enc_pr(LOG_ERROR, "failed to get dma sg\n");
4445 goto map_attach_err;
4446 }
4447
4448 ret = dma_buf_begin_cpu_access(dbuf, dir);
4449 if (ret != 0) {
4450 enc_pr(LOG_ERROR, "failed to access dma buff\n");
4451 goto access_err;
4452 }
4453
4454 vaddr = dma_buf_vmap(dbuf);
4455 if (vaddr == NULL) {
4456 enc_pr(LOG_ERROR, "failed to vmap dma buf\n");
4457 goto vmap_err;
4458 }
4459 cfg->dbuf = dbuf;
4460 cfg->attach = d_att;
4461 cfg->vaddr = vaddr;
4462 cfg->sg = sg;
4463
4464 return ret;
4465
4466vmap_err:
4467 dma_buf_end_cpu_access(dbuf, dir);
4468
4469access_err:
4470 dma_buf_unmap_attachment(d_att, sg, dir);
4471
4472map_attach_err:
4473 dma_buf_detach(dbuf, d_att);
4474
4475attach_err:
4476 dma_buf_put(dbuf);
4477
4478 return ret;
4479}
4480
4481static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr)
4482{
4483 struct sg_table *sg_table;
4484 struct page *page;
4485 int ret;
4486 enc_pr(LOG_INFO, "enc_dma_buf_get_phys in\n");
4487
4488 ret = enc_dma_buf_map(cfg);
4489 if (ret < 0) {
4490 enc_pr(LOG_ERROR, "gdc_dma_buf_map failed\n");
4491 return ret;
4492 }
4493 if (cfg->sg) {
4494 sg_table = cfg->sg;
4495 page = sg_page(sg_table->sgl);
4496 *addr = PFN_PHYS(page_to_pfn(page));
4497 ret = 0;
4498 }
4499 enc_pr(LOG_INFO, "enc_dma_buf_get_phys 0x%lx\n", *addr);
4500 return ret;
4501}
4502
4503static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg)
4504{
4505 int fd = -1;
4506 struct dma_buf *dbuf = NULL;
4507 struct dma_buf_attachment *d_att = NULL;
4508 struct sg_table *sg = NULL;
4509 void *vaddr = NULL;
4510 struct device *dev = NULL;
4511 enum dma_data_direction dir;
4512
4513 if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL
4514 || cfg->dbuf == NULL || cfg->vaddr == NULL
4515 || cfg->attach == NULL || cfg->sg == NULL) {
4516 enc_pr(LOG_ERROR, "Error input param\n");
4517 return;
4518 }
4519
4520 fd = cfg->fd;
4521 dev = cfg->dev;
4522 dir = cfg->dir;
4523 dbuf = cfg->dbuf;
4524 vaddr = cfg->vaddr;
4525 d_att = cfg->attach;
4526 sg = cfg->sg;
4527
4528 dma_buf_vunmap(dbuf, vaddr);
4529
4530 dma_buf_end_cpu_access(dbuf, dir);
4531
4532 dma_buf_unmap_attachment(d_att, sg, dir);
4533
4534 dma_buf_detach(dbuf, d_att);
4535
4536 dma_buf_put(dbuf);
4537 enc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr);
4538}
4539
4540
/* Runtime-tunable module parameters (mode 0664: adjustable via sysfs). */
module_param(fixed_slice_cfg, uint, 0664);
MODULE_PARM_DESC(fixed_slice_cfg, "\n fixed_slice_cfg\n");

module_param(clock_level, uint, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

module_param(encode_print_level, uint, 0664);
MODULE_PARM_DESC(encode_print_level, "\n encode_print_level\n");

module_param(no_timeout, uint, 0664);
MODULE_PARM_DESC(no_timeout, "\n no_timeout flag for process request\n");

module_param(nr_mode, int, 0664);
MODULE_PARM_DESC(nr_mode, "\n nr_mode option\n");

module_param(qp_table_debug, uint, 0664);
MODULE_PARM_DESC(qp_table_debug, "\n print qp table\n");

/* Scalable-video-coding knobs, only compiled in with H264_ENC_SVC. */
#ifdef H264_ENC_SVC
module_param(svc_enable, uint, 0664);
MODULE_PARM_DESC(svc_enable, "\n svc enable\n");
module_param(svc_ref_conf, uint, 0664);
MODULE_PARM_DESC(svc_ref_conf, "\n svc reference duration config\n");
#endif
4565
/* Extended tuning parameters, compiled in only with MORE_MODULE_PARAM. */
#ifdef MORE_MODULE_PARAM
/* Motion-estimation tuning knobs. */
module_param(me_mv_merge_ctl, uint, 0664);
MODULE_PARM_DESC(me_mv_merge_ctl, "\n me_mv_merge_ctl\n");

module_param(me_step0_close_mv, uint, 0664);
MODULE_PARM_DESC(me_step0_close_mv, "\n me_step0_close_mv\n");

module_param(me_f_skip_sad, uint, 0664);
MODULE_PARM_DESC(me_f_skip_sad, "\n me_f_skip_sad\n");

module_param(me_f_skip_weight, uint, 0664);
MODULE_PARM_DESC(me_f_skip_weight, "\n me_f_skip_weight\n");

module_param(me_mv_weight_01, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_01, "\n me_mv_weight_01\n");

module_param(me_mv_weight_23, uint, 0664);
MODULE_PARM_DESC(me_mv_weight_23, "\n me_mv_weight_23\n");

module_param(me_sad_range_inc, uint, 0664);
MODULE_PARM_DESC(me_sad_range_inc, "\n me_sad_range_inc\n");

module_param(me_sad_enough_01, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_01, "\n me_sad_enough_01\n");

module_param(me_sad_enough_23, uint, 0664);
MODULE_PARM_DESC(me_sad_enough_23, "\n me_sad_enough_23\n");

/* Luma (y_) temporal noise reduction parameters. */
module_param(y_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(y_tnr_mc_en, "\n y_tnr_mc_en option\n");
module_param(y_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(y_tnr_txt_mode, "\n y_tnr_txt_mode option\n");
module_param(y_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_sad_margin, "\n y_tnr_mot_sad_margin option\n");
module_param(y_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_cortxt_rate, "\n y_tnr_mot_cortxt_rate option\n");
module_param(y_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_ofst, "\n y_tnr_mot_distxt_ofst option\n");
module_param(y_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_distxt_rate, "\n y_tnr_mot_distxt_rate option\n");
module_param(y_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_dismot_ofst, "\n y_tnr_mot_dismot_ofst option\n");
module_param(y_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot_frcsad_lock, "\n y_tnr_mot_frcsad_lock option\n");
module_param(y_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_frc_gain, "\n y_tnr_mot2alp_frc_gain option\n");
module_param(y_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_nrm_gain, "\n y_tnr_mot2alp_nrm_gain option\n");
module_param(y_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_gain, "\n y_tnr_mot2alp_dis_gain option\n");
module_param(y_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(y_tnr_mot2alp_dis_ofst, "\n y_tnr_mot2alp_dis_ofst option\n");
module_param(y_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_min, "\n y_tnr_alpha_min option\n");
module_param(y_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(y_tnr_alpha_max, "\n y_tnr_alpha_max option\n");
module_param(y_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(y_tnr_deghost_os, "\n y_tnr_deghost_os option\n");

/* Chroma (c_) temporal noise reduction parameters. */
module_param(c_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(c_tnr_mc_en, "\n c_tnr_mc_en option\n");
module_param(c_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(c_tnr_txt_mode, "\n c_tnr_txt_mode option\n");
module_param(c_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_sad_margin, "\n c_tnr_mot_sad_margin option\n");
module_param(c_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_cortxt_rate, "\n c_tnr_mot_cortxt_rate option\n");
module_param(c_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_ofst, "\n c_tnr_mot_distxt_ofst option\n");
module_param(c_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_rate, "\n c_tnr_mot_distxt_rate option\n");
module_param(c_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_dismot_ofst, "\n c_tnr_mot_dismot_ofst option\n");
module_param(c_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_frcsad_lock, "\n c_tnr_mot_frcsad_lock option\n");
module_param(c_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_frc_gain, "\n c_tnr_mot2alp_frc_gain option\n");
module_param(c_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_nrm_gain, "\n c_tnr_mot2alp_nrm_gain option\n");
module_param(c_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_gain, "\n c_tnr_mot2alp_dis_gain option\n");
module_param(c_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_ofst, "\n c_tnr_mot2alp_dis_ofst option\n");
module_param(c_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_min, "\n c_tnr_alpha_min option\n");
module_param(c_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_max, "\n c_tnr_alpha_max option\n");
module_param(c_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(c_tnr_deghost_os, "\n c_tnr_deghost_os option\n");

/* Luma (y_) spatial noise reduction parameters. */
module_param(y_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(y_snr_err_norm, "\n y_snr_err_norm option\n");
module_param(y_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_core, "\n y_snr_gau_bld_core option\n");
module_param(y_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_ofst, "\n y_snr_gau_bld_ofst option\n");
module_param(y_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_rate, "\n y_snr_gau_bld_rate option\n");
module_param(y_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_min, "\n y_snr_gau_alp0_min option\n");
module_param(y_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_max, "\n y_snr_gau_alp0_max option\n");
module_param(y_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(y_bld_beta2alp_rate, "\n y_bld_beta2alp_rate option\n");
module_param(y_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_min, "\n y_bld_beta_min option\n");
module_param(y_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_max, "\n y_bld_beta_max option\n");

/* Chroma (c_) spatial noise reduction parameters. */
module_param(c_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(c_snr_err_norm, "\n c_snr_err_norm option\n");
module_param(c_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_core, "\n c_snr_gau_bld_core option\n");
module_param(c_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_ofst, "\n c_snr_gau_bld_ofst option\n");
module_param(c_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_rate, "\n c_snr_gau_bld_rate option\n");
module_param(c_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_min, "\n c_snr_gau_alp0_min option\n");
module_param(c_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_max, "\n c_snr_gau_alp0_max option\n");
module_param(c_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(c_bld_beta2alp_rate, "\n c_bld_beta2alp_rate option\n");
module_param(c_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_min, "\n c_bld_beta_min option\n");
module_param(c_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_max, "\n c_bld_beta_max option\n");
#endif
4694
module_init(amvenc_avc_driver_init_module);
module_exit(amvenc_avc_driver_remove_module);
/* Ties avc_mem_setup to the "amlogic, amvenc-memory" reserved-mem DT node
 * (the space inside the compatible string is historical and must match). */
RESERVEDMEM_OF_DECLARE(amvenc_avc, "amlogic, amvenc-memory", avc_mem_setup);

MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");
4702