blob: 7b83dd28064ce847ce33ff1fe088cef7b17bdd72
1 | /* |
2 | * drivers/amlogic/amports/encoder.c |
3 | * |
4 | * Copyright (C) 2015 Amlogic, Inc. All rights reserved. |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. |
10 | * |
11 | * This program is distributed in the hope that it will be useful, but WITHOUT |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | * more details. |
15 | * |
16 | */ |
17 | |
18 | #include <linux/kernel.h> |
19 | #include <linux/types.h> |
20 | #include <linux/errno.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/timer.h> |
23 | #include <linux/fs.h> |
24 | #include <linux/sched.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/dma-mapping.h> |
27 | #include <linux/platform_device.h> |
28 | #include <linux/spinlock.h> |
29 | #include <linux/ctype.h> |
30 | #include <linux/amlogic/media/frame_sync/ptsserv.h> |
31 | #include <linux/amlogic/media/utils/amstream.h> |
32 | #include <linux/amlogic/media/canvas/canvas.h> |
33 | #include <linux/amlogic/media/canvas/canvas_mgr.h> |
34 | #include <linux/amlogic/media/codec_mm/codec_mm.h> |
35 | |
36 | #include <linux/amlogic/media/utils/vdec_reg.h> |
37 | #include "../../../frame_provider/decoder/utils/vdec.h" |
38 | #include <linux/delay.h> |
39 | #include <linux/poll.h> |
40 | #include <linux/of.h> |
41 | #include <linux/of_fdt.h> |
42 | #include <linux/dma-contiguous.h> |
43 | #include <linux/kthread.h> |
44 | #include <linux/sched/rt.h> |
45 | #include <linux/amlogic/media/utils/amports_config.h> |
46 | #include "encoder.h" |
47 | #include "../../../frame_provider/decoder/utils/amvdec.h" |
48 | #include <linux/amlogic/media/utils/amlog.h> |
49 | #include "../../../stream_input/amports/amports_priv.h" |
50 | #include "../../../frame_provider/decoder/utils/firmware.h" |
51 | #include <linux/of_reserved_mem.h> |
52 | |
53 | |
54 | #ifdef CONFIG_AM_JPEG_ENCODER |
55 | #include "jpegenc.h" |
56 | #endif |
57 | |
58 | #define ENCODE_NAME "encoder" |
59 | #define AMVENC_CANVAS_INDEX 0xE4 |
60 | #define AMVENC_CANVAS_MAX_INDEX 0xEF |
61 | |
62 | #define MIN_SIZE amvenc_buffspec[0].min_buffsize |
63 | #define DUMP_INFO_BYTES_PER_MB 80 |
64 | |
65 | #define ADJUSTED_QP_FLAG 64 |
66 | |
67 | static s32 avc_device_major; |
68 | static struct device *amvenc_avc_dev; |
69 | #define DRIVER_NAME "amvenc_avc" |
70 | #define CLASS_NAME "amvenc_avc" |
71 | #define DEVICE_NAME "amvenc_avc" |
72 | |
73 | static struct encode_manager_s encode_manager; |
74 | |
75 | #define MULTI_SLICE_MC |
76 | #define H264_ENC_CBR |
77 | /* #define MORE_MODULE_PARAM */ |
78 | |
79 | #define ENC_CANVAS_OFFSET AMVENC_CANVAS_INDEX |
80 | |
81 | #define UCODE_MODE_FULL 0 |
82 | |
83 | /* #define ENABLE_IGNORE_FUNCTION */ |
84 | |
85 | static u32 ie_me_mb_type; |
86 | static u32 ie_me_mode; |
87 | static u32 ie_pippeline_block = 3; |
88 | static u32 ie_cur_ref_sel; |
89 | /* static u32 avc_endian = 6; */ |
90 | static u32 clock_level = 5; |
91 | |
92 | static u32 encode_print_level = LOG_DEBUG; |
93 | static u32 no_timeout; |
94 | static int nr_mode = -1; |
95 | static u32 qp_table_debug; |
96 | |
97 | #ifdef H264_ENC_SVC |
98 | static u32 svc_enable = 0; /* Enable sac feature or not */ |
99 | static u32 svc_ref_conf = 0; /* Continuous no reference numbers */ |
100 | #endif |
101 | |
102 | static u32 me_mv_merge_ctl = |
103 | (0x1 << 31) | /* [31] me_merge_mv_en_16 */ |
104 | (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */ |
105 | (0x1 << 29) | /* [29] me_merge_flex_en_16 */ |
106 | (0x1 << 28) | /* [28] me_merge_sad_en_16 */ |
107 | (0x1 << 27) | /* [27] me_merge_mv_en_8 */ |
108 | (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */ |
109 | (0x1 << 25) | /* [25] me_merge_flex_en_8 */ |
110 | (0x1 << 24) | /* [24] me_merge_sad_en_8 */ |
111 | /* [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged */ |
112 | (0x12 << 18) | |
113 | /* [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged */ |
114 | (0x2b << 12) | |
115 | /* [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV */ |
116 | (0x80 << 0); |
117 | /* ( 0x4 << 18) | |
118 | * // [23:18] me_merge_mv_diff_16 - MV diff <= n pixel can be merged |
119 | */ |
120 | /* ( 0x3f << 12) | |
121 | * // [17:12] me_merge_mv_diff_8 - MV diff <= n pixel can be merged |
122 | */ |
123 | /* ( 0xc0 << 0); |
124 | * // [11:0] me_merge_min_sad - SAD >= 0x180 can be merged with other MV |
125 | */ |
126 | |
127 | static u32 me_mv_weight_01 = (0x40 << 24) | (0x30 << 16) | (0x20 << 8) | 0x30; |
128 | static u32 me_mv_weight_23 = (0x40 << 8) | 0x30; |
129 | static u32 me_sad_range_inc = 0x03030303; |
130 | static u32 me_step0_close_mv = 0x003ffc21; |
131 | static u32 me_f_skip_sad; |
132 | static u32 me_f_skip_weight; |
133 | static u32 me_sad_enough_01;/* 0x00018010; */ |
134 | static u32 me_sad_enough_23;/* 0x00000020; */ |
135 | |
136 | /* [31:0] NUM_ROWS_PER_SLICE_P */ |
137 | /* [15:0] NUM_ROWS_PER_SLICE_I */ |
138 | static u32 fixed_slice_cfg; |
139 | |
140 | /* y tnr */ |
141 | static unsigned int y_tnr_mc_en = 1; |
142 | static unsigned int y_tnr_txt_mode; |
143 | static unsigned int y_tnr_mot_sad_margin = 1; |
144 | static unsigned int y_tnr_mot_cortxt_rate = 1; |
145 | static unsigned int y_tnr_mot_distxt_ofst = 5; |
146 | static unsigned int y_tnr_mot_distxt_rate = 4; |
147 | static unsigned int y_tnr_mot_dismot_ofst = 4; |
148 | static unsigned int y_tnr_mot_frcsad_lock = 8; |
149 | static unsigned int y_tnr_mot2alp_frc_gain = 10; |
150 | static unsigned int y_tnr_mot2alp_nrm_gain = 216; |
151 | static unsigned int y_tnr_mot2alp_dis_gain = 128; |
152 | static unsigned int y_tnr_mot2alp_dis_ofst = 32; |
153 | static unsigned int y_tnr_alpha_min = 32; |
154 | static unsigned int y_tnr_alpha_max = 63; |
155 | static unsigned int y_tnr_deghost_os; |
156 | /* c tnr */ |
157 | static unsigned int c_tnr_mc_en = 1; |
158 | static unsigned int c_tnr_txt_mode; |
159 | static unsigned int c_tnr_mot_sad_margin = 1; |
160 | static unsigned int c_tnr_mot_cortxt_rate = 1; |
161 | static unsigned int c_tnr_mot_distxt_ofst = 5; |
162 | static unsigned int c_tnr_mot_distxt_rate = 4; |
163 | static unsigned int c_tnr_mot_dismot_ofst = 4; |
164 | static unsigned int c_tnr_mot_frcsad_lock = 8; |
165 | static unsigned int c_tnr_mot2alp_frc_gain = 10; |
166 | static unsigned int c_tnr_mot2alp_nrm_gain = 216; |
167 | static unsigned int c_tnr_mot2alp_dis_gain = 128; |
168 | static unsigned int c_tnr_mot2alp_dis_ofst = 32; |
169 | static unsigned int c_tnr_alpha_min = 32; |
170 | static unsigned int c_tnr_alpha_max = 63; |
171 | static unsigned int c_tnr_deghost_os; |
172 | /* y snr */ |
173 | static unsigned int y_snr_err_norm = 1; |
174 | static unsigned int y_snr_gau_bld_core = 1; |
175 | static int y_snr_gau_bld_ofst = -1; |
176 | static unsigned int y_snr_gau_bld_rate = 48; |
177 | static unsigned int y_snr_gau_alp0_min; |
178 | static unsigned int y_snr_gau_alp0_max = 63; |
179 | static unsigned int y_bld_beta2alp_rate = 16; |
180 | static unsigned int y_bld_beta_min; |
181 | static unsigned int y_bld_beta_max = 63; |
182 | /* c snr */ |
183 | static unsigned int c_snr_err_norm = 1; |
184 | static unsigned int c_snr_gau_bld_core = 1; |
185 | static int c_snr_gau_bld_ofst = -1; |
186 | static unsigned int c_snr_gau_bld_rate = 48; |
187 | static unsigned int c_snr_gau_alp0_min; |
188 | static unsigned int c_snr_gau_alp0_max = 63; |
189 | static unsigned int c_bld_beta2alp_rate = 16; |
190 | static unsigned int c_bld_beta_min; |
191 | static unsigned int c_bld_beta_max = 63; |
192 | static unsigned int qp_mode; |
193 | |
194 | static DEFINE_SPINLOCK(lock); |
195 | |
196 | #define ADV_MV_LARGE_16x8 1 |
197 | #define ADV_MV_LARGE_8x16 1 |
198 | #define ADV_MV_LARGE_16x16 1 |
199 | |
200 | /* me weight offset should not very small, it used by v1 me module. */ |
201 | /* the min real sad for me is 16 by hardware. */ |
202 | #define ME_WEIGHT_OFFSET 0x520 |
203 | #define I4MB_WEIGHT_OFFSET 0x655 |
204 | #define I16MB_WEIGHT_OFFSET 0x560 |
205 | |
206 | #define ADV_MV_16x16_WEIGHT 0x080 |
207 | #define ADV_MV_16_8_WEIGHT 0x0e0 |
208 | #define ADV_MV_8x8_WEIGHT 0x240 |
209 | #define ADV_MV_4x4x4_WEIGHT 0x3000 |
210 | |
211 | #define IE_SAD_SHIFT_I16 0x001 |
212 | #define IE_SAD_SHIFT_I4 0x001 |
213 | #define ME_SAD_SHIFT_INTER 0x001 |
214 | |
215 | #define STEP_2_SKIP_SAD 0 |
216 | #define STEP_1_SKIP_SAD 0 |
217 | #define STEP_0_SKIP_SAD 0 |
218 | #define STEP_2_SKIP_WEIGHT 0 |
219 | #define STEP_1_SKIP_WEIGHT 0 |
220 | #define STEP_0_SKIP_WEIGHT 0 |
221 | |
222 | #define ME_SAD_RANGE_0 0x1 /* 0x0 */ |
223 | #define ME_SAD_RANGE_1 0x0 |
224 | #define ME_SAD_RANGE_2 0x0 |
225 | #define ME_SAD_RANGE_3 0x0 |
226 | |
227 | /* use 0 for v3, 0x18 for v2 */ |
228 | #define ME_MV_PRE_WEIGHT_0 0x18 |
229 | /* use 0 for v3, 0x18 for v2 */ |
230 | #define ME_MV_PRE_WEIGHT_1 0x18 |
231 | #define ME_MV_PRE_WEIGHT_2 0x0 |
232 | #define ME_MV_PRE_WEIGHT_3 0x0 |
233 | |
234 | /* use 0 for v3, 0x18 for v2 */ |
235 | #define ME_MV_STEP_WEIGHT_0 0x18 |
236 | /* use 0 for v3, 0x18 for v2 */ |
237 | #define ME_MV_STEP_WEIGHT_1 0x18 |
238 | #define ME_MV_STEP_WEIGHT_2 0x0 |
239 | #define ME_MV_STEP_WEIGHT_3 0x0 |
240 | |
241 | #define ME_SAD_ENOUGH_0_DATA 0x00 |
242 | #define ME_SAD_ENOUGH_1_DATA 0x04 |
243 | #define ME_SAD_ENOUGH_2_DATA 0x11 |
244 | #define ADV_MV_8x8_ENOUGH_DATA 0x20 |
245 | |
246 | /* V4_COLOR_BLOCK_FIX */ |
247 | #define V3_FORCE_SKIP_SAD_0 0x10 |
248 | /* 4 Blocks */ |
249 | #define V3_FORCE_SKIP_SAD_1 0x60 |
250 | /* 16 Blocks + V3_SKIP_WEIGHT_2 */ |
251 | #define V3_FORCE_SKIP_SAD_2 0x250 |
252 | /* almost disable it -- use t_lac_coeff_2 output to F_ZERO is better */ |
253 | #define V3_ME_F_ZERO_SAD (ME_WEIGHT_OFFSET + 0x10) |
254 | |
255 | #define V3_IE_F_ZERO_SAD_I16 (I16MB_WEIGHT_OFFSET + 0x10) |
256 | #define V3_IE_F_ZERO_SAD_I4 (I4MB_WEIGHT_OFFSET + 0x20) |
257 | |
258 | #define V3_SKIP_WEIGHT_0 0x10 |
259 | /* 4 Blocks 8 separate search sad can be very low */ |
260 | #define V3_SKIP_WEIGHT_1 0x8 /* (4 * ME_MV_STEP_WEIGHT_1 + 0x100) */ |
261 | #define V3_SKIP_WEIGHT_2 0x3 |
262 | |
263 | #define V3_LEVEL_1_F_SKIP_MAX_SAD 0x0 |
264 | #define V3_LEVEL_1_SKIP_MAX_SAD 0x6 |
265 | |
266 | #define I4_ipred_weight_most 0x18 |
267 | #define I4_ipred_weight_else 0x28 |
268 | |
269 | #define C_ipred_weight_V 0x04 |
270 | #define C_ipred_weight_H 0x08 |
271 | #define C_ipred_weight_DC 0x0c |
272 | |
273 | #define I16_ipred_weight_V 0x04 |
274 | #define I16_ipred_weight_H 0x08 |
275 | #define I16_ipred_weight_DC 0x0c |
276 | |
277 | /* 0x00 same as disable */ |
278 | #define v3_left_small_max_ie_sad 0x00 |
279 | #define v3_left_small_max_me_sad 0x40 |
280 | |
281 | #define v5_use_small_diff_cnt 0 |
282 | #define v5_simple_mb_inter_all_en 1 |
283 | #define v5_simple_mb_inter_8x8_en 1 |
284 | #define v5_simple_mb_inter_16_8_en 1 |
285 | #define v5_simple_mb_inter_16x16_en 1 |
286 | #define v5_simple_mb_intra_en 1 |
287 | #define v5_simple_mb_C_en 0 |
288 | #define v5_simple_mb_Y_en 1 |
289 | #define v5_small_diff_Y 0x10 |
290 | #define v5_small_diff_C 0x18 |
291 | /* shift 8-bits, 2, 1, 0, -1, -2, -3, -4 */ |
292 | #define v5_simple_dq_setting 0x43210fed |
293 | #define v5_simple_me_weight_setting 0 |
294 | |
295 | #ifdef H264_ENC_CBR |
296 | #define CBR_TABLE_SIZE 0x800 |
297 | #define CBR_SHORT_SHIFT 12 /* same as disable */ |
298 | #define CBR_LONG_MB_NUM 2 |
299 | #define START_TABLE_ID 8 |
300 | #define CBR_LONG_THRESH 4 |
301 | #endif |
302 | |
/*
 * MV SAD threshold table for the v3 motion-estimation search.
 * 16 entries per search stage (step0, step1, step2, step2 4x4-8x8).
 * Each word appears to pack a candidate index in the high halfword
 * (0x00..0x3f, monotonically increasing) and a SAD value in the low
 * halfword -- TODO confirm against the ucode that consumes it.
 */
static u32 v3_mv_sad[64] = {
	/* For step0 */
	0x00000004,
	0x00010008,
	0x00020010,
	0x00030018,
	0x00040020,
	0x00050028,
	0x00060038,
	0x00070048,
	0x00080058,
	0x00090068,
	0x000a0080,
	0x000b0098,
	0x000c00b0,
	0x000d00c8,
	0x000e00e8,
	0x000f0110,
	/* For step1 */
	0x00100002,
	0x00110004,
	0x00120008,
	0x0013000c,
	0x00140010,
	0x00150014,
	0x0016001c,
	0x00170024,
	0x0018002c,
	0x00190034,
	0x001a0044,
	0x001b0054,
	0x001c0064,
	0x001d0074,
	0x001e0094,
	0x001f00b4,
	/* For step2 */
	0x00200006,
	0x0021000c,
	0x0022000c,
	0x00230018,
	0x00240018,
	0x00250018,
	0x00260018,
	0x00270030,
	0x00280030,
	0x00290030,
	0x002a0030,
	0x002b0030,
	0x002c0030,
	0x002d0030,
	0x002e0030,
	0x002f0050,
	/* For step2 4x4-8x8 */
	0x00300001,
	0x00310002,
	0x00320002,
	0x00330004,
	0x00340004,
	0x00350004,
	0x00360004,
	0x00370006,
	0x00380006,
	0x00390006,
	0x003a0006,
	0x003b0006,
	0x003c0006,
	0x003d0006,
	0x003e0006,
	0x003f0006
};
373 | |
/*
 * Working-memory layout for the encoder, level 0: up to 1920x1088.
 * All buf_start values are byte offsets relative to the work queue's
 * base address (wq->mem.buf_start, see avc_buffspec_init()), and
 * min_buffsize is the total footprint required (exposed as MIN_SIZE).
 * Note the dec0_uv/dec1_uv sub-buffers are not listed here; they are
 * derived at runtime from the canvas geometry in avc_buffspec_init().
 */
static struct BuffInfo_s amvenc_buffspec[] = {
	{
		.lev_id = 0,
		.max_width = 1920,
		.max_height = 1088,
		.min_buffsize = 0x1400000,
		.dct = {
			.buf_start = 0,
			.buf_size = 0x800000, /* 1920x1088x4 */
		},
		.dec0_y = {
			.buf_start = 0x800000,
			.buf_size = 0x300000,
		},
		.dec1_y = {
			.buf_start = 0xb00000,
			.buf_size = 0x300000,
		},
		.assit = {
			.buf_start = 0xe10000,
			.buf_size = 0xc0000,
		},
		.bitstream = {
			.buf_start = 0xf00000,
			.buf_size = 0x100000,
		},
		.scale_buff = {
			.buf_start = 0x1000000,
			.buf_size = 0x300000,
		},
		.dump_info = {
			.buf_start = 0x1300000,
			.buf_size = 0xa0000, /* (1920x1088/256)x80 */
		},
		.cbr_info = {
			.buf_start = 0x13b0000,
			.buf_size = 0x2000,
		}
	}
};
414 | |
/* Firmware flavour per SoC family; values index ucode_name[]. */
enum ucode_type_e {
	UCODE_GXL,
	UCODE_TXL,
	UCODE_G12A,
	UCODE_MAX
};
421 | |
/* Firmware file names, indexed by enum ucode_type_e (see select_ucode()). */
const char *ucode_name[] = {
	"gxl_h264_enc",
	"txl_h264_enc_cavlc",
	"ga_h264_enc_cabac",
};
427 | |
428 | static void dma_flush(u32 buf_start, u32 buf_size); |
429 | static void cache_flush(u32 buf_start, u32 buf_size); |
430 | static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr); |
431 | static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg); |
432 | |
433 | static const char *select_ucode(u32 ucode_index) |
434 | { |
435 | enum ucode_type_e ucode = UCODE_GXL; |
436 | |
437 | switch (ucode_index) { |
438 | case UCODE_MODE_FULL: |
439 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_G12A) |
440 | ucode = UCODE_G12A; |
441 | else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) |
442 | ucode = UCODE_TXL; |
443 | else /* (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) */ |
444 | ucode = UCODE_GXL; |
445 | break; |
446 | break; |
447 | default: |
448 | break; |
449 | } |
450 | return (const char *)ucode_name[ucode]; |
451 | } |
452 | |
453 | static void hcodec_prog_qtbl(struct encode_wq_s *wq) |
454 | { |
455 | WRITE_HREG(HCODEC_Q_QUANT_CONTROL, |
456 | (0 << 23) | /* quant_table_addr */ |
457 | (1 << 22)); /* quant_table_addr_update */ |
458 | |
459 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
460 | wq->quant_tbl_i4[0]); |
461 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
462 | wq->quant_tbl_i4[1]); |
463 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
464 | wq->quant_tbl_i4[2]); |
465 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
466 | wq->quant_tbl_i4[3]); |
467 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
468 | wq->quant_tbl_i4[4]); |
469 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
470 | wq->quant_tbl_i4[5]); |
471 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
472 | wq->quant_tbl_i4[6]); |
473 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
474 | wq->quant_tbl_i4[7]); |
475 | |
476 | WRITE_HREG(HCODEC_Q_QUANT_CONTROL, |
477 | (8 << 23) | /* quant_table_addr */ |
478 | (1 << 22)); /* quant_table_addr_update */ |
479 | |
480 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
481 | wq->quant_tbl_i16[0]); |
482 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
483 | wq->quant_tbl_i16[1]); |
484 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
485 | wq->quant_tbl_i16[2]); |
486 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
487 | wq->quant_tbl_i16[3]); |
488 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
489 | wq->quant_tbl_i16[4]); |
490 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
491 | wq->quant_tbl_i16[5]); |
492 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
493 | wq->quant_tbl_i16[6]); |
494 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
495 | wq->quant_tbl_i16[7]); |
496 | |
497 | WRITE_HREG(HCODEC_Q_QUANT_CONTROL, |
498 | (16 << 23) | /* quant_table_addr */ |
499 | (1 << 22)); /* quant_table_addr_update */ |
500 | |
501 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
502 | wq->quant_tbl_me[0]); |
503 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
504 | wq->quant_tbl_me[1]); |
505 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
506 | wq->quant_tbl_me[2]); |
507 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
508 | wq->quant_tbl_me[3]); |
509 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
510 | wq->quant_tbl_me[4]); |
511 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
512 | wq->quant_tbl_me[5]); |
513 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
514 | wq->quant_tbl_me[6]); |
515 | WRITE_HREG(HCODEC_QUANT_TABLE_DATA, |
516 | wq->quant_tbl_me[7]); |
517 | } |
518 | |
519 | static void InitEncodeWeight(void) |
520 | { |
521 | me_mv_merge_ctl = |
522 | (0x1 << 31) | /* [31] me_merge_mv_en_16 */ |
523 | (0x1 << 30) | /* [30] me_merge_small_mv_en_16 */ |
524 | (0x1 << 29) | /* [29] me_merge_flex_en_16 */ |
525 | (0x1 << 28) | /* [28] me_merge_sad_en_16 */ |
526 | (0x1 << 27) | /* [27] me_merge_mv_en_8 */ |
527 | (0x1 << 26) | /* [26] me_merge_small_mv_en_8 */ |
528 | (0x1 << 25) | /* [25] me_merge_flex_en_8 */ |
529 | (0x1 << 24) | /* [24] me_merge_sad_en_8 */ |
530 | (0x12 << 18) | |
531 | /* [23:18] me_merge_mv_diff_16 - MV diff |
532 | * <= n pixel can be merged |
533 | */ |
534 | (0x2b << 12) | |
535 | /* [17:12] me_merge_mv_diff_8 - MV diff |
536 | * <= n pixel can be merged |
537 | */ |
538 | (0x80 << 0); |
539 | /* [11:0] me_merge_min_sad - SAD |
540 | * >= 0x180 can be merged with other MV |
541 | */ |
542 | |
543 | me_mv_weight_01 = (ME_MV_STEP_WEIGHT_1 << 24) | |
544 | (ME_MV_PRE_WEIGHT_1 << 16) | |
545 | (ME_MV_STEP_WEIGHT_0 << 8) | |
546 | (ME_MV_PRE_WEIGHT_0 << 0); |
547 | |
548 | me_mv_weight_23 = (ME_MV_STEP_WEIGHT_3 << 24) | |
549 | (ME_MV_PRE_WEIGHT_3 << 16) | |
550 | (ME_MV_STEP_WEIGHT_2 << 8) | |
551 | (ME_MV_PRE_WEIGHT_2 << 0); |
552 | |
553 | me_sad_range_inc = (ME_SAD_RANGE_3 << 24) | |
554 | (ME_SAD_RANGE_2 << 16) | |
555 | (ME_SAD_RANGE_1 << 8) | |
556 | (ME_SAD_RANGE_0 << 0); |
557 | |
558 | me_step0_close_mv = (0x100 << 10) | |
559 | /* me_step0_big_sad -- two MV sad |
560 | * diff bigger will use use 1 |
561 | */ |
562 | (2 << 5) | /* me_step0_close_mv_y */ |
563 | (2 << 0); /* me_step0_close_mv_x */ |
564 | |
565 | me_f_skip_sad = (0x00 << 24) | /* force_skip_sad_3 */ |
566 | (STEP_2_SKIP_SAD << 16) | /* force_skip_sad_2 */ |
567 | (STEP_1_SKIP_SAD << 8) | /* force_skip_sad_1 */ |
568 | (STEP_0_SKIP_SAD << 0); /* force_skip_sad_0 */ |
569 | |
570 | me_f_skip_weight = (0x00 << 24) | /* force_skip_weight_3 */ |
571 | /* force_skip_weight_2 */ |
572 | (STEP_2_SKIP_WEIGHT << 16) | |
573 | /* force_skip_weight_1 */ |
574 | (STEP_1_SKIP_WEIGHT << 8) | |
575 | /* force_skip_weight_0 */ |
576 | (STEP_0_SKIP_WEIGHT << 0); |
577 | |
578 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { |
579 | me_f_skip_sad = 0; |
580 | me_f_skip_weight = 0; |
581 | me_mv_weight_01 = 0; |
582 | me_mv_weight_23 = 0; |
583 | } |
584 | |
585 | me_sad_enough_01 = (ME_SAD_ENOUGH_1_DATA << 12) | |
586 | /* me_sad_enough_1 */ |
587 | (ME_SAD_ENOUGH_0_DATA << 0) | |
588 | /* me_sad_enough_0 */ |
589 | (0 << 12) | /* me_sad_enough_1 */ |
590 | (0 << 0); /* me_sad_enough_0 */ |
591 | |
592 | me_sad_enough_23 = (ADV_MV_8x8_ENOUGH_DATA << 12) | |
593 | /* adv_mv_8x8_enough */ |
594 | (ME_SAD_ENOUGH_2_DATA << 0) | |
595 | /* me_sad_enough_2 */ |
596 | (0 << 12) | /* me_sad_enough_3 */ |
597 | (0 << 0); /* me_sad_enough_2 */ |
598 | } |
599 | |
/*
 * Output stream (bitstream) buffer setting: point the VLC video
 * buffer start/write/read/end pointers at the work queue's
 * bitstream region, then program the VB control register.
 * Register write order is kept as-is (hardware programming
 * sequence from the vendor code).
 */
static void avc_init_output_buffer(struct encode_wq_s *wq)
{
	/* VLC video-buffer memory control word (vendor magic value) */
	WRITE_HREG(HCODEC_VLC_VB_MEM_CTL,
		((1 << 31) | (0x3f << 24) |
		(0x20 << 16) | (2 << 0)));
	WRITE_HREG(HCODEC_VLC_VB_START_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_WR_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_SW_RD_PTR,
		wq->mem.BitstreamStart);
	WRITE_HREG(HCODEC_VLC_VB_END_PTR,
		wq->mem.BitstreamEnd);
	/* write 1 first, then the control word (two writes as in vendor code) */
	WRITE_HREG(HCODEC_VLC_VB_CONTROL, 1);
	WRITE_HREG(HCODEC_VLC_VB_CONTROL,
		((0 << 14) | (7 << 3) |
		(1 << 1) | (0 << 0)));
}
619 | |
/*
 * Input DCT buffer setting: initialise the QDCT macroblock
 * start/end/write/read pointers to the work queue's DCT region
 * and clear the buffer status register.
 */
static void avc_init_input_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(HCODEC_QDCT_MB_START_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_END_PTR,
		wq->mem.dct_buff_end_addr);
	WRITE_HREG(HCODEC_QDCT_MB_WR_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_RD_PTR,
		wq->mem.dct_buff_start_addr);
	WRITE_HREG(HCODEC_QDCT_MB_BUFF, 0);
}
633 | |
/*
 * Input reference buffer setting: hand the packed reference canvas
 * word (see wq->mem.ref_buf_canvas) to ANC0 and clear the VLC HCMD
 * config register.
 */
static void avc_init_reference_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_ANC0_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_VLC_HCMD_CONFIG, 0);
}
640 | |
/* Point the assist memory offset register at the work queue's
 * assist buffer region.
 */
static void avc_init_assit_buffer(struct encode_wq_s *wq)
{
	WRITE_HREG(MEM_OFFSET_REG, wq->mem.assit_buffer_offset);
}
645 | |
/*
 * Deblock buffer setting, same as INI_CANVAS: the reconstruction,
 * deblock-read and deblock-write canvases all use the same packed
 * canvas word (see wq->mem.dblk_buf_canvas).
 */
static void avc_init_dblk_buffer(s32 canvas)
{
	WRITE_HREG(HCODEC_REC_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKR_CANVAS_ADDR, canvas);
	WRITE_HREG(HCODEC_DBKW_CANVAS_ADDR, canvas);
}
653 | |
/*
 * Per-frame encoder initialisation: reset the VLC byte counter and
 * interrupt control, set the assist interrupt lines (vendor magic
 * values), then program the picture-level parameters from the work
 * queue.  For an IDR frame, frame_number and pic_order_cnt_lsb are
 * forced to 0.
 *
 * @wq:  work queue holding the picture parameters
 * @idr: true if the frame to be encoded is an IDR frame
 */
static void avc_init_encoder(struct encode_wq_s *wq, bool idr)
{
	WRITE_HREG(HCODEC_VLC_TOTAL_BYTES, 0);
	WRITE_HREG(HCODEC_VLC_CONFIG, 0x07);
	WRITE_HREG(HCODEC_VLC_INT_CONTROL, 0);

	/* assist interrupt lines -- values carried over from vendor code */
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT0, 0x15);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT1, 0x8);
	WRITE_HREG(HCODEC_ASSIST_AMR1_INT3, 0x14);

	WRITE_HREG(IDR_PIC_ID, wq->pic.idr_pic_id);
	/* IDR frames restart frame numbering and POC at 0 */
	WRITE_HREG(FRAME_NUMBER,
		(idr == true) ? 0 : wq->pic.frame_number);
	WRITE_HREG(PIC_ORDER_CNT_LSB,
		(idr == true) ? 0 : wq->pic.pic_order_cnt_lsb);

	WRITE_HREG(LOG2_MAX_PIC_ORDER_CNT_LSB,
		wq->pic.log2_max_pic_order_cnt_lsb);
	WRITE_HREG(LOG2_MAX_FRAME_NUM,
		wq->pic.log2_max_frame_num);
	WRITE_HREG(ANC0_BUFFER_ID, 0);
	WRITE_HREG(QPPICTURE, wq->pic.init_qppicture);
}
677 | |
678 | static void avc_canvas_init(struct encode_wq_s *wq) |
679 | { |
680 | u32 canvas_width, canvas_height; |
681 | u32 start_addr = wq->mem.buf_start; |
682 | |
683 | canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5; |
684 | canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4; |
685 | |
686 | canvas_config(ENC_CANVAS_OFFSET, |
687 | start_addr + wq->mem.bufspec.dec0_y.buf_start, |
688 | canvas_width, canvas_height, |
689 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
690 | canvas_config(1 + ENC_CANVAS_OFFSET, |
691 | start_addr + wq->mem.bufspec.dec0_uv.buf_start, |
692 | canvas_width, canvas_height / 2, |
693 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
694 | /*here the third plane use the same address as the second plane*/ |
695 | canvas_config(2 + ENC_CANVAS_OFFSET, |
696 | start_addr + wq->mem.bufspec.dec0_uv.buf_start, |
697 | canvas_width, canvas_height / 2, |
698 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
699 | |
700 | canvas_config(3 + ENC_CANVAS_OFFSET, |
701 | start_addr + wq->mem.bufspec.dec1_y.buf_start, |
702 | canvas_width, canvas_height, |
703 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
704 | canvas_config(4 + ENC_CANVAS_OFFSET, |
705 | start_addr + wq->mem.bufspec.dec1_uv.buf_start, |
706 | canvas_width, canvas_height / 2, |
707 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
708 | /*here the third plane use the same address as the second plane*/ |
709 | canvas_config(5 + ENC_CANVAS_OFFSET, |
710 | start_addr + wq->mem.bufspec.dec1_uv.buf_start, |
711 | canvas_width, canvas_height / 2, |
712 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
713 | } |
714 | |
/*
 * Compute every derived buffer address for a work queue from its
 * base address (wq->mem.buf_start) and the static bufspec offsets:
 * DCT input region, dec0/dec1 uv sub-buffers, assist, bitstream,
 * scaler, dump-info and CBR-info regions, plus the packed canvas
 * index words for the deblock and reference buffers.
 */
static void avc_buffspec_init(struct encode_wq_s *wq)
{
	u32 canvas_width, canvas_height;
	u32 start_addr = wq->mem.buf_start;
	u32 mb_w = (wq->pic.encoder_width + 15) >> 4;
	u32 mb_h = (wq->pic.encoder_height + 15) >> 4;
	u32 mbs = mb_w * mb_h;

	/* canvas geometry: width aligned to 32, height to 16 */
	canvas_width = ((wq->pic.encoder_width + 31) >> 5) << 5;
	canvas_height = ((wq->pic.encoder_height + 15) >> 4) << 4;

	wq->mem.dct_buff_start_addr = start_addr +
		wq->mem.bufspec.dct.buf_start;
	wq->mem.dct_buff_end_addr =
		wq->mem.dct_buff_start_addr +
		wq->mem.bufspec.dct.buf_size - 1;
	enc_pr(LOG_INFO, "dct_buff_start_addr is 0x%x, wq:%p.\n",
		wq->mem.dct_buff_start_addr, (void *)wq);

	/* uv planes are placed immediately after their y plane */
	wq->mem.bufspec.dec0_uv.buf_start =
		wq->mem.bufspec.dec0_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec0_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.bufspec.dec1_uv.buf_start =
		wq->mem.bufspec.dec1_y.buf_start +
		canvas_width * canvas_height;
	wq->mem.bufspec.dec1_uv.buf_size = canvas_width * canvas_height / 2;
	wq->mem.assit_buffer_offset = start_addr +
		wq->mem.bufspec.assit.buf_start;
	enc_pr(LOG_INFO, "assit_buffer_offset is 0x%x, wq: %p.\n",
		wq->mem.assit_buffer_offset, (void *)wq);
	/*output stream buffer config*/
	wq->mem.BitstreamStart = start_addr +
		wq->mem.bufspec.bitstream.buf_start;
	wq->mem.BitstreamEnd =
		wq->mem.BitstreamStart +
		wq->mem.bufspec.bitstream.buf_size - 1;
	enc_pr(LOG_INFO, "BitstreamStart is 0x%x, wq: %p.\n",
		wq->mem.BitstreamStart, (void *)wq);

	wq->mem.scaler_buff_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.scale_buff.buf_start;
	wq->mem.dump_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.dump_info.buf_start;
	enc_pr(LOG_INFO,
		"CBR: dump_info_ddr_start_addr:%x.\n",
		wq->mem.dump_info_ddr_start_addr);
	enc_pr(LOG_INFO, "CBR: buf_start :%d.\n",
		wq->mem.buf_start);
	enc_pr(LOG_INFO, "CBR: dump_info.buf_start :%d.\n",
		wq->mem.bufspec.dump_info.buf_start);
	/* one dump record per macroblock, rounded up to a page multiple */
	wq->mem.dump_info_ddr_size =
		DUMP_INFO_BYTES_PER_MB * mbs;
	wq->mem.dump_info_ddr_size =
		(wq->mem.dump_info_ddr_size + PAGE_SIZE - 1)
		& ~(PAGE_SIZE - 1);
	wq->mem.cbr_info_ddr_start_addr =
		wq->mem.buf_start + wq->mem.bufspec.cbr_info.buf_start;
	wq->mem.cbr_info_ddr_size =
		wq->mem.bufspec.cbr_info.buf_size;
	/* CPU-accessible mapping of the CBR info block */
	wq->mem.cbr_info_ddr_virt_addr =
		codec_mm_vmap(wq->mem.cbr_info_ddr_start_addr,
			wq->mem.bufspec.cbr_info.buf_size);

	/* packed canvas index words: dec0 canvases (0..2) for the
	 * deblock buffer, dec1 canvases (3..5) for the reference buffer
	 * (see avc_canvas_init())
	 */
	wq->mem.dblk_buf_canvas =
		((ENC_CANVAS_OFFSET + 2) << 16) |
		((ENC_CANVAS_OFFSET + 1) << 8) |
		(ENC_CANVAS_OFFSET);
	wq->mem.ref_buf_canvas =
		((ENC_CANVAS_OFFSET + 5) << 16) |
		((ENC_CANVAS_OFFSET + 4) << 8) |
		(ENC_CANVAS_OFFSET + 3);
}
788 | |
/*
 * Program the IE/ME mode registers and the fixed slice configuration.
 *
 * @wq:    work queue with the picture geometry and rows_per_slice
 * @quant: unused in this function (kept for interface compatibility)
 *
 * With MULTI_SLICE_MC, a non-zero module parameter fixed_slice_cfg
 * wins; otherwise, if rows_per_slice does not cover the whole
 * picture, a per-slice value is derived from it.
 */
static void avc_init_ie_me_parameter(struct encode_wq_s *wq, u32 quant)
{
	ie_cur_ref_sel = 0;
	ie_pippeline_block = 12;
	/* currently disable half and sub pixel */
	ie_me_mode =
		(ie_pippeline_block & IE_PIPPELINE_BLOCK_MASK) <<
		IE_PIPPELINE_BLOCK_SHIFT;

	WRITE_HREG(IE_ME_MODE, ie_me_mode);
	WRITE_HREG(IE_REF_SEL, ie_cur_ref_sel);
	WRITE_HREG(IE_ME_MB_TYPE, ie_me_mb_type);
#ifdef MULTI_SLICE_MC
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		/* NOTE(review): mb_per_slice is derived from the picture
		 * HEIGHT here -- confirm this is intended rather than the
		 * width (macroblocks per row).
		 */
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
}
816 | |
817 | /* for temp */ |
818 | #define HCODEC_MFDIN_REGC_MBLP (HCODEC_MFDIN_REGB_AMPC + 0x1) |
819 | #define HCODEC_MFDIN_REG0D (HCODEC_MFDIN_REGB_AMPC + 0x2) |
820 | #define HCODEC_MFDIN_REG0E (HCODEC_MFDIN_REGB_AMPC + 0x3) |
821 | #define HCODEC_MFDIN_REG0F (HCODEC_MFDIN_REGB_AMPC + 0x4) |
822 | #define HCODEC_MFDIN_REG10 (HCODEC_MFDIN_REGB_AMPC + 0x5) |
823 | #define HCODEC_MFDIN_REG11 (HCODEC_MFDIN_REGB_AMPC + 0x6) |
824 | #define HCODEC_MFDIN_REG12 (HCODEC_MFDIN_REGB_AMPC + 0x7) |
825 | #define HCODEC_MFDIN_REG13 (HCODEC_MFDIN_REGB_AMPC + 0x8) |
826 | #define HCODEC_MFDIN_REG14 (HCODEC_MFDIN_REGB_AMPC + 0x9) |
827 | #define HCODEC_MFDIN_REG15 (HCODEC_MFDIN_REGB_AMPC + 0xa) |
828 | #define HCODEC_MFDIN_REG16 (HCODEC_MFDIN_REGB_AMPC + 0xb) |
829 | |
830 | static void mfdin_basic(u32 input, u8 iformat, |
831 | u8 oformat, u32 picsize_x, u32 picsize_y, |
832 | u8 r2y_en, u8 nr, u8 ifmt_extra) |
833 | { |
834 | u8 dsample_en; /* Downsample Enable */ |
835 | u8 interp_en; /* Interpolation Enable */ |
836 | u8 y_size; /* 0:16 Pixels for y direction pickup; 1:8 pixels */ |
837 | u8 r2y_mode; /* RGB2YUV Mode, range(0~3) */ |
838 | /* mfdin_reg3_canv[25:24]; |
839 | * // bytes per pixel in x direction for index0, 0:half 1:1 2:2 3:3 |
840 | */ |
841 | u8 canv_idx0_bppx; |
842 | /* mfdin_reg3_canv[27:26]; |
843 | * // bytes per pixel in x direction for index1-2, 0:half 1:1 2:2 3:3 |
844 | */ |
845 | u8 canv_idx1_bppx; |
846 | /* mfdin_reg3_canv[29:28]; |
847 | * // bytes per pixel in y direction for index0, 0:half 1:1 2:2 3:3 |
848 | */ |
849 | u8 canv_idx0_bppy; |
850 | /* mfdin_reg3_canv[31:30]; |
851 | * // bytes per pixel in y direction for index1-2, 0:half 1:1 2:2 3:3 |
852 | */ |
853 | u8 canv_idx1_bppy; |
854 | u8 ifmt444, ifmt422, ifmt420, linear_bytes4p; |
855 | u8 nr_enable; |
856 | u8 cfg_y_snr_en; |
857 | u8 cfg_y_tnr_en; |
858 | u8 cfg_c_snr_en; |
859 | u8 cfg_c_tnr_en; |
860 | u32 linear_bytesperline; |
861 | s32 reg_offset; |
862 | bool linear_enable = false; |
863 | bool format_err = false; |
864 | |
865 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) { |
866 | if ((iformat == 7) && (ifmt_extra > 2)) |
867 | format_err = true; |
868 | } else if (iformat == 7) |
869 | format_err = true; |
870 | |
871 | if (format_err) { |
872 | enc_pr(LOG_ERROR, |
873 | "mfdin format err, iformat:%d, ifmt_extra:%d\n", |
874 | iformat, ifmt_extra); |
875 | return; |
876 | } |
877 | if (iformat != 7) |
878 | ifmt_extra = 0; |
879 | |
880 | ifmt444 = ((iformat == 1) || (iformat == 5) || (iformat == 8) || |
881 | (iformat == 9) || (iformat == 12)) ? 1 : 0; |
882 | if (iformat == 7 && ifmt_extra == 1) |
883 | ifmt444 = 1; |
884 | ifmt422 = ((iformat == 0) || (iformat == 10)) ? 1 : 0; |
885 | if (iformat == 7 && ifmt_extra != 1) |
886 | ifmt422 = 1; |
887 | ifmt420 = ((iformat == 2) || (iformat == 3) || (iformat == 4) || |
888 | (iformat == 11)) ? 1 : 0; |
889 | dsample_en = ((ifmt444 && (oformat != 2)) || |
890 | (ifmt422 && (oformat == 0))) ? 1 : 0; |
891 | interp_en = ((ifmt422 && (oformat == 2)) || |
892 | (ifmt420 && (oformat != 0))) ? 1 : 0; |
893 | y_size = (oformat != 0) ? 1 : 0; |
894 | if (iformat == 12) |
895 | y_size = 0; |
896 | r2y_mode = (r2y_en == 1) ? 1 : 0; /* Fixed to 1 (TODO) */ |
897 | canv_idx0_bppx = (iformat == 1) ? 3 : (iformat == 0) ? 2 : 1; |
898 | canv_idx1_bppx = (iformat == 4) ? 0 : 1; |
899 | canv_idx0_bppy = 1; |
900 | canv_idx1_bppy = (iformat == 5) ? 1 : 0; |
901 | |
902 | if ((iformat == 8) || (iformat == 9) || (iformat == 12)) |
903 | linear_bytes4p = 3; |
904 | else if (iformat == 10) |
905 | linear_bytes4p = 2; |
906 | else if (iformat == 11) |
907 | linear_bytes4p = 1; |
908 | else |
909 | linear_bytes4p = 0; |
910 | if (iformat == 12) |
911 | linear_bytesperline = picsize_x * 4; |
912 | else |
913 | linear_bytesperline = picsize_x * linear_bytes4p; |
914 | |
915 | if (iformat < 8) |
916 | linear_enable = false; |
917 | else |
918 | linear_enable = true; |
919 | |
920 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXBB) { |
921 | reg_offset = -8; |
922 | /* nr_mode: 0:Disabled 1:SNR Only 2:TNR Only 3:3DNR */ |
923 | nr_enable = (nr) ? 1 : 0; |
924 | cfg_y_snr_en = ((nr == 1) || (nr == 3)) ? 1 : 0; |
925 | cfg_y_tnr_en = ((nr == 2) || (nr == 3)) ? 1 : 0; |
926 | cfg_c_snr_en = cfg_y_snr_en; |
927 | /* cfg_c_tnr_en = cfg_y_tnr_en; */ |
928 | cfg_c_tnr_en = 0; |
929 | |
930 | /* NR For Y */ |
931 | WRITE_HREG((HCODEC_MFDIN_REG0D + reg_offset), |
932 | ((cfg_y_snr_en << 0) | |
933 | (y_snr_err_norm << 1) | |
934 | (y_snr_gau_bld_core << 2) | |
935 | (((y_snr_gau_bld_ofst) & 0xff) << 6) | |
936 | (y_snr_gau_bld_rate << 14) | |
937 | (y_snr_gau_alp0_min << 20) | |
938 | (y_snr_gau_alp0_max << 26))); |
939 | WRITE_HREG((HCODEC_MFDIN_REG0E + reg_offset), |
940 | ((cfg_y_tnr_en << 0) | |
941 | (y_tnr_mc_en << 1) | |
942 | (y_tnr_txt_mode << 2) | |
943 | (y_tnr_mot_sad_margin << 3) | |
944 | (y_tnr_alpha_min << 7) | |
945 | (y_tnr_alpha_max << 13) | |
946 | (y_tnr_deghost_os << 19))); |
947 | WRITE_HREG((HCODEC_MFDIN_REG0F + reg_offset), |
948 | ((y_tnr_mot_cortxt_rate << 0) | |
949 | (y_tnr_mot_distxt_ofst << 8) | |
950 | (y_tnr_mot_distxt_rate << 4) | |
951 | (y_tnr_mot_dismot_ofst << 16) | |
952 | (y_tnr_mot_frcsad_lock << 24))); |
953 | WRITE_HREG((HCODEC_MFDIN_REG10 + reg_offset), |
954 | ((y_tnr_mot2alp_frc_gain << 0) | |
955 | (y_tnr_mot2alp_nrm_gain << 8) | |
956 | (y_tnr_mot2alp_dis_gain << 16) | |
957 | (y_tnr_mot2alp_dis_ofst << 24))); |
958 | WRITE_HREG((HCODEC_MFDIN_REG11 + reg_offset), |
959 | ((y_bld_beta2alp_rate << 0) | |
960 | (y_bld_beta_min << 8) | |
961 | (y_bld_beta_max << 14))); |
962 | |
963 | /* NR For C */ |
964 | WRITE_HREG((HCODEC_MFDIN_REG12 + reg_offset), |
965 | ((cfg_y_snr_en << 0) | |
966 | (c_snr_err_norm << 1) | |
967 | (c_snr_gau_bld_core << 2) | |
968 | (((c_snr_gau_bld_ofst) & 0xff) << 6) | |
969 | (c_snr_gau_bld_rate << 14) | |
970 | (c_snr_gau_alp0_min << 20) | |
971 | (c_snr_gau_alp0_max << 26))); |
972 | |
973 | WRITE_HREG((HCODEC_MFDIN_REG13 + reg_offset), |
974 | ((cfg_c_tnr_en << 0) | |
975 | (c_tnr_mc_en << 1) | |
976 | (c_tnr_txt_mode << 2) | |
977 | (c_tnr_mot_sad_margin << 3) | |
978 | (c_tnr_alpha_min << 7) | |
979 | (c_tnr_alpha_max << 13) | |
980 | (c_tnr_deghost_os << 19))); |
981 | WRITE_HREG((HCODEC_MFDIN_REG14 + reg_offset), |
982 | ((c_tnr_mot_cortxt_rate << 0) | |
983 | (c_tnr_mot_distxt_ofst << 8) | |
984 | (c_tnr_mot_distxt_rate << 4) | |
985 | (c_tnr_mot_dismot_ofst << 16) | |
986 | (c_tnr_mot_frcsad_lock << 24))); |
987 | WRITE_HREG((HCODEC_MFDIN_REG15 + reg_offset), |
988 | ((c_tnr_mot2alp_frc_gain << 0) | |
989 | (c_tnr_mot2alp_nrm_gain << 8) | |
990 | (c_tnr_mot2alp_dis_gain << 16) | |
991 | (c_tnr_mot2alp_dis_ofst << 24))); |
992 | |
993 | WRITE_HREG((HCODEC_MFDIN_REG16 + reg_offset), |
994 | ((c_bld_beta2alp_rate << 0) | |
995 | (c_bld_beta_min << 8) | |
996 | (c_bld_beta_max << 14))); |
997 | |
998 | WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset), |
999 | (iformat << 0) | (oformat << 4) | |
1000 | (dsample_en << 6) | (y_size << 8) | |
1001 | (interp_en << 9) | (r2y_en << 12) | |
1002 | (r2y_mode << 13) | (ifmt_extra << 16) | |
1003 | (nr_enable << 19)); |
1004 | WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), |
1005 | (picsize_x << 14) | (picsize_y << 0)); |
1006 | } else { |
1007 | reg_offset = 0; |
1008 | WRITE_HREG((HCODEC_MFDIN_REG1_CTRL + reg_offset), |
1009 | (iformat << 0) | (oformat << 4) | |
1010 | (dsample_en << 6) | (y_size << 8) | |
1011 | (interp_en << 9) | (r2y_en << 12) | |
1012 | (r2y_mode << 13)); |
1013 | WRITE_HREG((HCODEC_MFDIN_REG8_DMBL + reg_offset), |
1014 | (picsize_x << 12) | (picsize_y << 0)); |
1015 | } |
1016 | |
1017 | if (linear_enable == false) { |
1018 | WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), |
1019 | (input & 0xffffff) | |
1020 | (canv_idx1_bppy << 30) | |
1021 | (canv_idx0_bppy << 28) | |
1022 | (canv_idx1_bppx << 26) | |
1023 | (canv_idx0_bppx << 24)); |
1024 | WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), |
1025 | (0 << 16) | (0 << 0)); |
1026 | WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), 0); |
1027 | } else { |
1028 | WRITE_HREG((HCODEC_MFDIN_REG3_CANV + reg_offset), |
1029 | (canv_idx1_bppy << 30) | |
1030 | (canv_idx0_bppy << 28) | |
1031 | (canv_idx1_bppx << 26) | |
1032 | (canv_idx0_bppx << 24)); |
1033 | WRITE_HREG((HCODEC_MFDIN_REG4_LNR0 + reg_offset), |
1034 | (linear_bytes4p << 16) | (linear_bytesperline << 0)); |
1035 | WRITE_HREG((HCODEC_MFDIN_REG5_LNR1 + reg_offset), input); |
1036 | } |
1037 | |
1038 | if (iformat == 12) |
1039 | WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset), |
1040 | (2 << 0) | (1 << 3) | (0 << 6) | |
1041 | (3 << 9) | (6 << 12) | (5 << 15) | |
1042 | (4 << 18) | (7 << 21)); |
1043 | else |
1044 | WRITE_HREG((HCODEC_MFDIN_REG9_ENDN + reg_offset), |
1045 | (7 << 0) | (6 << 3) | (5 << 6) | |
1046 | (4 << 9) | (3 << 12) | (2 << 15) | |
1047 | (1 << 18) | (0 << 21)); |
1048 | } |
1049 | |
1050 | #ifdef CONFIG_AMLOGIC_MEDIA_GE2D |
1051 | static int scale_frame(struct encode_wq_s *wq, |
1052 | struct encode_request_s *request, |
1053 | struct config_para_ex_s *ge2d_config, |
1054 | u32 src_addr, bool canvas) |
1055 | { |
1056 | struct ge2d_context_s *context = encode_manager.context; |
1057 | int src_top, src_left, src_width, src_height; |
1058 | struct canvas_s cs0, cs1, cs2, cd; |
1059 | u32 src_canvas, dst_canvas; |
1060 | u32 src_canvas_w, dst_canvas_w; |
1061 | u32 src_h = request->src_h; |
1062 | u32 dst_w = ((wq->pic.encoder_width + 15) >> 4) << 4; |
1063 | u32 dst_h = ((wq->pic.encoder_height + 15) >> 4) << 4; |
1064 | int input_format = GE2D_FORMAT_M24_NV21; |
1065 | |
1066 | src_top = request->crop_top; |
1067 | src_left = request->crop_left; |
1068 | src_width = request->src_w - src_left - request->crop_right; |
1069 | src_height = request->src_h - src_top - request->crop_bottom; |
1070 | if (canvas) { |
1071 | if ((request->fmt == FMT_NV21) |
1072 | || (request->fmt == FMT_NV12)) { |
1073 | src_canvas = src_addr & 0xffff; |
1074 | input_format = GE2D_FORMAT_M24_NV21; |
1075 | } else if (request->fmt == FMT_BGR888) { |
1076 | src_canvas = src_addr & 0xffffff; |
1077 | input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d |
1078 | } else { |
1079 | src_canvas = src_addr & 0xffffff; |
1080 | input_format = GE2D_FORMAT_M24_YUV420; |
1081 | } |
1082 | } else { |
1083 | if ((request->fmt == FMT_NV21) |
1084 | || (request->fmt == FMT_NV12)) { |
1085 | src_canvas_w = |
1086 | ((request->src_w + 31) >> 5) << 5; |
1087 | canvas_config(ENC_CANVAS_OFFSET + 9, |
1088 | src_addr, |
1089 | src_canvas_w, src_h, |
1090 | CANVAS_ADDR_NOWRAP, |
1091 | CANVAS_BLKMODE_LINEAR); |
1092 | canvas_config(ENC_CANVAS_OFFSET + 10, |
1093 | src_addr + src_canvas_w * src_h, |
1094 | src_canvas_w, src_h / 2, |
1095 | CANVAS_ADDR_NOWRAP, |
1096 | CANVAS_BLKMODE_LINEAR); |
1097 | src_canvas = |
1098 | ((ENC_CANVAS_OFFSET + 10) << 8) |
1099 | | (ENC_CANVAS_OFFSET + 9); |
1100 | input_format = GE2D_FORMAT_M24_NV21; |
1101 | } else if (request->fmt == FMT_BGR888) { |
1102 | src_canvas_w = |
1103 | ((request->src_w + 31) >> 5) << 5; |
1104 | canvas_config(ENC_CANVAS_OFFSET + 9, |
1105 | src_addr, |
1106 | src_canvas_w * 3, src_h, |
1107 | CANVAS_ADDR_NOWRAP, |
1108 | CANVAS_BLKMODE_LINEAR); |
1109 | src_canvas = ENC_CANVAS_OFFSET + 9; |
1110 | input_format = GE2D_FORMAT_S24_RGB; //Opposite color after ge2d |
1111 | } else { |
1112 | src_canvas_w = |
1113 | ((request->src_w + 63) >> 6) << 6; |
1114 | canvas_config(ENC_CANVAS_OFFSET + 9, |
1115 | src_addr, |
1116 | src_canvas_w, src_h, |
1117 | CANVAS_ADDR_NOWRAP, |
1118 | CANVAS_BLKMODE_LINEAR); |
1119 | canvas_config(ENC_CANVAS_OFFSET + 10, |
1120 | src_addr + src_canvas_w * src_h, |
1121 | src_canvas_w / 2, src_h / 2, |
1122 | CANVAS_ADDR_NOWRAP, |
1123 | CANVAS_BLKMODE_LINEAR); |
1124 | canvas_config(ENC_CANVAS_OFFSET + 11, |
1125 | src_addr + src_canvas_w * src_h * 5 / 4, |
1126 | src_canvas_w / 2, src_h / 2, |
1127 | CANVAS_ADDR_NOWRAP, |
1128 | CANVAS_BLKMODE_LINEAR); |
1129 | src_canvas = |
1130 | ((ENC_CANVAS_OFFSET + 11) << 16) | |
1131 | ((ENC_CANVAS_OFFSET + 10) << 8) | |
1132 | (ENC_CANVAS_OFFSET + 9); |
1133 | input_format = GE2D_FORMAT_M24_YUV420; |
1134 | } |
1135 | } |
1136 | dst_canvas_w = ((dst_w + 31) >> 5) << 5; |
1137 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1138 | wq->mem.scaler_buff_start_addr, |
1139 | dst_canvas_w, dst_h, |
1140 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
1141 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1142 | wq->mem.scaler_buff_start_addr + dst_canvas_w * dst_h, |
1143 | dst_canvas_w, dst_h / 2, |
1144 | CANVAS_ADDR_NOWRAP, CANVAS_BLKMODE_LINEAR); |
1145 | dst_canvas = ((ENC_CANVAS_OFFSET + 7) << 8) | |
1146 | (ENC_CANVAS_OFFSET + 6); |
1147 | ge2d_config->alu_const_color = 0; |
1148 | ge2d_config->bitmask_en = 0; |
1149 | ge2d_config->src1_gb_alpha = 0; |
1150 | ge2d_config->dst_xy_swap = 0; |
1151 | canvas_read(src_canvas & 0xff, &cs0); |
1152 | canvas_read((src_canvas >> 8) & 0xff, &cs1); |
1153 | canvas_read((src_canvas >> 16) & 0xff, &cs2); |
1154 | ge2d_config->src_planes[0].addr = cs0.addr; |
1155 | ge2d_config->src_planes[0].w = cs0.width; |
1156 | ge2d_config->src_planes[0].h = cs0.height; |
1157 | ge2d_config->src_planes[1].addr = cs1.addr; |
1158 | ge2d_config->src_planes[1].w = cs1.width; |
1159 | ge2d_config->src_planes[1].h = cs1.height; |
1160 | ge2d_config->src_planes[2].addr = cs2.addr; |
1161 | ge2d_config->src_planes[2].w = cs2.width; |
1162 | ge2d_config->src_planes[2].h = cs2.height; |
1163 | canvas_read(dst_canvas & 0xff, &cd); |
1164 | ge2d_config->dst_planes[0].addr = cd.addr; |
1165 | ge2d_config->dst_planes[0].w = cd.width; |
1166 | ge2d_config->dst_planes[0].h = cd.height; |
1167 | ge2d_config->src_key.key_enable = 0; |
1168 | ge2d_config->src_key.key_mask = 0; |
1169 | ge2d_config->src_key.key_mode = 0; |
1170 | ge2d_config->src_para.canvas_index = src_canvas; |
1171 | ge2d_config->src_para.mem_type = CANVAS_TYPE_INVALID; |
1172 | ge2d_config->src_para.format = input_format | GE2D_LITTLE_ENDIAN; |
1173 | ge2d_config->src_para.fill_color_en = 0; |
1174 | ge2d_config->src_para.fill_mode = 0; |
1175 | ge2d_config->src_para.x_rev = 0; |
1176 | ge2d_config->src_para.y_rev = 0; |
1177 | ge2d_config->src_para.color = 0xffffffff; |
1178 | ge2d_config->src_para.top = 0; |
1179 | ge2d_config->src_para.left = 0; |
1180 | ge2d_config->src_para.width = request->src_w; |
1181 | ge2d_config->src_para.height = request->src_h; |
1182 | ge2d_config->src2_para.mem_type = CANVAS_TYPE_INVALID; |
1183 | ge2d_config->dst_para.canvas_index = dst_canvas; |
1184 | ge2d_config->dst_para.mem_type = CANVAS_TYPE_INVALID; |
1185 | ge2d_config->dst_para.format = |
1186 | GE2D_FORMAT_M24_NV21 | GE2D_LITTLE_ENDIAN; |
1187 | ge2d_config->dst_para.fill_color_en = 0; |
1188 | ge2d_config->dst_para.fill_mode = 0; |
1189 | ge2d_config->dst_para.x_rev = 0; |
1190 | ge2d_config->dst_para.y_rev = 0; |
1191 | ge2d_config->dst_para.color = 0; |
1192 | ge2d_config->dst_para.top = 0; |
1193 | ge2d_config->dst_para.left = 0; |
1194 | ge2d_config->dst_para.width = dst_w; |
1195 | ge2d_config->dst_para.height = dst_h; |
1196 | ge2d_config->dst_para.x_rev = 0; |
1197 | ge2d_config->dst_para.y_rev = 0; |
1198 | |
1199 | if (ge2d_context_config_ex(context, ge2d_config) < 0) { |
1200 | pr_err("++ge2d configing error.\n"); |
1201 | return -1; |
1202 | } |
1203 | stretchblt_noalpha(context, src_left, src_top, src_width, src_height, |
1204 | 0, 0, wq->pic.encoder_width, wq->pic.encoder_height); |
1205 | return dst_canvas_w*dst_h * 3 / 2; |
1206 | } |
1207 | #endif |
1208 | |
1209 | static s32 set_input_format(struct encode_wq_s *wq, |
1210 | struct encode_request_s *request) |
1211 | { |
1212 | s32 ret = 0; |
1213 | u8 iformat = MAX_FRAME_FMT, oformat = MAX_FRAME_FMT, r2y_en = 0; |
1214 | u32 picsize_x, picsize_y, src_addr; |
1215 | u32 canvas_w = 0; |
1216 | u32 input = request->src; |
1217 | u32 input_y = 0; |
1218 | u32 input_u = 0; |
1219 | u32 input_v = 0; |
1220 | u8 ifmt_extra = 0; |
1221 | |
1222 | if ((request->fmt == FMT_RGB565) || (request->fmt >= MAX_FRAME_FMT)) |
1223 | return -1; |
1224 | |
1225 | picsize_x = ((wq->pic.encoder_width + 15) >> 4) << 4; |
1226 | picsize_y = ((wq->pic.encoder_height + 15) >> 4) << 4; |
1227 | oformat = 0; |
1228 | if ((request->type == LOCAL_BUFF) |
1229 | || (request->type == PHYSICAL_BUFF) |
1230 | || (request->type == DMA_BUFF)) { |
1231 | if ((request->type == LOCAL_BUFF) && |
1232 | (request->flush_flag & AMVENC_FLUSH_FLAG_INPUT)) |
1233 | dma_flush(wq->mem.dct_buff_start_addr, |
1234 | request->framesize); |
1235 | if (request->type == LOCAL_BUFF) { |
1236 | input = wq->mem.dct_buff_start_addr; |
1237 | src_addr = |
1238 | wq->mem.dct_buff_start_addr; |
1239 | } else if (request->type == DMA_BUFF) { |
1240 | if (request->plane_num == 3) { |
1241 | input_y = (unsigned long)request->dma_cfg[0].paddr; |
1242 | input_u = (unsigned long)request->dma_cfg[1].paddr; |
1243 | input_v = (unsigned long)request->dma_cfg[2].paddr; |
1244 | } else if (request->plane_num == 2) { |
1245 | input_y = (unsigned long)request->dma_cfg[0].paddr; |
1246 | input_u = (unsigned long)request->dma_cfg[1].paddr; |
1247 | input_v = input_u; |
1248 | } else if (request->plane_num == 1) { |
1249 | input_y = (unsigned long)request->dma_cfg[0].paddr; |
1250 | if (request->fmt == FMT_NV21 |
1251 | || request->fmt == FMT_NV12) { |
1252 | input_u = input_y + picsize_x * picsize_y; |
1253 | input_v = input_u; |
1254 | } |
1255 | if (request->fmt == FMT_YUV420) { |
1256 | input_u = input_y + picsize_x * picsize_y; |
1257 | input_v = input_u + picsize_x * picsize_y / 4; |
1258 | } |
1259 | } |
1260 | src_addr = input_y; |
1261 | picsize_y = wq->pic.encoder_height; |
1262 | enc_pr(LOG_INFO, "dma addr[0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx]\n", |
1263 | (unsigned long)request->dma_cfg[0].vaddr, |
1264 | (unsigned long)request->dma_cfg[0].paddr, |
1265 | (unsigned long)request->dma_cfg[1].vaddr, |
1266 | (unsigned long)request->dma_cfg[1].paddr, |
1267 | (unsigned long)request->dma_cfg[2].vaddr, |
1268 | (unsigned long)request->dma_cfg[2].paddr); |
1269 | } else { |
1270 | src_addr = input; |
1271 | picsize_y = wq->pic.encoder_height; |
1272 | } |
1273 | if (request->scale_enable) { |
1274 | #ifdef CONFIG_AMLOGIC_MEDIA_GE2D |
1275 | struct config_para_ex_s ge2d_config; |
1276 | |
1277 | memset(&ge2d_config, 0, |
1278 | sizeof(struct config_para_ex_s)); |
1279 | scale_frame( |
1280 | wq, request, |
1281 | &ge2d_config, |
1282 | src_addr, |
1283 | false); |
1284 | iformat = 2; |
1285 | r2y_en = 0; |
1286 | input = ((ENC_CANVAS_OFFSET + 7) << 8) | |
1287 | (ENC_CANVAS_OFFSET + 6); |
1288 | ret = 0; |
1289 | goto MFDIN; |
1290 | #else |
1291 | enc_pr(LOG_ERROR, |
1292 | "Warning: need enable ge2d for scale frame!\n"); |
1293 | return -1; |
1294 | #endif |
1295 | } |
1296 | if ((request->fmt <= FMT_YUV444_PLANE) || |
1297 | (request->fmt >= FMT_YUV422_12BIT)) |
1298 | r2y_en = 0; |
1299 | else |
1300 | r2y_en = 1; |
1301 | |
1302 | if (request->fmt >= FMT_YUV422_12BIT) { |
1303 | iformat = 7; |
1304 | ifmt_extra = request->fmt - FMT_YUV422_12BIT; |
1305 | if (request->fmt == FMT_YUV422_12BIT) |
1306 | canvas_w = picsize_x * 24 / 8; |
1307 | else if (request->fmt == FMT_YUV444_10BIT) |
1308 | canvas_w = picsize_x * 32 / 8; |
1309 | else |
1310 | canvas_w = (picsize_x * 20 + 7) / 8; |
1311 | canvas_w = ((canvas_w + 31) >> 5) << 5; |
1312 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1313 | input, |
1314 | canvas_w, picsize_y, |
1315 | CANVAS_ADDR_NOWRAP, |
1316 | CANVAS_BLKMODE_LINEAR); |
1317 | input = ENC_CANVAS_OFFSET + 6; |
1318 | input = input & 0xff; |
1319 | } else if (request->fmt == FMT_YUV422_SINGLE) |
1320 | iformat = 10; |
1321 | else if ((request->fmt == FMT_YUV444_SINGLE) |
1322 | || (request->fmt == FMT_RGB888)) { |
1323 | iformat = 1; |
1324 | if (request->fmt == FMT_RGB888) |
1325 | r2y_en = 1; |
1326 | canvas_w = picsize_x * 3; |
1327 | canvas_w = ((canvas_w + 31) >> 5) << 5; |
1328 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1329 | input, |
1330 | canvas_w, picsize_y, |
1331 | CANVAS_ADDR_NOWRAP, |
1332 | CANVAS_BLKMODE_LINEAR); |
1333 | input = ENC_CANVAS_OFFSET + 6; |
1334 | } else if ((request->fmt == FMT_NV21) |
1335 | || (request->fmt == FMT_NV12)) { |
1336 | canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; |
1337 | iformat = (request->fmt == FMT_NV21) ? 2 : 3; |
1338 | if (request->type == DMA_BUFF) { |
1339 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1340 | input_y, |
1341 | canvas_w, picsize_y, |
1342 | CANVAS_ADDR_NOWRAP, |
1343 | CANVAS_BLKMODE_LINEAR); |
1344 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1345 | input_u, |
1346 | canvas_w, picsize_y / 2, |
1347 | CANVAS_ADDR_NOWRAP, |
1348 | CANVAS_BLKMODE_LINEAR); |
1349 | } else { |
1350 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1351 | input, |
1352 | canvas_w, picsize_y, |
1353 | CANVAS_ADDR_NOWRAP, |
1354 | CANVAS_BLKMODE_LINEAR); |
1355 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1356 | input + canvas_w * picsize_y, |
1357 | canvas_w, picsize_y / 2, |
1358 | CANVAS_ADDR_NOWRAP, |
1359 | CANVAS_BLKMODE_LINEAR); |
1360 | } |
1361 | input = ((ENC_CANVAS_OFFSET + 7) << 8) | |
1362 | (ENC_CANVAS_OFFSET + 6); |
1363 | } else if (request->fmt == FMT_YUV420) { |
1364 | iformat = 4; |
1365 | canvas_w = ((wq->pic.encoder_width + 63) >> 6) << 6; |
1366 | if (request->type == DMA_BUFF) { |
1367 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1368 | input_y, |
1369 | canvas_w, picsize_y, |
1370 | CANVAS_ADDR_NOWRAP, |
1371 | CANVAS_BLKMODE_LINEAR); |
1372 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1373 | input_u, |
1374 | canvas_w / 2, picsize_y / 2, |
1375 | CANVAS_ADDR_NOWRAP, |
1376 | CANVAS_BLKMODE_LINEAR); |
1377 | canvas_config(ENC_CANVAS_OFFSET + 8, |
1378 | input_v, |
1379 | canvas_w / 2, picsize_y / 2, |
1380 | CANVAS_ADDR_NOWRAP, |
1381 | CANVAS_BLKMODE_LINEAR); |
1382 | } else { |
1383 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1384 | input, |
1385 | canvas_w, picsize_y, |
1386 | CANVAS_ADDR_NOWRAP, |
1387 | CANVAS_BLKMODE_LINEAR); |
1388 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1389 | input + canvas_w * picsize_y, |
1390 | canvas_w / 2, picsize_y / 2, |
1391 | CANVAS_ADDR_NOWRAP, |
1392 | CANVAS_BLKMODE_LINEAR); |
1393 | canvas_config(ENC_CANVAS_OFFSET + 8, |
1394 | input + canvas_w * picsize_y * 5 / 4, |
1395 | canvas_w / 2, picsize_y / 2, |
1396 | CANVAS_ADDR_NOWRAP, |
1397 | CANVAS_BLKMODE_LINEAR); |
1398 | |
1399 | } |
1400 | input = ((ENC_CANVAS_OFFSET + 8) << 16) | |
1401 | ((ENC_CANVAS_OFFSET + 7) << 8) | |
1402 | (ENC_CANVAS_OFFSET + 6); |
1403 | } else if ((request->fmt == FMT_YUV444_PLANE) |
1404 | || (request->fmt == FMT_RGB888_PLANE)) { |
1405 | if (request->fmt == FMT_RGB888_PLANE) |
1406 | r2y_en = 1; |
1407 | iformat = 5; |
1408 | canvas_w = ((wq->pic.encoder_width + 31) >> 5) << 5; |
1409 | canvas_config(ENC_CANVAS_OFFSET + 6, |
1410 | input, |
1411 | canvas_w, picsize_y, |
1412 | CANVAS_ADDR_NOWRAP, |
1413 | CANVAS_BLKMODE_LINEAR); |
1414 | canvas_config(ENC_CANVAS_OFFSET + 7, |
1415 | input + canvas_w * picsize_y, |
1416 | canvas_w, picsize_y, |
1417 | CANVAS_ADDR_NOWRAP, |
1418 | CANVAS_BLKMODE_LINEAR); |
1419 | canvas_config(ENC_CANVAS_OFFSET + 8, |
1420 | input + canvas_w * picsize_y * 2, |
1421 | canvas_w, picsize_y, |
1422 | CANVAS_ADDR_NOWRAP, |
1423 | CANVAS_BLKMODE_LINEAR); |
1424 | input = ((ENC_CANVAS_OFFSET + 8) << 16) | |
1425 | ((ENC_CANVAS_OFFSET + 7) << 8) | |
1426 | (ENC_CANVAS_OFFSET + 6); |
1427 | } else if (request->fmt == FMT_RGBA8888) { |
1428 | r2y_en = 1; |
1429 | iformat = 12; |
1430 | } |
1431 | ret = 0; |
1432 | } else if (request->type == CANVAS_BUFF) { |
1433 | r2y_en = 0; |
1434 | if (request->scale_enable) { |
1435 | #ifdef CONFIG_AMLOGIC_MEDIA_GE2D |
1436 | struct config_para_ex_s ge2d_config; |
1437 | memset(&ge2d_config, 0, |
1438 | sizeof(struct config_para_ex_s)); |
1439 | scale_frame( |
1440 | wq, request, |
1441 | &ge2d_config, |
1442 | input, true); |
1443 | iformat = 2; |
1444 | r2y_en = 0; |
1445 | input = ((ENC_CANVAS_OFFSET + 7) << 8) | |
1446 | (ENC_CANVAS_OFFSET + 6); |
1447 | ret = 0; |
1448 | goto MFDIN; |
1449 | #else |
1450 | enc_pr(LOG_ERROR, |
1451 | "Warning: need enable ge2d for scale frame!\n"); |
1452 | return -1; |
1453 | #endif |
1454 | } |
1455 | if (request->fmt == FMT_YUV422_SINGLE) { |
1456 | iformat = 0; |
1457 | input = input & 0xff; |
1458 | } else if (request->fmt == FMT_YUV444_SINGLE) { |
1459 | iformat = 1; |
1460 | input = input & 0xff; |
1461 | } else if ((request->fmt == FMT_NV21) |
1462 | || (request->fmt == FMT_NV12)) { |
1463 | iformat = (request->fmt == FMT_NV21) ? 2 : 3; |
1464 | input = input & 0xffff; |
1465 | } else if (request->fmt == FMT_YUV420) { |
1466 | iformat = 4; |
1467 | input = input & 0xffffff; |
1468 | } else if ((request->fmt == FMT_YUV444_PLANE) |
1469 | || (request->fmt == FMT_RGB888_PLANE)) { |
1470 | if (request->fmt == FMT_RGB888_PLANE) |
1471 | r2y_en = 1; |
1472 | iformat = 5; |
1473 | input = input & 0xffffff; |
1474 | } else if ((request->fmt == FMT_YUV422_12BIT) |
1475 | || (request->fmt == FMT_YUV444_10BIT) |
1476 | || (request->fmt == FMT_YUV422_10BIT)) { |
1477 | iformat = 7; |
1478 | ifmt_extra = request->fmt - FMT_YUV422_12BIT; |
1479 | input = input & 0xff; |
1480 | } else |
1481 | ret = -1; |
1482 | } |
1483 | #ifdef CONFIG_AMLOGIC_MEDIA_GE2D |
1484 | MFDIN: |
1485 | #endif |
1486 | if (ret == 0) |
1487 | mfdin_basic(input, iformat, oformat, |
1488 | picsize_x, picsize_y, r2y_en, |
1489 | request->nr_mode, ifmt_extra); |
1490 | return ret; |
1491 | } |
1492 | |
1493 | #ifdef H264_ENC_CBR |
1494 | static void ConvertTable2Risc(void *table, u32 len) |
1495 | { |
1496 | u32 i, j; |
1497 | u16 temp; |
1498 | u16 *tbl = (u16 *)table; |
1499 | |
1500 | if ((len < 8) || (len % 8) || (!table)) { |
1501 | enc_pr(LOG_ERROR, "ConvertTable2Risc tbl %p, len %d error\n", |
1502 | table, len); |
1503 | return; |
1504 | } |
1505 | for (i = 0; i < len / 8; i++) { |
1506 | j = i << 2; |
1507 | temp = tbl[j]; |
1508 | tbl[j] = tbl[j + 3]; |
1509 | tbl[j + 3] = temp; |
1510 | |
1511 | temp = tbl[j + 1]; |
1512 | tbl[j + 1] = tbl[j + 2]; |
1513 | tbl[j + 2] = temp; |
1514 | } |
1515 | |
1516 | } |
1517 | #endif |
1518 | |
1519 | static void avc_prot_init(struct encode_wq_s *wq, |
1520 | struct encode_request_s *request, u32 quant, bool IDR) |
1521 | { |
1522 | u32 data32; |
1523 | u32 pic_width, pic_height; |
1524 | u32 pic_mb_nr; |
1525 | u32 pic_mbx, pic_mby; |
1526 | u32 i_pic_qp, p_pic_qp; |
1527 | u32 i_pic_qp_c, p_pic_qp_c; |
1528 | u32 pic_width_in_mb; |
1529 | u32 slice_qp; |
1530 | |
1531 | pic_width = wq->pic.encoder_width; |
1532 | pic_height = wq->pic.encoder_height; |
1533 | pic_mb_nr = 0; |
1534 | pic_mbx = 0; |
1535 | pic_mby = 0; |
1536 | i_pic_qp = quant; |
1537 | p_pic_qp = quant; |
1538 | |
1539 | pic_width_in_mb = (pic_width + 15) / 16; |
1540 | WRITE_HREG(HCODEC_HDEC_MC_OMEM_AUTO, |
1541 | (1 << 31) | /* use_omem_mb_xy */ |
1542 | ((pic_width_in_mb - 1) << 16)); /* omem_max_mb_x */ |
1543 | |
1544 | WRITE_HREG(HCODEC_VLC_ADV_CONFIG, |
1545 | /* early_mix_mc_hcmd -- will enable in P Picture */ |
1546 | (0 << 10) | |
1547 | (1 << 9) | /* update_top_left_mix */ |
1548 | (1 << 8) | /* p_top_left_mix */ |
1549 | /* mv_cal_mixed_type -- will enable in P Picture */ |
1550 | (0 << 7) | |
1551 | /* mc_hcmd_mixed_type -- will enable in P Picture */ |
1552 | (0 << 6) | |
1553 | (1 << 5) | /* use_separate_int_control */ |
1554 | (1 << 4) | /* hcmd_intra_use_q_info */ |
1555 | (1 << 3) | /* hcmd_left_use_prev_info */ |
1556 | (1 << 2) | /* hcmd_use_q_info */ |
1557 | (1 << 1) | /* use_q_delta_quant */ |
1558 | /* detect_I16_from_I4 use qdct detected mb_type */ |
1559 | (0 << 0)); |
1560 | |
1561 | WRITE_HREG(HCODEC_QDCT_ADV_CONFIG, |
1562 | (1 << 29) | /* mb_info_latch_no_I16_pred_mode */ |
1563 | (1 << 28) | /* ie_dma_mbxy_use_i_pred */ |
1564 | (1 << 27) | /* ie_dma_read_write_use_ip_idx */ |
1565 | (1 << 26) | /* ie_start_use_top_dma_count */ |
1566 | (1 << 25) | /* i_pred_top_dma_rd_mbbot */ |
1567 | (1 << 24) | /* i_pred_top_dma_wr_disable */ |
1568 | /* i_pred_mix -- will enable in P Picture */ |
1569 | (0 << 23) | |
1570 | (1 << 22) | /* me_ab_rd_when_intra_in_p */ |
1571 | (1 << 21) | /* force_mb_skip_run_when_intra */ |
1572 | /* mc_out_mixed_type -- will enable in P Picture */ |
1573 | (0 << 20) | |
1574 | (1 << 19) | /* ie_start_when_quant_not_full */ |
1575 | (1 << 18) | /* mb_info_state_mix */ |
1576 | /* mb_type_use_mix_result -- will enable in P Picture */ |
1577 | (0 << 17) | |
1578 | /* me_cb_ie_read_enable -- will enable in P Picture */ |
1579 | (0 << 16) | |
1580 | /* ie_cur_data_from_me -- will enable in P Picture */ |
1581 | (0 << 15) | |
1582 | (1 << 14) | /* rem_per_use_table */ |
1583 | (0 << 13) | /* q_latch_int_enable */ |
1584 | (1 << 12) | /* q_use_table */ |
1585 | (0 << 11) | /* q_start_wait */ |
1586 | (1 << 10) | /* LUMA_16_LEFT_use_cur */ |
1587 | (1 << 9) | /* DC_16_LEFT_SUM_use_cur */ |
1588 | (1 << 8) | /* c_ref_ie_sel_cur */ |
1589 | (0 << 7) | /* c_ipred_perfect_mode */ |
1590 | (1 << 6) | /* ref_ie_ul_sel */ |
1591 | (1 << 5) | /* mb_type_use_ie_result */ |
1592 | (1 << 4) | /* detect_I16_from_I4 */ |
1593 | (1 << 3) | /* ie_not_wait_ref_busy */ |
1594 | (1 << 2) | /* ie_I16_enable */ |
1595 | (3 << 0)); /* ie_done_sel // fastest when waiting */ |
1596 | |
1597 | if (request != NULL) { |
1598 | WRITE_HREG(HCODEC_IE_WEIGHT, |
1599 | (request->i16_weight << 16) | |
1600 | (request->i4_weight << 0)); |
1601 | WRITE_HREG(HCODEC_ME_WEIGHT, |
1602 | (request->me_weight << 0)); |
1603 | WRITE_HREG(HCODEC_SAD_CONTROL_0, |
1604 | /* ie_sad_offset_I16 */ |
1605 | (request->i16_weight << 16) | |
1606 | /* ie_sad_offset_I4 */ |
1607 | (request->i4_weight << 0)); |
1608 | WRITE_HREG(HCODEC_SAD_CONTROL_1, |
1609 | /* ie_sad_shift_I16 */ |
1610 | (IE_SAD_SHIFT_I16 << 24) | |
1611 | /* ie_sad_shift_I4 */ |
1612 | (IE_SAD_SHIFT_I4 << 20) | |
1613 | /* me_sad_shift_INTER */ |
1614 | (ME_SAD_SHIFT_INTER << 16) | |
1615 | /* me_sad_offset_INTER */ |
1616 | (request->me_weight << 0)); |
1617 | wq->me_weight = request->me_weight; |
1618 | wq->i4_weight = request->i4_weight; |
1619 | wq->i16_weight = request->i16_weight; |
1620 | } else { |
1621 | WRITE_HREG(HCODEC_IE_WEIGHT, |
1622 | (I16MB_WEIGHT_OFFSET << 16) | |
1623 | (I4MB_WEIGHT_OFFSET << 0)); |
1624 | WRITE_HREG(HCODEC_ME_WEIGHT, |
1625 | (ME_WEIGHT_OFFSET << 0)); |
1626 | WRITE_HREG(HCODEC_SAD_CONTROL_0, |
1627 | /* ie_sad_offset_I16 */ |
1628 | (I16MB_WEIGHT_OFFSET << 16) | |
1629 | /* ie_sad_offset_I4 */ |
1630 | (I4MB_WEIGHT_OFFSET << 0)); |
1631 | WRITE_HREG(HCODEC_SAD_CONTROL_1, |
1632 | /* ie_sad_shift_I16 */ |
1633 | (IE_SAD_SHIFT_I16 << 24) | |
1634 | /* ie_sad_shift_I4 */ |
1635 | (IE_SAD_SHIFT_I4 << 20) | |
1636 | /* me_sad_shift_INTER */ |
1637 | (ME_SAD_SHIFT_INTER << 16) | |
1638 | /* me_sad_offset_INTER */ |
1639 | (ME_WEIGHT_OFFSET << 0)); |
1640 | } |
1641 | |
1642 | WRITE_HREG(HCODEC_ADV_MV_CTL0, |
1643 | (ADV_MV_LARGE_16x8 << 31) | |
1644 | (ADV_MV_LARGE_8x16 << 30) | |
1645 | (ADV_MV_8x8_WEIGHT << 16) | /* adv_mv_8x8_weight */ |
1646 | /* adv_mv_4x4x4_weight should be set bigger */ |
1647 | (ADV_MV_4x4x4_WEIGHT << 0)); |
1648 | WRITE_HREG(HCODEC_ADV_MV_CTL1, |
1649 | /* adv_mv_16x16_weight */ |
1650 | (ADV_MV_16x16_WEIGHT << 16) | |
1651 | (ADV_MV_LARGE_16x16 << 15) | |
1652 | (ADV_MV_16_8_WEIGHT << 0)); /* adv_mv_16_8_weight */ |
1653 | |
1654 | hcodec_prog_qtbl(wq); |
1655 | if (IDR) { |
1656 | i_pic_qp = |
1657 | wq->quant_tbl_i4[0] & 0xff; |
1658 | i_pic_qp += |
1659 | wq->quant_tbl_i16[0] & 0xff; |
1660 | i_pic_qp /= 2; |
1661 | p_pic_qp = i_pic_qp; |
1662 | } else { |
1663 | i_pic_qp = |
1664 | wq->quant_tbl_i4[0] & 0xff; |
1665 | i_pic_qp += |
1666 | wq->quant_tbl_i16[0] & 0xff; |
1667 | p_pic_qp = wq->quant_tbl_me[0] & 0xff; |
1668 | slice_qp = (i_pic_qp + p_pic_qp) / 3; |
1669 | i_pic_qp = slice_qp; |
1670 | p_pic_qp = i_pic_qp; |
1671 | } |
1672 | #ifdef H264_ENC_CBR |
1673 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { |
1674 | data32 = READ_HREG(HCODEC_SAD_CONTROL_1); |
1675 | data32 = data32 & 0xffff; /* remove sad shift */ |
1676 | WRITE_HREG(HCODEC_SAD_CONTROL_1, data32); |
1677 | WRITE_HREG(H264_ENC_CBR_TABLE_ADDR, |
1678 | wq->mem.cbr_info_ddr_start_addr); |
1679 | WRITE_HREG(H264_ENC_CBR_MB_SIZE_ADDR, |
1680 | wq->mem.cbr_info_ddr_start_addr |
1681 | + CBR_TABLE_SIZE); |
1682 | WRITE_HREG(H264_ENC_CBR_CTL, |
1683 | (wq->cbr_info.start_tbl_id << 28) | |
1684 | (wq->cbr_info.short_shift << 24) | |
1685 | (wq->cbr_info.long_mb_num << 16) | |
1686 | (wq->cbr_info.long_th << 0)); |
1687 | WRITE_HREG(H264_ENC_CBR_REGION_SIZE, |
1688 | (wq->cbr_info.block_w << 16) | |
1689 | (wq->cbr_info.block_h << 0)); |
1690 | } |
1691 | #endif |
1692 | WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_0, |
1693 | (0 << 19) | /* vlc_delta_quant_1 */ |
1694 | (i_pic_qp << 13) | /* vlc_quant_1 */ |
1695 | (0 << 6) | /* vlc_delta_quant_0 */ |
1696 | (i_pic_qp << 0)); /* vlc_quant_0 */ |
1697 | WRITE_HREG(HCODEC_QDCT_VLC_QUANT_CTL_1, |
1698 | (14 << 6) | /* vlc_max_delta_q_neg */ |
1699 | (13 << 0)); /* vlc_max_delta_q_pos */ |
1700 | WRITE_HREG(HCODEC_VLC_PIC_SIZE, |
1701 | pic_width | (pic_height << 16)); |
1702 | WRITE_HREG(HCODEC_VLC_PIC_POSITION, |
1703 | (pic_mb_nr << 16) | |
1704 | (pic_mby << 8) | |
1705 | (pic_mbx << 0)); |
1706 | |
1707 | /* synopsys parallel_case full_case */ |
1708 | switch (i_pic_qp) { |
1709 | case 0: |
1710 | i_pic_qp_c = 0; |
1711 | break; |
1712 | case 1: |
1713 | i_pic_qp_c = 1; |
1714 | break; |
1715 | case 2: |
1716 | i_pic_qp_c = 2; |
1717 | break; |
1718 | case 3: |
1719 | i_pic_qp_c = 3; |
1720 | break; |
1721 | case 4: |
1722 | i_pic_qp_c = 4; |
1723 | break; |
1724 | case 5: |
1725 | i_pic_qp_c = 5; |
1726 | break; |
1727 | case 6: |
1728 | i_pic_qp_c = 6; |
1729 | break; |
1730 | case 7: |
1731 | i_pic_qp_c = 7; |
1732 | break; |
1733 | case 8: |
1734 | i_pic_qp_c = 8; |
1735 | break; |
1736 | case 9: |
1737 | i_pic_qp_c = 9; |
1738 | break; |
1739 | case 10: |
1740 | i_pic_qp_c = 10; |
1741 | break; |
1742 | case 11: |
1743 | i_pic_qp_c = 11; |
1744 | break; |
1745 | case 12: |
1746 | i_pic_qp_c = 12; |
1747 | break; |
1748 | case 13: |
1749 | i_pic_qp_c = 13; |
1750 | break; |
1751 | case 14: |
1752 | i_pic_qp_c = 14; |
1753 | break; |
1754 | case 15: |
1755 | i_pic_qp_c = 15; |
1756 | break; |
1757 | case 16: |
1758 | i_pic_qp_c = 16; |
1759 | break; |
1760 | case 17: |
1761 | i_pic_qp_c = 17; |
1762 | break; |
1763 | case 18: |
1764 | i_pic_qp_c = 18; |
1765 | break; |
1766 | case 19: |
1767 | i_pic_qp_c = 19; |
1768 | break; |
1769 | case 20: |
1770 | i_pic_qp_c = 20; |
1771 | break; |
1772 | case 21: |
1773 | i_pic_qp_c = 21; |
1774 | break; |
1775 | case 22: |
1776 | i_pic_qp_c = 22; |
1777 | break; |
1778 | case 23: |
1779 | i_pic_qp_c = 23; |
1780 | break; |
1781 | case 24: |
1782 | i_pic_qp_c = 24; |
1783 | break; |
1784 | case 25: |
1785 | i_pic_qp_c = 25; |
1786 | break; |
1787 | case 26: |
1788 | i_pic_qp_c = 26; |
1789 | break; |
1790 | case 27: |
1791 | i_pic_qp_c = 27; |
1792 | break; |
1793 | case 28: |
1794 | i_pic_qp_c = 28; |
1795 | break; |
1796 | case 29: |
1797 | i_pic_qp_c = 29; |
1798 | break; |
1799 | case 30: |
1800 | i_pic_qp_c = 29; |
1801 | break; |
1802 | case 31: |
1803 | i_pic_qp_c = 30; |
1804 | break; |
1805 | case 32: |
1806 | i_pic_qp_c = 31; |
1807 | break; |
1808 | case 33: |
1809 | i_pic_qp_c = 32; |
1810 | break; |
1811 | case 34: |
1812 | i_pic_qp_c = 32; |
1813 | break; |
1814 | case 35: |
1815 | i_pic_qp_c = 33; |
1816 | break; |
1817 | case 36: |
1818 | i_pic_qp_c = 34; |
1819 | break; |
1820 | case 37: |
1821 | i_pic_qp_c = 34; |
1822 | break; |
1823 | case 38: |
1824 | i_pic_qp_c = 35; |
1825 | break; |
1826 | case 39: |
1827 | i_pic_qp_c = 35; |
1828 | break; |
1829 | case 40: |
1830 | i_pic_qp_c = 36; |
1831 | break; |
1832 | case 41: |
1833 | i_pic_qp_c = 36; |
1834 | break; |
1835 | case 42: |
1836 | i_pic_qp_c = 37; |
1837 | break; |
1838 | case 43: |
1839 | i_pic_qp_c = 37; |
1840 | break; |
1841 | case 44: |
1842 | i_pic_qp_c = 37; |
1843 | break; |
1844 | case 45: |
1845 | i_pic_qp_c = 38; |
1846 | break; |
1847 | case 46: |
1848 | i_pic_qp_c = 38; |
1849 | break; |
1850 | case 47: |
1851 | i_pic_qp_c = 38; |
1852 | break; |
1853 | case 48: |
1854 | i_pic_qp_c = 39; |
1855 | break; |
1856 | case 49: |
1857 | i_pic_qp_c = 39; |
1858 | break; |
1859 | case 50: |
1860 | i_pic_qp_c = 39; |
1861 | break; |
1862 | default: |
1863 | i_pic_qp_c = 39; |
1864 | break; |
1865 | } |
1866 | |
1867 | /* synopsys parallel_case full_case */ |
1868 | switch (p_pic_qp) { |
1869 | case 0: |
1870 | p_pic_qp_c = 0; |
1871 | break; |
1872 | case 1: |
1873 | p_pic_qp_c = 1; |
1874 | break; |
1875 | case 2: |
1876 | p_pic_qp_c = 2; |
1877 | break; |
1878 | case 3: |
1879 | p_pic_qp_c = 3; |
1880 | break; |
1881 | case 4: |
1882 | p_pic_qp_c = 4; |
1883 | break; |
1884 | case 5: |
1885 | p_pic_qp_c = 5; |
1886 | break; |
1887 | case 6: |
1888 | p_pic_qp_c = 6; |
1889 | break; |
1890 | case 7: |
1891 | p_pic_qp_c = 7; |
1892 | break; |
1893 | case 8: |
1894 | p_pic_qp_c = 8; |
1895 | break; |
1896 | case 9: |
1897 | p_pic_qp_c = 9; |
1898 | break; |
1899 | case 10: |
1900 | p_pic_qp_c = 10; |
1901 | break; |
1902 | case 11: |
1903 | p_pic_qp_c = 11; |
1904 | break; |
1905 | case 12: |
1906 | p_pic_qp_c = 12; |
1907 | break; |
1908 | case 13: |
1909 | p_pic_qp_c = 13; |
1910 | break; |
1911 | case 14: |
1912 | p_pic_qp_c = 14; |
1913 | break; |
1914 | case 15: |
1915 | p_pic_qp_c = 15; |
1916 | break; |
1917 | case 16: |
1918 | p_pic_qp_c = 16; |
1919 | break; |
1920 | case 17: |
1921 | p_pic_qp_c = 17; |
1922 | break; |
1923 | case 18: |
1924 | p_pic_qp_c = 18; |
1925 | break; |
1926 | case 19: |
1927 | p_pic_qp_c = 19; |
1928 | break; |
1929 | case 20: |
1930 | p_pic_qp_c = 20; |
1931 | break; |
1932 | case 21: |
1933 | p_pic_qp_c = 21; |
1934 | break; |
1935 | case 22: |
1936 | p_pic_qp_c = 22; |
1937 | break; |
1938 | case 23: |
1939 | p_pic_qp_c = 23; |
1940 | break; |
1941 | case 24: |
1942 | p_pic_qp_c = 24; |
1943 | break; |
1944 | case 25: |
1945 | p_pic_qp_c = 25; |
1946 | break; |
1947 | case 26: |
1948 | p_pic_qp_c = 26; |
1949 | break; |
1950 | case 27: |
1951 | p_pic_qp_c = 27; |
1952 | break; |
1953 | case 28: |
1954 | p_pic_qp_c = 28; |
1955 | break; |
1956 | case 29: |
1957 | p_pic_qp_c = 29; |
1958 | break; |
1959 | case 30: |
1960 | p_pic_qp_c = 29; |
1961 | break; |
1962 | case 31: |
1963 | p_pic_qp_c = 30; |
1964 | break; |
1965 | case 32: |
1966 | p_pic_qp_c = 31; |
1967 | break; |
1968 | case 33: |
1969 | p_pic_qp_c = 32; |
1970 | break; |
1971 | case 34: |
1972 | p_pic_qp_c = 32; |
1973 | break; |
1974 | case 35: |
1975 | p_pic_qp_c = 33; |
1976 | break; |
1977 | case 36: |
1978 | p_pic_qp_c = 34; |
1979 | break; |
1980 | case 37: |
1981 | p_pic_qp_c = 34; |
1982 | break; |
1983 | case 38: |
1984 | p_pic_qp_c = 35; |
1985 | break; |
1986 | case 39: |
1987 | p_pic_qp_c = 35; |
1988 | break; |
1989 | case 40: |
1990 | p_pic_qp_c = 36; |
1991 | break; |
1992 | case 41: |
1993 | p_pic_qp_c = 36; |
1994 | break; |
1995 | case 42: |
1996 | p_pic_qp_c = 37; |
1997 | break; |
1998 | case 43: |
1999 | p_pic_qp_c = 37; |
2000 | break; |
2001 | case 44: |
2002 | p_pic_qp_c = 37; |
2003 | break; |
2004 | case 45: |
2005 | p_pic_qp_c = 38; |
2006 | break; |
2007 | case 46: |
2008 | p_pic_qp_c = 38; |
2009 | break; |
2010 | case 47: |
2011 | p_pic_qp_c = 38; |
2012 | break; |
2013 | case 48: |
2014 | p_pic_qp_c = 39; |
2015 | break; |
2016 | case 49: |
2017 | p_pic_qp_c = 39; |
2018 | break; |
2019 | case 50: |
2020 | p_pic_qp_c = 39; |
2021 | break; |
2022 | default: |
2023 | p_pic_qp_c = 39; |
2024 | break; |
2025 | } |
2026 | WRITE_HREG(HCODEC_QDCT_Q_QUANT_I, |
2027 | (i_pic_qp_c << 22) | |
2028 | (i_pic_qp << 16) | |
2029 | ((i_pic_qp_c % 6) << 12) | |
2030 | ((i_pic_qp_c / 6) << 8) | |
2031 | ((i_pic_qp % 6) << 4) | |
2032 | ((i_pic_qp / 6) << 0)); |
2033 | |
2034 | WRITE_HREG(HCODEC_QDCT_Q_QUANT_P, |
2035 | (p_pic_qp_c << 22) | |
2036 | (p_pic_qp << 16) | |
2037 | ((p_pic_qp_c % 6) << 12) | |
2038 | ((p_pic_qp_c / 6) << 8) | |
2039 | ((p_pic_qp % 6) << 4) | |
2040 | ((p_pic_qp / 6) << 0)); |
2041 | |
2042 | #ifdef ENABLE_IGNORE_FUNCTION |
2043 | WRITE_HREG(HCODEC_IGNORE_CONFIG, |
2044 | (1 << 31) | /* ignore_lac_coeff_en */ |
2045 | (1 << 26) | /* ignore_lac_coeff_else (<1) */ |
2046 | (1 << 21) | /* ignore_lac_coeff_2 (<1) */ |
2047 | (2 << 16) | /* ignore_lac_coeff_1 (<2) */ |
2048 | (1 << 15) | /* ignore_cac_coeff_en */ |
2049 | (1 << 10) | /* ignore_cac_coeff_else (<1) */ |
2050 | (1 << 5) | /* ignore_cac_coeff_2 (<1) */ |
2051 | (3 << 0)); /* ignore_cac_coeff_1 (<2) */ |
2052 | |
2053 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) |
2054 | WRITE_HREG(HCODEC_IGNORE_CONFIG_2, |
2055 | (1 << 31) | /* ignore_t_lac_coeff_en */ |
2056 | (1 << 26) | /* ignore_t_lac_coeff_else (<1) */ |
2057 | (2 << 21) | /* ignore_t_lac_coeff_2 (<2) */ |
2058 | (6 << 16) | /* ignore_t_lac_coeff_1 (<6) */ |
2059 | (1<<15) | /* ignore_cdc_coeff_en */ |
2060 | (0<<14) | /* ignore_t_lac_coeff_else_le_3 */ |
2061 | (1<<13) | /* ignore_t_lac_coeff_else_le_4 */ |
2062 | (1<<12) | /* ignore_cdc_only_when_empty_cac_inter */ |
2063 | (1<<11) | /* ignore_cdc_only_when_one_empty_inter */ |
2064 | /* ignore_cdc_range_max_inter 0-0, 1-1, 2-2, 3-3 */ |
2065 | (2<<9) | |
2066 | /* ignore_cdc_abs_max_inter 0-1, 1-2, 2-3, 3-4 */ |
2067 | (0<<7) | |
2068 | /* ignore_cdc_only_when_empty_cac_intra */ |
2069 | (1<<5) | |
2070 | /* ignore_cdc_only_when_one_empty_intra */ |
2071 | (1<<4) | |
2072 | /* ignore_cdc_range_max_intra 0-0, 1-1, 2-2, 3-3 */ |
2073 | (1<<2) | |
2074 | /* ignore_cdc_abs_max_intra 0-1, 1-2, 2-3, 3-4 */ |
2075 | (0<<0)); |
2076 | else |
2077 | WRITE_HREG(HCODEC_IGNORE_CONFIG_2, |
2078 | (1 << 31) | /* ignore_t_lac_coeff_en */ |
2079 | (1 << 26) | /* ignore_t_lac_coeff_else (<1) */ |
2080 | (1 << 21) | /* ignore_t_lac_coeff_2 (<1) */ |
2081 | (5 << 16) | /* ignore_t_lac_coeff_1 (<5) */ |
2082 | (0 << 0)); |
2083 | #else |
2084 | WRITE_HREG(HCODEC_IGNORE_CONFIG, 0); |
2085 | WRITE_HREG(HCODEC_IGNORE_CONFIG_2, 0); |
2086 | #endif |
2087 | |
2088 | WRITE_HREG(HCODEC_QDCT_MB_CONTROL, |
2089 | (1 << 9) | /* mb_info_soft_reset */ |
2090 | (1 << 0)); /* mb read buffer soft reset */ |
2091 | |
2092 | WRITE_HREG(HCODEC_QDCT_MB_CONTROL, |
2093 | (1 << 28) | /* ignore_t_p8x8 */ |
2094 | (0 << 27) | /* zero_mc_out_null_non_skipped_mb */ |
2095 | (0 << 26) | /* no_mc_out_null_non_skipped_mb */ |
2096 | (0 << 25) | /* mc_out_even_skipped_mb */ |
2097 | (0 << 24) | /* mc_out_wait_cbp_ready */ |
2098 | (0 << 23) | /* mc_out_wait_mb_type_ready */ |
2099 | (1 << 29) | /* ie_start_int_enable */ |
2100 | (1 << 19) | /* i_pred_enable */ |
2101 | (1 << 20) | /* ie_sub_enable */ |
2102 | (1 << 18) | /* iq_enable */ |
2103 | (1 << 17) | /* idct_enable */ |
2104 | (1 << 14) | /* mb_pause_enable */ |
2105 | (1 << 13) | /* q_enable */ |
2106 | (1 << 12) | /* dct_enable */ |
2107 | (1 << 10) | /* mb_info_en */ |
2108 | (0 << 3) | /* endian */ |
2109 | (0 << 1) | /* mb_read_en */ |
2110 | (0 << 0)); /* soft reset */ |
2111 | |
2112 | WRITE_HREG(HCODEC_SAD_CONTROL, |
2113 | (0 << 3) | /* ie_result_buff_enable */ |
2114 | (1 << 2) | /* ie_result_buff_soft_reset */ |
2115 | (0 << 1) | /* sad_enable */ |
2116 | (1 << 0)); /* sad soft reset */ |
2117 | WRITE_HREG(HCODEC_IE_RESULT_BUFFER, 0); |
2118 | |
2119 | WRITE_HREG(HCODEC_SAD_CONTROL, |
2120 | (1 << 3) | /* ie_result_buff_enable */ |
2121 | (0 << 2) | /* ie_result_buff_soft_reset */ |
2122 | (1 << 1) | /* sad_enable */ |
2123 | (0 << 0)); /* sad soft reset */ |
2124 | |
2125 | WRITE_HREG(HCODEC_IE_CONTROL, |
2126 | (1 << 30) | /* active_ul_block */ |
2127 | (0 << 1) | /* ie_enable */ |
2128 | (1 << 0)); /* ie soft reset */ |
2129 | |
2130 | WRITE_HREG(HCODEC_IE_CONTROL, |
2131 | (1 << 30) | /* active_ul_block */ |
2132 | (0 << 1) | /* ie_enable */ |
2133 | (0 << 0)); /* ie soft reset */ |
2134 | |
2135 | WRITE_HREG(HCODEC_ME_SKIP_LINE, |
2136 | (8 << 24) | /* step_3_skip_line */ |
2137 | (8 << 18) | /* step_2_skip_line */ |
2138 | (2 << 12) | /* step_1_skip_line */ |
2139 | (0 << 6) | /* step_0_skip_line */ |
2140 | (0 << 0)); |
2141 | |
2142 | WRITE_HREG(HCODEC_ME_MV_MERGE_CTL, me_mv_merge_ctl); |
2143 | WRITE_HREG(HCODEC_ME_STEP0_CLOSE_MV, me_step0_close_mv); |
2144 | WRITE_HREG(HCODEC_ME_SAD_ENOUGH_01, me_sad_enough_01); |
2145 | WRITE_HREG(HCODEC_ME_SAD_ENOUGH_23, me_sad_enough_23); |
2146 | WRITE_HREG(HCODEC_ME_F_SKIP_SAD, me_f_skip_sad); |
2147 | WRITE_HREG(HCODEC_ME_F_SKIP_WEIGHT, me_f_skip_weight); |
2148 | WRITE_HREG(HCODEC_ME_MV_WEIGHT_01, me_mv_weight_01); |
2149 | WRITE_HREG(HCODEC_ME_MV_WEIGHT_23, me_mv_weight_23); |
2150 | WRITE_HREG(HCODEC_ME_SAD_RANGE_INC, me_sad_range_inc); |
2151 | |
2152 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_TXL) { |
2153 | WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, 0); |
2154 | WRITE_HREG(HCODEC_V5_SIMPLE_MB_CTL, |
2155 | (v5_use_small_diff_cnt << 7) | |
2156 | (v5_simple_mb_inter_all_en << 6) | |
2157 | (v5_simple_mb_inter_8x8_en << 5) | |
2158 | (v5_simple_mb_inter_16_8_en << 4) | |
2159 | (v5_simple_mb_inter_16x16_en << 3) | |
2160 | (v5_simple_mb_intra_en << 2) | |
2161 | (v5_simple_mb_C_en << 1) | |
2162 | (v5_simple_mb_Y_en << 0)); |
2163 | WRITE_HREG(HCODEC_V5_MB_DIFF_SUM, 0); |
2164 | WRITE_HREG(HCODEC_V5_SMALL_DIFF_CNT, |
2165 | (v5_small_diff_C<<16) | |
2166 | (v5_small_diff_Y<<0)); |
2167 | if (qp_mode == 1) { |
2168 | WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT, |
2169 | 0); |
2170 | } else { |
2171 | WRITE_HREG(HCODEC_V5_SIMPLE_MB_DQUANT, |
2172 | v5_simple_dq_setting); |
2173 | } |
2174 | WRITE_HREG(HCODEC_V5_SIMPLE_MB_ME_WEIGHT, |
2175 | v5_simple_me_weight_setting); |
2176 | /* txlx can remove it */ |
2177 | WRITE_HREG(HCODEC_QDCT_CONFIG, 1 << 0); |
2178 | } |
2179 | |
2180 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) { |
2181 | WRITE_HREG(HCODEC_V4_FORCE_SKIP_CFG, |
2182 | (i_pic_qp << 26) | /* v4_force_q_r_intra */ |
2183 | (i_pic_qp << 20) | /* v4_force_q_r_inter */ |
2184 | (0 << 19) | /* v4_force_q_y_enable */ |
2185 | (5 << 16) | /* v4_force_qr_y */ |
2186 | (6 << 12) | /* v4_force_qp_y */ |
2187 | (0 << 0)); /* v4_force_skip_sad */ |
2188 | |
2189 | /* V3 Force skip */ |
2190 | WRITE_HREG(HCODEC_V3_SKIP_CONTROL, |
2191 | (1 << 31) | /* v3_skip_enable */ |
2192 | (0 << 30) | /* v3_step_1_weight_enable */ |
2193 | (1 << 28) | /* v3_mv_sad_weight_enable */ |
2194 | (1 << 27) | /* v3_ipred_type_enable */ |
2195 | (V3_FORCE_SKIP_SAD_1 << 12) | |
2196 | (V3_FORCE_SKIP_SAD_0 << 0)); |
2197 | WRITE_HREG(HCODEC_V3_SKIP_WEIGHT, |
2198 | (V3_SKIP_WEIGHT_1 << 16) | |
2199 | (V3_SKIP_WEIGHT_0 << 0)); |
2200 | WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD, |
2201 | (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) | |
2202 | (V3_LEVEL_1_SKIP_MAX_SAD << 0)); |
2203 | WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT, |
2204 | (V3_FORCE_SKIP_SAD_2 << 16) | |
2205 | (V3_SKIP_WEIGHT_2 << 0)); |
2206 | if (request != NULL) { |
2207 | unsigned int off1, off2; |
2208 | |
2209 | off1 = V3_IE_F_ZERO_SAD_I4 - I4MB_WEIGHT_OFFSET; |
2210 | off2 = V3_IE_F_ZERO_SAD_I16 |
2211 | - I16MB_WEIGHT_OFFSET; |
2212 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, |
2213 | ((request->i16_weight + off2) << 16) | |
2214 | ((request->i4_weight + off1) << 0)); |
2215 | off1 = V3_ME_F_ZERO_SAD - ME_WEIGHT_OFFSET; |
2216 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, |
2217 | (0 << 25) | |
2218 | /* v3_no_ver_when_top_zero_en */ |
2219 | (0 << 24) | |
2220 | /* v3_no_hor_when_left_zero_en */ |
2221 | (3 << 16) | /* type_hor break */ |
2222 | ((request->me_weight + off1) << 0)); |
2223 | } else { |
2224 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, |
2225 | (V3_IE_F_ZERO_SAD_I16 << 16) | |
2226 | (V3_IE_F_ZERO_SAD_I4 << 0)); |
2227 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, |
2228 | (0 << 25) | |
2229 | /* v3_no_ver_when_top_zero_en */ |
2230 | (0 << 24) | |
2231 | /* v3_no_hor_when_left_zero_en */ |
2232 | (3 << 16) | /* type_hor break */ |
2233 | (V3_ME_F_ZERO_SAD << 0)); |
2234 | } |
2235 | } else if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { |
2236 | /* V3 Force skip */ |
2237 | WRITE_HREG(HCODEC_V3_SKIP_CONTROL, |
2238 | (1 << 31) | /* v3_skip_enable */ |
2239 | (0 << 30) | /* v3_step_1_weight_enable */ |
2240 | (1 << 28) | /* v3_mv_sad_weight_enable */ |
2241 | (1 << 27) | /* v3_ipred_type_enable */ |
2242 | (0 << 12) | /* V3_FORCE_SKIP_SAD_1 */ |
2243 | (0 << 0)); /* V3_FORCE_SKIP_SAD_0 */ |
2244 | WRITE_HREG(HCODEC_V3_SKIP_WEIGHT, |
2245 | (V3_SKIP_WEIGHT_1 << 16) | |
2246 | (V3_SKIP_WEIGHT_0 << 0)); |
2247 | WRITE_HREG(HCODEC_V3_L1_SKIP_MAX_SAD, |
2248 | (V3_LEVEL_1_F_SKIP_MAX_SAD << 16) | |
2249 | (V3_LEVEL_1_SKIP_MAX_SAD << 0)); |
2250 | WRITE_HREG(HCODEC_V3_L2_SKIP_WEIGHT, |
2251 | (0 << 16) | /* V3_FORCE_SKIP_SAD_2 */ |
2252 | (V3_SKIP_WEIGHT_2 << 0)); |
2253 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_0, |
2254 | (0 << 16) | /* V3_IE_F_ZERO_SAD_I16 */ |
2255 | (0 << 0)); /* V3_IE_F_ZERO_SAD_I4 */ |
2256 | WRITE_HREG(HCODEC_V3_F_ZERO_CTL_1, |
2257 | (0 << 25) | /* v3_no_ver_when_top_zero_en */ |
2258 | (0 << 24) | /* v3_no_hor_when_left_zero_en */ |
2259 | (3 << 16) | /* type_hor break */ |
2260 | (0 << 0)); /* V3_ME_F_ZERO_SAD */ |
2261 | } |
2262 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { |
2263 | int i; |
2264 | /* MV SAD Table */ |
2265 | for (i = 0; i < 64; i++) |
2266 | WRITE_HREG(HCODEC_V3_MV_SAD_TABLE, |
2267 | v3_mv_sad[i]); |
2268 | |
2269 | /* IE PRED SAD Table*/ |
2270 | WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_0, |
2271 | (C_ipred_weight_H << 24) | |
2272 | (C_ipred_weight_V << 16) | |
2273 | (I4_ipred_weight_else << 8) | |
2274 | (I4_ipred_weight_most << 0)); |
2275 | WRITE_HREG(HCODEC_V3_IPRED_TYPE_WEIGHT_1, |
2276 | (I16_ipred_weight_DC << 24) | |
2277 | (I16_ipred_weight_H << 16) | |
2278 | (I16_ipred_weight_V << 8) | |
2279 | (C_ipred_weight_DC << 0)); |
2280 | WRITE_HREG(HCODEC_V3_LEFT_SMALL_MAX_SAD, |
2281 | (v3_left_small_max_me_sad << 16) | |
2282 | (v3_left_small_max_ie_sad << 0)); |
2283 | } |
2284 | WRITE_HREG(HCODEC_IE_DATA_FEED_BUFF_INFO, 0); |
2285 | WRITE_HREG(HCODEC_CURR_CANVAS_CTRL, 0); |
2286 | data32 = READ_HREG(HCODEC_VLC_CONFIG); |
2287 | data32 = data32 | (1 << 0); /* set pop_coeff_even_all_zero */ |
2288 | WRITE_HREG(HCODEC_VLC_CONFIG, data32); |
2289 | |
2290 | WRITE_HREG(INFO_DUMP_START_ADDR, |
2291 | wq->mem.dump_info_ddr_start_addr); |
2292 | |
2293 | /* clear mailbox interrupt */ |
2294 | WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1); |
2295 | |
2296 | /* enable mailbox interrupt */ |
2297 | WRITE_HREG(HCODEC_IRQ_MBOX_MASK, 1); |
2298 | } |
2299 | |
/*
 * amvenc_reset() - soft-reset the encoder sub-blocks via DOS_SW_RESET1.
 *
 * Asserts then releases the reset bits for the encoder engines.  The
 * repeated dummy READ_VREG() calls before and after the write-pulse are
 * deliberate — presumably CBUS access delays so the reset pulse meets
 * the hardware timing requirement (TODO confirm against SoC datasheet).
 */
void amvenc_reset(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	/* assert reset: bits 2, 6, 7, 8, 14, 16, 17 */
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 2) | (1 << 6) |
		(1 << 7) | (1 << 8) |
		(1 << 14) | (1 << 16) |
		(1 << 17));
	/* release reset */
	WRITE_VREG(DOS_SW_RESET1, 0);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
}
2315 | |
/*
 * amvenc_start() - pulse the HCODEC CPU resets and start the microcode.
 *
 * Resets bits 11 and 12 of DOS_SW_RESET1, then sets HCODEC_MPSR bit 0
 * to let the coprocessor run.  The surrounding dummy READ_VREG() calls
 * are intentional spacing reads (same idiom as amvenc_reset()).
 */
void amvenc_start(void)
{
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	/* pulse CPU-related resets (bits 11 and 12) */
	WRITE_VREG(DOS_SW_RESET1,
		(1 << 12) | (1 << 11));
	WRITE_VREG(DOS_SW_RESET1, 0);

	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);
	READ_VREG(DOS_SW_RESET1);

	/* release the HCODEC coprocessor from halt */
	WRITE_HREG(HCODEC_MPSR, 0x0001);
}
2331 | |
2332 | void amvenc_stop(void) |
2333 | { |
2334 | ulong timeout = jiffies + HZ; |
2335 | |
2336 | WRITE_HREG(HCODEC_MPSR, 0); |
2337 | WRITE_HREG(HCODEC_CPSR, 0); |
2338 | while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { |
2339 | if (time_after(jiffies, timeout)) |
2340 | break; |
2341 | } |
2342 | READ_VREG(DOS_SW_RESET1); |
2343 | READ_VREG(DOS_SW_RESET1); |
2344 | READ_VREG(DOS_SW_RESET1); |
2345 | |
2346 | WRITE_VREG(DOS_SW_RESET1, |
2347 | (1 << 12) | (1 << 11) | |
2348 | (1 << 2) | (1 << 6) | |
2349 | (1 << 7) | (1 << 8) | |
2350 | (1 << 14) | (1 << 16) | |
2351 | (1 << 17)); |
2352 | |
2353 | WRITE_VREG(DOS_SW_RESET1, 0); |
2354 | |
2355 | READ_VREG(DOS_SW_RESET1); |
2356 | READ_VREG(DOS_SW_RESET1); |
2357 | READ_VREG(DOS_SW_RESET1); |
2358 | } |
2359 | |
/* one-time staging buffer for the encoder microcode (see amvenc_loadmc) */
static void __iomem *mc_addr;
/* DMA address of mc_addr while an IMEM load is in flight */
static u32 mc_addr_map;
#define MC_SIZE (4096 * 8)	/* 32 KiB microcode staging size */
2363 | s32 amvenc_loadmc(const char *p, struct encode_wq_s *wq) |
2364 | { |
2365 | ulong timeout; |
2366 | s32 ret = 0; |
2367 | |
2368 | /* use static mempry*/ |
2369 | if (mc_addr == NULL) { |
2370 | mc_addr = kmalloc(MC_SIZE, GFP_KERNEL); |
2371 | if (!mc_addr) { |
2372 | enc_pr(LOG_ERROR, "avc loadmc iomap mc addr error.\n"); |
2373 | return -ENOMEM; |
2374 | } |
2375 | } |
2376 | |
2377 | enc_pr(LOG_ALL, "avc encode ucode name is %s\n", p); |
2378 | ret = get_data_from_name(p, (u8 *)mc_addr); |
2379 | if (ret < 0) { |
2380 | enc_pr(LOG_ERROR, |
2381 | "avc microcode fail ret=%d, name: %s, wq:%p.\n", |
2382 | ret, p, (void *)wq); |
2383 | } |
2384 | |
2385 | mc_addr_map = dma_map_single( |
2386 | &encode_manager.this_pdev->dev, |
2387 | mc_addr, MC_SIZE, DMA_TO_DEVICE); |
2388 | |
2389 | /* mc_addr_map = wq->mem.assit_buffer_offset; */ |
2390 | /* mc_addr = ioremap_wc(mc_addr_map, MC_SIZE); */ |
2391 | /* memcpy(mc_addr, p, MC_SIZE); */ |
2392 | enc_pr(LOG_ALL, "address 0 is 0x%x\n", *((u32 *)mc_addr)); |
2393 | enc_pr(LOG_ALL, "address 1 is 0x%x\n", *((u32 *)mc_addr + 1)); |
2394 | enc_pr(LOG_ALL, "address 2 is 0x%x\n", *((u32 *)mc_addr + 2)); |
2395 | enc_pr(LOG_ALL, "address 3 is 0x%x\n", *((u32 *)mc_addr + 3)); |
2396 | WRITE_HREG(HCODEC_MPSR, 0); |
2397 | WRITE_HREG(HCODEC_CPSR, 0); |
2398 | |
2399 | /* Read CBUS register for timing */ |
2400 | timeout = READ_HREG(HCODEC_MPSR); |
2401 | timeout = READ_HREG(HCODEC_MPSR); |
2402 | |
2403 | timeout = jiffies + HZ; |
2404 | |
2405 | WRITE_HREG(HCODEC_IMEM_DMA_ADR, mc_addr_map); |
2406 | WRITE_HREG(HCODEC_IMEM_DMA_COUNT, 0x1000); |
2407 | WRITE_HREG(HCODEC_IMEM_DMA_CTRL, (0x8000 | (7 << 16))); |
2408 | |
2409 | while (READ_HREG(HCODEC_IMEM_DMA_CTRL) & 0x8000) { |
2410 | if (time_before(jiffies, timeout)) |
2411 | schedule(); |
2412 | else { |
2413 | enc_pr(LOG_ERROR, "hcodec load mc error\n"); |
2414 | ret = -EBUSY; |
2415 | break; |
2416 | } |
2417 | } |
2418 | dma_unmap_single( |
2419 | &encode_manager.this_pdev->dev, |
2420 | mc_addr_map, MC_SIZE, DMA_TO_DEVICE); |
2421 | return ret; |
2422 | } |
2423 | |
/* minimal microcode stub executed by amvenc_dos_top_reg_fix() below */
const u32 fix_mc[] __aligned(8) = {
	0x0809c05a, 0x06696000, 0x0c780000, 0x00000000
};
2427 | |
2428 | |
2429 | /* |
2430 | * DOS top level register access fix. |
2431 | * When hcodec is running, a protocol register HCODEC_CCPU_INTR_MSK |
2432 | * is set to make hcodec access one CBUS out of DOS domain once |
2433 | * to work around a HW bug for 4k2k dual decoder implementation. |
2434 | * If hcodec is not running, then a ucode is loaded and executed |
2435 | * instead. |
2436 | */ |
/*
 * See the block comment above: if hcodec is running we only raise
 * HCODEC_CCPU_INTR_MSK; otherwise we power it on (if needed), run the
 * fix_mc stub once, and restore the previous power state.
 */
void amvenc_dos_top_reg_fix(void)
{
	bool hcodec_on;
	ulong flags;

	spin_lock_irqsave(&lock, flags);

	hcodec_on = vdec_on(VDEC_HCODEC);

	/* ucode already running: the mask register alone does the fix */
	if ((hcodec_on) && (READ_VREG(HCODEC_MPSR) & 1)) {
		WRITE_HREG(HCODEC_CCPU_INTR_MSK, 1);
		spin_unlock_irqrestore(&lock, flags);
		return;
	}

	if (!hcodec_on)
		vdec_poweron(VDEC_HCODEC);

	amhcodec_loadmc(fix_mc);

	amhcodec_start();

	/*
	 * NOTE(review): 1 ms busy delay with the spinlock held and IRQs
	 * off — long for atomic context, but kept to preserve the
	 * original sequencing; confirm this latency is acceptable.
	 */
	udelay(1000);

	amhcodec_stop();

	/* restore original power state */
	if (!hcodec_on)
		vdec_poweroff(VDEC_HCODEC);

	spin_unlock_irqrestore(&lock, flags);
}
2468 | |
2469 | bool amvenc_avc_on(void) |
2470 | { |
2471 | bool hcodec_on; |
2472 | ulong flags; |
2473 | |
2474 | spin_lock_irqsave(&lock, flags); |
2475 | |
2476 | hcodec_on = vdec_on(VDEC_HCODEC); |
2477 | hcodec_on &= (encode_manager.wq_count > 0); |
2478 | |
2479 | spin_unlock_irqrestore(&lock, flags); |
2480 | return hcodec_on; |
2481 | } |
2482 | |
2483 | static s32 avc_poweron(u32 clock) |
2484 | { |
2485 | ulong flags; |
2486 | u32 data32; |
2487 | |
2488 | data32 = 0; |
2489 | |
2490 | amports_switch_gate("vdec", 1); |
2491 | |
2492 | spin_lock_irqsave(&lock, flags); |
2493 | |
2494 | WRITE_AOREG(AO_RTI_PWR_CNTL_REG0, |
2495 | (READ_AOREG(AO_RTI_PWR_CNTL_REG0) & (~0x18))); |
2496 | udelay(10); |
2497 | /* Powerup HCODEC */ |
2498 | /* [1:0] HCODEC */ |
2499 | WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, |
2500 | READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) & |
2501 | ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || |
2502 | get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) |
2503 | ? ~0x1 : ~0x3)); |
2504 | |
2505 | udelay(10); |
2506 | |
2507 | WRITE_VREG(DOS_SW_RESET1, 0xffffffff); |
2508 | WRITE_VREG(DOS_SW_RESET1, 0); |
2509 | |
2510 | /* Enable Dos internal clock gating */ |
2511 | hvdec_clock_enable(clock); |
2512 | |
2513 | /* Powerup HCODEC memories */ |
2514 | WRITE_VREG(DOS_MEM_PD_HCODEC, 0x0); |
2515 | |
2516 | /* Remove HCODEC ISO */ |
2517 | WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, |
2518 | READ_AOREG(AO_RTI_GEN_PWR_ISO0) & |
2519 | ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || |
2520 | get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) |
2521 | ? ~0x1 : ~0x30)); |
2522 | |
2523 | udelay(10); |
2524 | /* Disable auto-clock gate */ |
2525 | WRITE_VREG(DOS_GEN_CTRL0, |
2526 | (READ_VREG(DOS_GEN_CTRL0) | 0x1)); |
2527 | WRITE_VREG(DOS_GEN_CTRL0, |
2528 | (READ_VREG(DOS_GEN_CTRL0) & 0xFFFFFFFE)); |
2529 | |
2530 | spin_unlock_irqrestore(&lock, flags); |
2531 | |
2532 | mdelay(10); |
2533 | return 0; |
2534 | } |
2535 | |
2536 | static s32 avc_poweroff(void) |
2537 | { |
2538 | ulong flags; |
2539 | |
2540 | spin_lock_irqsave(&lock, flags); |
2541 | |
2542 | /* enable HCODEC isolation */ |
2543 | WRITE_AOREG(AO_RTI_GEN_PWR_ISO0, |
2544 | READ_AOREG(AO_RTI_GEN_PWR_ISO0) | |
2545 | ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || |
2546 | get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) |
2547 | ? 0x1 : 0x30)); |
2548 | |
2549 | /* power off HCODEC memories */ |
2550 | WRITE_VREG(DOS_MEM_PD_HCODEC, 0xffffffffUL); |
2551 | |
2552 | /* disable HCODEC clock */ |
2553 | hvdec_clock_disable(); |
2554 | |
2555 | /* HCODEC power off */ |
2556 | WRITE_AOREG(AO_RTI_GEN_PWR_SLEEP0, |
2557 | READ_AOREG(AO_RTI_GEN_PWR_SLEEP0) | |
2558 | ((get_cpu_type() == MESON_CPU_MAJOR_ID_SM1 || |
2559 | get_cpu_type() >= MESON_CPU_MAJOR_ID_TM2) |
2560 | ? 0x1 : 0x3)); |
2561 | |
2562 | spin_unlock_irqrestore(&lock, flags); |
2563 | |
2564 | /* release DOS clk81 clock gating */ |
2565 | amports_switch_gate("vdec", 0); |
2566 | return 0; |
2567 | } |
2568 | |
2569 | static s32 reload_mc(struct encode_wq_s *wq) |
2570 | { |
2571 | const char *p = select_ucode(encode_manager.ucode_index); |
2572 | |
2573 | amvenc_stop(); |
2574 | |
2575 | WRITE_VREG(DOS_SW_RESET1, 0xffffffff); |
2576 | WRITE_VREG(DOS_SW_RESET1, 0); |
2577 | |
2578 | udelay(10); |
2579 | |
2580 | WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32); |
2581 | enc_pr(LOG_INFO, "reload microcode\n"); |
2582 | |
2583 | if (amvenc_loadmc(p, wq) < 0) |
2584 | return -EBUSY; |
2585 | return 0; |
2586 | } |
2587 | |
2588 | static void encode_isr_tasklet(ulong data) |
2589 | { |
2590 | struct encode_manager_s *manager = (struct encode_manager_s *)data; |
2591 | |
2592 | enc_pr(LOG_INFO, "encoder is done %d\n", manager->encode_hw_status); |
2593 | if (((manager->encode_hw_status == ENCODER_IDR_DONE) |
2594 | || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) |
2595 | || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) |
2596 | || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) |
2597 | && (manager->process_irq)) { |
2598 | wake_up_interruptible(&manager->event.hw_complete); |
2599 | } |
2600 | } |
2601 | |
2602 | /* irq function */ |
2603 | static irqreturn_t enc_isr(s32 irq_number, void *para) |
2604 | { |
2605 | struct encode_manager_s *manager = (struct encode_manager_s *)para; |
2606 | |
2607 | WRITE_HREG(HCODEC_IRQ_MBOX_CLR, 1); |
2608 | |
2609 | manager->encode_hw_status = READ_HREG(ENCODER_STATUS); |
2610 | if ((manager->encode_hw_status == ENCODER_IDR_DONE) |
2611 | || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) |
2612 | || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) |
2613 | || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) { |
2614 | enc_pr(LOG_ALL, "encoder stage is %d\n", |
2615 | manager->encode_hw_status); |
2616 | } |
2617 | |
2618 | if (((manager->encode_hw_status == ENCODER_IDR_DONE) |
2619 | || (manager->encode_hw_status == ENCODER_NON_IDR_DONE) |
2620 | || (manager->encode_hw_status == ENCODER_SEQUENCE_DONE) |
2621 | || (manager->encode_hw_status == ENCODER_PICTURE_DONE)) |
2622 | && (!manager->process_irq)) { |
2623 | manager->process_irq = true; |
2624 | if (manager->encode_hw_status != ENCODER_SEQUENCE_DONE) |
2625 | manager->need_reset = true; |
2626 | tasklet_schedule(&manager->encode_tasklet); |
2627 | } |
2628 | return IRQ_HANDLED; |
2629 | } |
2630 | |
2631 | static s32 convert_request(struct encode_wq_s *wq, u32 *cmd_info) |
2632 | { |
2633 | int i = 0; |
2634 | u8 *ptr; |
2635 | u32 data_offset; |
2636 | u32 cmd = cmd_info[0]; |
2637 | unsigned long paddr = 0; |
2638 | struct enc_dma_cfg *cfg = NULL; |
2639 | s32 ret = 0; |
2640 | struct platform_device *pdev; |
2641 | |
2642 | if (!wq) |
2643 | return -1; |
2644 | memset(&wq->request, 0, sizeof(struct encode_request_s)); |
2645 | wq->request.me_weight = ME_WEIGHT_OFFSET; |
2646 | wq->request.i4_weight = I4MB_WEIGHT_OFFSET; |
2647 | wq->request.i16_weight = I16MB_WEIGHT_OFFSET; |
2648 | |
2649 | if (cmd == ENCODER_SEQUENCE) { |
2650 | wq->request.cmd = cmd; |
2651 | wq->request.ucode_mode = cmd_info[1]; |
2652 | wq->request.quant = cmd_info[2]; |
2653 | wq->request.flush_flag = cmd_info[3]; |
2654 | wq->request.timeout = cmd_info[4]; |
2655 | wq->request.timeout = 5000; /* 5000 ms */ |
2656 | } else if ((cmd == ENCODER_IDR) || (cmd == ENCODER_NON_IDR)) { |
2657 | wq->request.cmd = cmd; |
2658 | wq->request.ucode_mode = cmd_info[1]; |
2659 | wq->request.type = cmd_info[2]; |
2660 | wq->request.fmt = cmd_info[3]; |
2661 | wq->request.src = cmd_info[4]; |
2662 | wq->request.framesize = cmd_info[5]; |
2663 | wq->request.quant = cmd_info[6]; |
2664 | wq->request.flush_flag = cmd_info[7]; |
2665 | wq->request.timeout = cmd_info[8]; |
2666 | wq->request.crop_top = cmd_info[9]; |
2667 | wq->request.crop_bottom = cmd_info[10]; |
2668 | wq->request.crop_left = cmd_info[11]; |
2669 | wq->request.crop_right = cmd_info[12]; |
2670 | wq->request.src_w = cmd_info[13]; |
2671 | wq->request.src_h = cmd_info[14]; |
2672 | wq->request.scale_enable = cmd_info[15]; |
2673 | wq->request.nr_mode = |
2674 | (nr_mode > 0) ? nr_mode : cmd_info[16]; |
2675 | if (cmd == ENCODER_IDR) |
2676 | wq->request.nr_mode = 0; |
2677 | |
2678 | data_offset = 17 + |
2679 | (sizeof(wq->quant_tbl_i4) |
2680 | + sizeof(wq->quant_tbl_i16) |
2681 | + sizeof(wq->quant_tbl_me)) / 4; |
2682 | |
2683 | if (wq->request.quant == ADJUSTED_QP_FLAG) { |
2684 | ptr = (u8 *) &cmd_info[17]; |
2685 | memcpy(wq->quant_tbl_i4, ptr, |
2686 | sizeof(wq->quant_tbl_i4)); |
2687 | ptr += sizeof(wq->quant_tbl_i4); |
2688 | memcpy(wq->quant_tbl_i16, ptr, |
2689 | sizeof(wq->quant_tbl_i16)); |
2690 | ptr += sizeof(wq->quant_tbl_i16); |
2691 | memcpy(wq->quant_tbl_me, ptr, |
2692 | sizeof(wq->quant_tbl_me)); |
2693 | wq->request.i4_weight -= |
2694 | cmd_info[data_offset++]; |
2695 | wq->request.i16_weight -= |
2696 | cmd_info[data_offset++]; |
2697 | wq->request.me_weight -= |
2698 | cmd_info[data_offset++]; |
2699 | if (qp_table_debug) { |
2700 | u8 *qp_tb = (u8 *)(&wq->quant_tbl_i4[0]); |
2701 | |
2702 | for (i = 0; i < 32; i++) { |
2703 | enc_pr(LOG_INFO, "%d ", *qp_tb); |
2704 | qp_tb++; |
2705 | } |
2706 | enc_pr(LOG_INFO, "\n"); |
2707 | |
2708 | qp_tb = (u8 *)(&wq->quant_tbl_i16[0]); |
2709 | for (i = 0; i < 32; i++) { |
2710 | enc_pr(LOG_INFO, "%d ", *qp_tb); |
2711 | qp_tb++; |
2712 | } |
2713 | enc_pr(LOG_INFO, "\n"); |
2714 | |
2715 | qp_tb = (u8 *)(&wq->quant_tbl_me[0]); |
2716 | for (i = 0; i < 32; i++) { |
2717 | enc_pr(LOG_INFO, "%d ", *qp_tb); |
2718 | qp_tb++; |
2719 | } |
2720 | enc_pr(LOG_INFO, "\n"); |
2721 | } |
2722 | } else { |
2723 | memset(wq->quant_tbl_me, wq->request.quant, |
2724 | sizeof(wq->quant_tbl_me)); |
2725 | memset(wq->quant_tbl_i4, wq->request.quant, |
2726 | sizeof(wq->quant_tbl_i4)); |
2727 | memset(wq->quant_tbl_i16, wq->request.quant, |
2728 | sizeof(wq->quant_tbl_i16)); |
2729 | data_offset += 3; |
2730 | } |
2731 | #ifdef H264_ENC_CBR |
2732 | wq->cbr_info.block_w = cmd_info[data_offset++]; |
2733 | wq->cbr_info.block_h = cmd_info[data_offset++]; |
2734 | wq->cbr_info.long_th = cmd_info[data_offset++]; |
2735 | wq->cbr_info.start_tbl_id = cmd_info[data_offset++]; |
2736 | wq->cbr_info.short_shift = CBR_SHORT_SHIFT; |
2737 | wq->cbr_info.long_mb_num = CBR_LONG_MB_NUM; |
2738 | #endif |
2739 | data_offset = 17 + |
2740 | (sizeof(wq->quant_tbl_i4) |
2741 | + sizeof(wq->quant_tbl_i16) |
2742 | + sizeof(wq->quant_tbl_me)) / 4 + 7; |
2743 | |
2744 | if (wq->request.type == DMA_BUFF) { |
2745 | wq->request.plane_num = cmd_info[data_offset++]; |
2746 | enc_pr(LOG_INFO, "wq->request.plane_num %d\n", |
2747 | wq->request.plane_num); |
2748 | if (wq->request.fmt == FMT_NV12 || |
2749 | wq->request.fmt == FMT_NV21 || |
2750 | wq->request.fmt == FMT_YUV420) { |
2751 | for (i = 0; i < wq->request.plane_num; i++) { |
2752 | cfg = &wq->request.dma_cfg[i]; |
2753 | cfg->dir = DMA_TO_DEVICE; |
2754 | cfg->fd = cmd_info[data_offset++]; |
2755 | pdev = encode_manager.this_pdev; |
2756 | cfg->dev = &(pdev->dev); |
2757 | |
2758 | ret = enc_dma_buf_get_phys(cfg, &paddr); |
2759 | if (ret < 0) { |
2760 | enc_pr(LOG_ERROR, |
2761 | "import fd %d failed\n", |
2762 | cfg->fd); |
2763 | cfg->paddr = NULL; |
2764 | cfg->vaddr = NULL; |
2765 | return -1; |
2766 | } |
2767 | cfg->paddr = (void *)paddr; |
2768 | enc_pr(LOG_INFO, "vaddr %p\n", |
2769 | cfg->vaddr); |
2770 | } |
2771 | } else { |
2772 | enc_pr(LOG_ERROR, "error fmt = %d\n", |
2773 | wq->request.fmt); |
2774 | } |
2775 | } |
2776 | |
2777 | } else { |
2778 | enc_pr(LOG_ERROR, "error cmd = %d, wq: %p.\n", |
2779 | cmd, (void *)wq); |
2780 | return -1; |
2781 | } |
2782 | wq->request.parent = wq; |
2783 | return 0; |
2784 | } |
2785 | |
/*
 * amvenc_avc_start_cmd() - program and launch one encode command.
 * @wq:      work queue owning the buffers/canvases for this stream.
 * @request: command to run (ENCODER_SEQUENCE/PICTURE/IDR/NON_IDR)
 *           plus quant value and microcode selection.
 *
 * Reloads microcode when the request selects a different ucode image,
 * performs a full hardware re-init when need_reset is flagged, programs
 * the per-frame buffers and parameters, then writes the command into
 * ENCODER_STATUS (restarting the core only after a microcode reload).
 */
void amvenc_avc_start_cmd(struct encode_wq_s *wq,
	struct encode_request_s *request)
{
	u32 reload_flag = 0;

	/* request selects a different microcode image: reload it now */
	if (request->ucode_mode != encode_manager.ucode_index) {
		encode_manager.ucode_index = request->ucode_mode;
		if (reload_mc(wq)) {
			enc_pr(LOG_ERROR,
				"reload mc fail, wq:%p\n", (void *)wq);
			return;
		}
		reload_flag = 1;
		encode_manager.need_reset = true;
	}

	wq->hw_status = 0;
	wq->output_size = 0;
	wq->ucode_index = encode_manager.ucode_index;

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	if (encode_manager.need_reset) {
		/* full re-init: reset the core, canvases, input/output
		 * buffers and protocol registers before the new command
		 */
		encode_manager.need_reset = false;
		encode_manager.encode_hw_status = ENCODER_IDLE;
		amvenc_reset();
		avc_canvas_init(wq);
		avc_init_encoder(wq,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_input_buffer(wq);
		avc_init_output_buffer(wq);
		avc_prot_init(wq, request, request->quant,
			(request->cmd == ENCODER_IDR) ? true : false);
		avc_init_assit_buffer(wq);
		enc_pr(LOG_INFO,
			"begin to new frame, request->cmd: %d, ucode mode: %d, wq:%p\n",
			request->cmd, request->ucode_mode, (void *)wq);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR)) {
#ifdef H264_ENC_SVC
		/* encode non reference frame or not */
		if (request->cmd == ENCODER_IDR)
			wq->pic.non_ref_cnt = 0; //IDR reset counter
		if (wq->pic.enable_svc && wq->pic.non_ref_cnt) {
			enc_pr(LOG_INFO,
				"PIC is NON REF cmd %d cnt %d value 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_NON_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_NON_REF);
		} else {
			enc_pr(LOG_INFO,
				"PIC is REF cmd %d cnt %d val 0x%x\n",
				request->cmd, wq->pic.non_ref_cnt,
				ENC_SLC_REF);
			WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
		}
#else
		/* if FW defined but not defined SVC in driver here*/
		WRITE_HREG(H264_ENC_SVC_PIC_TYPE, ENC_SLC_REF);
#endif
		/* per-frame setup of deblock and reference canvases */
		avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
		avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	}
	if ((request->cmd == ENCODER_IDR) ||
		(request->cmd == ENCODER_NON_IDR))
		set_input_format(wq, request);

	/* macroblock type: force I4MB on IDR, auto-select on P frames */
	if (request->cmd == ENCODER_IDR)
		ie_me_mb_type = HENC_MB_Type_I4MB;
	else if (request->cmd == ENCODER_NON_IDR)
		ie_me_mb_type =
			(HENC_SKIP_RUN_AUTO << 16) |
			(HENC_MB_Type_AUTO << 4) |
			(HENC_MB_Type_AUTO << 0);
	else
		ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, request->quant);

#ifdef MULTI_SLICE_MC
	/* multi-slice: either a fixed override or rows-per-slice derived
	 * macroblock count; 0 disables fixed slicing
	 */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif

	encode_manager.encode_hw_status = request->cmd;
	wq->hw_status = request->cmd;
	WRITE_HREG(ENCODER_STATUS, request->cmd);
	/* these commands complete via the encoder interrupt */
	if ((request->cmd == ENCODER_IDR)
		|| (request->cmd == ENCODER_NON_IDR)
		|| (request->cmd == ENCODER_SEQUENCE)
		|| (request->cmd == ENCODER_PICTURE))
		encode_manager.process_irq = false;

	if (reload_flag)
		amvenc_start();
	enc_pr(LOG_ALL, "amvenc_avc_start cmd out, request:%p.\n", (void*)request);
}
2892 | |
2893 | static void dma_flush(u32 buf_start, u32 buf_size) |
2894 | { |
2895 | if ((buf_start == 0) || (buf_size == 0)) |
2896 | return; |
2897 | dma_sync_single_for_device( |
2898 | &encode_manager.this_pdev->dev, buf_start, |
2899 | buf_size, DMA_TO_DEVICE); |
2900 | } |
2901 | |
2902 | static void cache_flush(u32 buf_start, u32 buf_size) |
2903 | { |
2904 | if ((buf_start == 0) || (buf_size == 0)) |
2905 | return; |
2906 | dma_sync_single_for_cpu( |
2907 | &encode_manager.this_pdev->dev, buf_start, |
2908 | buf_size, DMA_FROM_DEVICE); |
2909 | } |
2910 | |
2911 | static u32 getbuffer(struct encode_wq_s *wq, u32 type) |
2912 | { |
2913 | u32 ret = 0; |
2914 | |
2915 | switch (type) { |
2916 | case ENCODER_BUFFER_INPUT: |
2917 | ret = wq->mem.dct_buff_start_addr; |
2918 | break; |
2919 | case ENCODER_BUFFER_REF0: |
2920 | ret = wq->mem.dct_buff_start_addr + |
2921 | wq->mem.bufspec.dec0_y.buf_start; |
2922 | break; |
2923 | case ENCODER_BUFFER_REF1: |
2924 | ret = wq->mem.dct_buff_start_addr + |
2925 | wq->mem.bufspec.dec1_y.buf_start; |
2926 | break; |
2927 | case ENCODER_BUFFER_OUTPUT: |
2928 | ret = wq->mem.BitstreamStart; |
2929 | break; |
2930 | case ENCODER_BUFFER_DUMP: |
2931 | ret = wq->mem.dump_info_ddr_start_addr; |
2932 | break; |
2933 | case ENCODER_BUFFER_CBR: |
2934 | ret = wq->mem.cbr_info_ddr_start_addr; |
2935 | break; |
2936 | default: |
2937 | break; |
2938 | } |
2939 | return ret; |
2940 | } |
2941 | |
/*
 * amvenc_avc_start() - cold-start the AVC encoder hardware.
 * @wq:    work queue supplying buffers and picture parameters.
 * @clock: clock level handed to avc_poweron().
 *
 * Powers on the core, loads the selected microcode, initializes all
 * encoder buffers and parameters, installs the shared encoder
 * interrupt and starts the core.
 *
 * Returns 0 on success, -EBUSY when microcode loading fails.
 */
s32 amvenc_avc_start(struct encode_wq_s *wq, u32 clock)
{
	const char *p = select_ucode(encode_manager.ucode_index);

	avc_poweron(clock);
	avc_canvas_init(wq);

	WRITE_HREG(HCODEC_ASSIST_MMC_CTRL1, 0x32);

	if (amvenc_loadmc(p, wq) < 0)
		return -EBUSY;

	encode_manager.need_reset = true;
	encode_manager.process_irq = false;
	encode_manager.encode_hw_status = ENCODER_IDLE;
	amvenc_reset();
	avc_init_encoder(wq, true);
	avc_init_input_buffer(wq); /* dct buffer setting */
	avc_init_output_buffer(wq); /* output stream buffer */

	ie_me_mode = (0 & ME_PIXEL_MODE_MASK) << ME_PIXEL_MODE_SHIFT;
	avc_prot_init(wq, NULL, wq->pic.init_qppicture, true);
	/* NOTE(review): a failed request_irq() is tolerated here and only
	 * recorded in irq_requested — confirm the no-irq path is intended.
	 */
	if (request_irq(encode_manager.irq_num, enc_isr, IRQF_SHARED,
		"enc-irq", (void *)&encode_manager) == 0)
		encode_manager.irq_requested = true;
	else
		encode_manager.irq_requested = false;

	/* decoder buffer , need set before each frame start */
	avc_init_dblk_buffer(wq->mem.dblk_buf_canvas);
	/* reference buffer , need set before each frame start */
	avc_init_reference_buffer(wq->mem.ref_buf_canvas);
	avc_init_assit_buffer(wq); /* assitant buffer for microcode */
	ie_me_mb_type = 0;
	avc_init_ie_me_parameter(wq, wq->pic.init_qppicture);
	WRITE_HREG(ENCODER_STATUS, ENCODER_IDLE);

#ifdef MULTI_SLICE_MC
	/* multi-slice configuration mirrors amvenc_avc_start_cmd() */
	if (fixed_slice_cfg)
		WRITE_HREG(FIXED_SLICE_CFG, fixed_slice_cfg);
	else if (wq->pic.rows_per_slice !=
		(wq->pic.encoder_height + 15) >> 4) {
		u32 mb_per_slice = (wq->pic.encoder_height + 15) >> 4;

		mb_per_slice = mb_per_slice * wq->pic.rows_per_slice;
		WRITE_HREG(FIXED_SLICE_CFG, mb_per_slice);
	} else
		WRITE_HREG(FIXED_SLICE_CFG, 0);
#else
	WRITE_HREG(FIXED_SLICE_CFG, 0);
#endif
	amvenc_start();
	return 0;
}
2996 | |
2997 | void amvenc_avc_stop(void) |
2998 | { |
2999 | if ((encode_manager.irq_num >= 0) && |
3000 | (encode_manager.irq_requested == true)) { |
3001 | free_irq(encode_manager.irq_num, &encode_manager); |
3002 | encode_manager.irq_requested = false; |
3003 | } |
3004 | amvenc_stop(); |
3005 | avc_poweroff(); |
3006 | } |
3007 | |
3008 | static s32 avc_init(struct encode_wq_s *wq) |
3009 | { |
3010 | s32 r = 0; |
3011 | |
3012 | encode_manager.ucode_index = wq->ucode_index; |
3013 | r = amvenc_avc_start(wq, clock_level); |
3014 | |
3015 | enc_pr(LOG_DEBUG, |
3016 | "init avc encode. microcode %d, ret=%d, wq:%p.\n", |
3017 | encode_manager.ucode_index, r, (void *)wq); |
3018 | return 0; |
3019 | } |
3020 | |
3021 | static s32 amvenc_avc_light_reset(struct encode_wq_s *wq, u32 value) |
3022 | { |
3023 | s32 r = 0; |
3024 | |
3025 | amvenc_avc_stop(); |
3026 | |
3027 | mdelay(value); |
3028 | |
3029 | encode_manager.ucode_index = UCODE_MODE_FULL; |
3030 | r = amvenc_avc_start(wq, clock_level); |
3031 | |
3032 | enc_pr(LOG_DEBUG, |
3033 | "amvenc_avc_light_reset finish, wq:%p. ret=%d\n", |
3034 | (void *)wq, r); |
3035 | return r; |
3036 | } |
3037 | |
3038 | #ifdef CONFIG_CMA |
3039 | static u32 checkCMA(void) |
3040 | { |
3041 | u32 ret; |
3042 | |
3043 | if (encode_manager.cma_pool_size > 0) { |
3044 | ret = encode_manager.cma_pool_size; |
3045 | ret = ret / MIN_SIZE; |
3046 | } else |
3047 | ret = 0; |
3048 | return ret; |
3049 | } |
3050 | #endif |
3051 | |
3052 | /* file operation */ |
/*
 * amvenc_avc_open() - open() handler: create a per-fd encode work
 * queue and back it with reserved memory or a CMA allocation.
 *
 * Returns 0 on success; -EBUSY when the hcodec is busy with JPEG or an
 * instance cannot be created; -ENOMEM when the instance buffer cannot
 * be allocated or is smaller than MIN_SIZE.
 */
static s32 amvenc_avc_open(struct inode *inode, struct file *file)
{
	s32 r = 0;
	struct encode_wq_s *wq = NULL;

	file->private_data = NULL;
	enc_pr(LOG_DEBUG, "avc open\n");
#ifdef CONFIG_AM_JPEG_ENCODER
	/* the hcodec core is shared with the JPEG encoder */
	if (jpegenc_on() == true) {
		enc_pr(LOG_ERROR,
			"hcodec in use for JPEG Encode now.\n");
		return -EBUSY;
	}
#endif

#ifdef CONFIG_CMA
	/* first open without reserved memory: size the CMA pool once to
	 * derive the maximum instance count
	 */
	if ((encode_manager.use_reserve == false) &&
		(encode_manager.check_cma == false)) {
		encode_manager.max_instance = checkCMA();
		if (encode_manager.max_instance > 0) {
			enc_pr(LOG_DEBUG,
				"amvenc_avc check CMA pool success, max instance: %d.\n",
				encode_manager.max_instance);
		} else {
			enc_pr(LOG_ERROR,
				"amvenc_avc CMA pool too small.\n");
		}
		encode_manager.check_cma = true;
	}
#endif

	wq = create_encode_work_queue();
	if (wq == NULL) {
		enc_pr(LOG_ERROR, "amvenc_avc create instance fail.\n");
		return -EBUSY;
	}

#ifdef CONFIG_CMA
	/* no reserved memory: allocate this instance's buffer from CMA */
	if (encode_manager.use_reserve == false) {
		wq->mem.buf_start = codec_mm_alloc_for_dma(ENCODE_NAME,
			MIN_SIZE >> PAGE_SHIFT, 0,
			CODEC_MM_FLAGS_CPU);
		if (wq->mem.buf_start) {
			wq->mem.buf_size = MIN_SIZE;
			enc_pr(LOG_DEBUG,
				"allocating phys 0x%x, size %dk, wq:%p.\n",
				wq->mem.buf_start,
				wq->mem.buf_size >> 10, (void *)wq);
		} else {
			enc_pr(LOG_ERROR,
				"CMA failed to allocate dma buffer for %s, wq:%p.\n",
				encode_manager.this_pdev->name,
				(void *)wq);
			destroy_encode_work_queue(wq);
			return -ENOMEM;
		}
	}
#endif

	/* sanity check whichever path supplied the buffer */
	if (wq->mem.buf_start == 0 ||
		wq->mem.buf_size < MIN_SIZE) {
		enc_pr(LOG_ERROR,
			"alloc mem failed, start: 0x%x, size:0x%x, wq:%p.\n",
			wq->mem.buf_start,
			wq->mem.buf_size, (void *)wq);
		destroy_encode_work_queue(wq);
		return -ENOMEM;
	}

	/* seed the buffer layout from the default spec table */
	memcpy(&wq->mem.bufspec, &amvenc_buffspec[0],
		sizeof(struct BuffInfo_s));

	enc_pr(LOG_DEBUG,
		"amvenc_avc memory config success, buff start:0x%x, size is 0x%x, wq:%p.\n",
		wq->mem.buf_start, wq->mem.buf_size, (void *)wq);

	file->private_data = (void *) wq;
	return r;
}
3132 | |
3133 | static s32 amvenc_avc_release(struct inode *inode, struct file *file) |
3134 | { |
3135 | struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data; |
3136 | |
3137 | if (wq) { |
3138 | enc_pr(LOG_DEBUG, "avc release, wq:%p\n", (void *)wq); |
3139 | destroy_encode_work_queue(wq); |
3140 | } |
3141 | return 0; |
3142 | } |
3143 | |
3144 | static long amvenc_avc_ioctl(struct file *file, u32 cmd, ulong arg) |
3145 | { |
3146 | long r = 0; |
3147 | u32 amrisc_cmd = 0; |
3148 | struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data; |
3149 | #define MAX_ADDR_INFO_SIZE 52 |
3150 | u32 addr_info[MAX_ADDR_INFO_SIZE + 4]; |
3151 | ulong argV; |
3152 | u32 buf_start; |
3153 | s32 canvas = -1; |
3154 | struct canvas_s dst; |
3155 | |
3156 | switch (cmd) { |
3157 | case AMVENC_AVC_IOC_GET_ADDR: |
3158 | if ((wq->mem.ref_buf_canvas & 0xff) == (ENC_CANVAS_OFFSET)) |
3159 | put_user(1, (u32 *)arg); |
3160 | else |
3161 | put_user(2, (u32 *)arg); |
3162 | break; |
3163 | case AMVENC_AVC_IOC_INPUT_UPDATE: |
3164 | break; |
3165 | case AMVENC_AVC_IOC_NEW_CMD: |
3166 | if (copy_from_user(addr_info, (void *)arg, |
3167 | MAX_ADDR_INFO_SIZE * sizeof(u32))) { |
3168 | enc_pr(LOG_ERROR, |
3169 | "avc get new cmd error, wq:%p.\n", (void *)wq); |
3170 | return -1; |
3171 | } |
3172 | r = convert_request(wq, addr_info); |
3173 | if (r == 0) |
3174 | r = encode_wq_add_request(wq); |
3175 | if (r) { |
3176 | enc_pr(LOG_ERROR, |
3177 | "avc add new request error, wq:%p.\n", |
3178 | (void *)wq); |
3179 | } |
3180 | break; |
3181 | case AMVENC_AVC_IOC_GET_STAGE: |
3182 | put_user(wq->hw_status, (u32 *)arg); |
3183 | break; |
3184 | case AMVENC_AVC_IOC_GET_OUTPUT_SIZE: |
3185 | addr_info[0] = wq->output_size; |
3186 | addr_info[1] = wq->me_weight; |
3187 | addr_info[2] = wq->i4_weight; |
3188 | addr_info[3] = wq->i16_weight; |
3189 | r = copy_to_user((u32 *)arg, |
3190 | addr_info, 4 * sizeof(u32)); |
3191 | break; |
3192 | case AMVENC_AVC_IOC_CONFIG_INIT: |
3193 | if (copy_from_user(addr_info, (void *)arg, |
3194 | MAX_ADDR_INFO_SIZE * sizeof(u32))) { |
3195 | enc_pr(LOG_ERROR, |
3196 | "avc config init error, wq:%p.\n", (void *)wq); |
3197 | return -1; |
3198 | } |
3199 | wq->ucode_index = UCODE_MODE_FULL; |
3200 | #ifdef MULTI_SLICE_MC |
3201 | wq->pic.rows_per_slice = addr_info[1]; |
3202 | enc_pr(LOG_DEBUG, |
3203 | "avc init -- rows_per_slice: %d, wq: %p.\n", |
3204 | wq->pic.rows_per_slice, (void *)wq); |
3205 | #endif |
3206 | enc_pr(LOG_DEBUG, |
3207 | "avc init as mode %d, wq: %p.\n", |
3208 | wq->ucode_index, (void *)wq); |
3209 | |
3210 | if (addr_info[2] > wq->mem.bufspec.max_width || |
3211 | addr_info[3] > wq->mem.bufspec.max_height) { |
3212 | enc_pr(LOG_ERROR, |
3213 | "avc config init- encode size %dx%d is larger than supported (%dx%d). wq:%p.\n", |
3214 | addr_info[2], addr_info[3], |
3215 | wq->mem.bufspec.max_width, |
3216 | wq->mem.bufspec.max_height, (void *)wq); |
3217 | return -1; |
3218 | } |
3219 | wq->pic.encoder_width = addr_info[2]; |
3220 | wq->pic.encoder_height = addr_info[3]; |
3221 | if (wq->pic.encoder_width * |
3222 | wq->pic.encoder_height >= 1280 * 720) |
3223 | clock_level = 6; |
3224 | else |
3225 | clock_level = 5; |
3226 | avc_buffspec_init(wq); |
3227 | complete(&encode_manager.event.request_in_com); |
3228 | addr_info[1] = wq->mem.bufspec.dct.buf_start; |
3229 | addr_info[2] = wq->mem.bufspec.dct.buf_size; |
3230 | addr_info[3] = wq->mem.bufspec.bitstream.buf_start; |
3231 | addr_info[4] = wq->mem.bufspec.bitstream.buf_size; |
3232 | addr_info[5] = wq->mem.bufspec.scale_buff.buf_start; |
3233 | addr_info[6] = wq->mem.bufspec.scale_buff.buf_size; |
3234 | addr_info[7] = wq->mem.bufspec.dump_info.buf_start; |
3235 | addr_info[8] = wq->mem.bufspec.dump_info.buf_size; |
3236 | addr_info[9] = wq->mem.bufspec.cbr_info.buf_start; |
3237 | addr_info[10] = wq->mem.bufspec.cbr_info.buf_size; |
3238 | r = copy_to_user((u32 *)arg, addr_info, 11*sizeof(u32)); |
3239 | break; |
3240 | case AMVENC_AVC_IOC_FLUSH_CACHE: |
3241 | if (copy_from_user(addr_info, (void *)arg, |
3242 | MAX_ADDR_INFO_SIZE * sizeof(u32))) { |
3243 | enc_pr(LOG_ERROR, |
3244 | "avc flush cache error, wq: %p.\n", (void *)wq); |
3245 | return -1; |
3246 | } |
3247 | buf_start = getbuffer(wq, addr_info[0]); |
3248 | dma_flush(buf_start + addr_info[1], |
3249 | addr_info[2] - addr_info[1]); |
3250 | break; |
3251 | case AMVENC_AVC_IOC_FLUSH_DMA: |
3252 | if (copy_from_user(addr_info, (void *)arg, |
3253 | MAX_ADDR_INFO_SIZE * sizeof(u32))) { |
3254 | enc_pr(LOG_ERROR, |
3255 | "avc flush dma error, wq:%p.\n", (void *)wq); |
3256 | return -1; |
3257 | } |
3258 | buf_start = getbuffer(wq, addr_info[0]); |
3259 | cache_flush(buf_start + addr_info[1], |
3260 | addr_info[2] - addr_info[1]); |
3261 | break; |
3262 | case AMVENC_AVC_IOC_GET_BUFFINFO: |
3263 | put_user(wq->mem.buf_size, (u32 *)arg); |
3264 | break; |
3265 | case AMVENC_AVC_IOC_GET_DEVINFO: |
3266 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXL) { |
3267 | /* send the same id as GXTVBB to upper*/ |
3268 | r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB, |
3269 | strlen(AMVENC_DEVINFO_GXTVBB)); |
3270 | } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXTVBB) { |
3271 | r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXTVBB, |
3272 | strlen(AMVENC_DEVINFO_GXTVBB)); |
3273 | } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_GXBB) { |
3274 | r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_GXBB, |
3275 | strlen(AMVENC_DEVINFO_GXBB)); |
3276 | } else if (get_cpu_type() == MESON_CPU_MAJOR_ID_MG9TV) { |
3277 | r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_G9, |
3278 | strlen(AMVENC_DEVINFO_G9)); |
3279 | } else { |
3280 | r = copy_to_user((s8 *)arg, AMVENC_DEVINFO_M8, |
3281 | strlen(AMVENC_DEVINFO_M8)); |
3282 | } |
3283 | break; |
3284 | case AMVENC_AVC_IOC_SUBMIT: |
3285 | get_user(amrisc_cmd, ((u32 *)arg)); |
3286 | if (amrisc_cmd == ENCODER_IDR) { |
3287 | wq->pic.idr_pic_id++; |
3288 | if (wq->pic.idr_pic_id > 65535) |
3289 | wq->pic.idr_pic_id = 0; |
3290 | wq->pic.pic_order_cnt_lsb = 2; |
3291 | wq->pic.frame_number = 1; |
3292 | } else if (amrisc_cmd == ENCODER_NON_IDR) { |
3293 | #ifdef H264_ENC_SVC |
3294 | /* only update when there is reference frame */ |
3295 | if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) { |
3296 | wq->pic.frame_number++; |
3297 | enc_pr(LOG_INFO, "Increase frame_num to %d\n", |
3298 | wq->pic.frame_number); |
3299 | } |
3300 | #else |
3301 | wq->pic.frame_number++; |
3302 | #endif |
3303 | |
3304 | wq->pic.pic_order_cnt_lsb += 2; |
3305 | if (wq->pic.frame_number > 65535) |
3306 | wq->pic.frame_number = 0; |
3307 | } |
3308 | #ifdef H264_ENC_SVC |
3309 | /* only update when there is reference frame */ |
3310 | if (wq->pic.enable_svc == 0 || wq->pic.non_ref_cnt == 0) { |
3311 | amrisc_cmd = wq->mem.dblk_buf_canvas; |
3312 | wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas; |
3313 | /* current dblk buffer as next reference buffer */ |
3314 | wq->mem.ref_buf_canvas = amrisc_cmd; |
3315 | enc_pr(LOG_INFO, |
3316 | "switch buffer enable %d cnt %d\n", |
3317 | wq->pic.enable_svc, wq->pic.non_ref_cnt); |
3318 | } |
3319 | if (wq->pic.enable_svc) { |
3320 | wq->pic.non_ref_cnt ++; |
3321 | if (wq->pic.non_ref_cnt > wq->pic.non_ref_limit) { |
3322 | enc_pr(LOG_INFO, "Svc clear cnt %d conf %d\n", |
3323 | wq->pic.non_ref_cnt, |
3324 | wq->pic.non_ref_limit); |
3325 | wq->pic.non_ref_cnt = 0; |
3326 | } else |
3327 | enc_pr(LOG_INFO,"Svc increase non ref counter to %d\n", |
3328 | wq->pic.non_ref_cnt ); |
3329 | } |
3330 | #else |
3331 | amrisc_cmd = wq->mem.dblk_buf_canvas; |
3332 | wq->mem.dblk_buf_canvas = wq->mem.ref_buf_canvas; |
3333 | /* current dblk buffer as next reference buffer */ |
3334 | wq->mem.ref_buf_canvas = amrisc_cmd; |
3335 | #endif |
3336 | break; |
3337 | case AMVENC_AVC_IOC_READ_CANVAS: |
3338 | get_user(argV, ((u32 *)arg)); |
3339 | canvas = argV; |
3340 | if (canvas & 0xff) { |
3341 | canvas_read(canvas & 0xff, &dst); |
3342 | addr_info[0] = dst.addr; |
3343 | if ((canvas & 0xff00) >> 8) |
3344 | canvas_read((canvas & 0xff00) >> 8, &dst); |
3345 | if ((canvas & 0xff0000) >> 16) |
3346 | canvas_read((canvas & 0xff0000) >> 16, &dst); |
3347 | addr_info[1] = dst.addr - addr_info[0] + |
3348 | dst.width * dst.height; |
3349 | } else { |
3350 | addr_info[0] = 0; |
3351 | addr_info[1] = 0; |
3352 | } |
3353 | dma_flush(dst.addr, dst.width * dst.height * 3 / 2); |
3354 | r = copy_to_user((u32 *)arg, addr_info, 2 * sizeof(u32)); |
3355 | break; |
3356 | case AMVENC_AVC_IOC_MAX_INSTANCE: |
3357 | put_user(encode_manager.max_instance, (u32 *)arg); |
3358 | break; |
3359 | case AMVENC_AVC_IOC_QP_MODE: |
3360 | get_user(qp_mode, ((u32 *)arg)); |
3361 | pr_info("qp_mode %d\n", qp_mode); |
3362 | break; |
3363 | default: |
3364 | r = -1; |
3365 | break; |
3366 | } |
3367 | return r; |
3368 | } |
3369 | |
3370 | #ifdef CONFIG_COMPAT |
/* 32-bit userspace entry point: translate the compat pointer and
 * delegate to the native ioctl handler.
 */
static long amvenc_avc_compat_ioctl(struct file *filp,
	unsigned int cmd, unsigned long args)
{
	return amvenc_avc_ioctl(filp, cmd,
		(unsigned long)compat_ptr(args));
}
3380 | #endif |
3381 | |
3382 | static s32 avc_mmap(struct file *filp, struct vm_area_struct *vma) |
3383 | { |
3384 | struct encode_wq_s *wq = (struct encode_wq_s *)filp->private_data; |
3385 | ulong off = vma->vm_pgoff << PAGE_SHIFT; |
3386 | ulong vma_size = vma->vm_end - vma->vm_start; |
3387 | |
3388 | if (vma_size == 0) { |
3389 | enc_pr(LOG_ERROR, "vma_size is 0, wq:%p.\n", (void *)wq); |
3390 | return -EAGAIN; |
3391 | } |
3392 | if (!off) |
3393 | off += wq->mem.buf_start; |
3394 | enc_pr(LOG_ALL, |
3395 | "vma_size is %ld , off is %ld, wq:%p.\n", |
3396 | vma_size, off, (void *)wq); |
3397 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; |
3398 | /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */ |
3399 | if (remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, |
3400 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) { |
3401 | enc_pr(LOG_ERROR, |
3402 | "set_cached: failed remap_pfn_range, wq:%p.\n", |
3403 | (void *)wq); |
3404 | return -EAGAIN; |
3405 | } |
3406 | return 0; |
3407 | } |
3408 | |
3409 | static u32 amvenc_avc_poll(struct file *file, poll_table *wait_table) |
3410 | { |
3411 | struct encode_wq_s *wq = (struct encode_wq_s *)file->private_data; |
3412 | |
3413 | poll_wait(file, &wq->request_complete, wait_table); |
3414 | |
3415 | if (atomic_read(&wq->request_ready)) { |
3416 | atomic_dec(&wq->request_ready); |
3417 | return POLLIN | POLLRDNORM; |
3418 | } |
3419 | return 0; |
3420 | } |
3421 | |
/* Character-device operations for the AVC encoder node. */
static const struct file_operations amvenc_avc_fops = {
	.owner = THIS_MODULE,
	.open = amvenc_avc_open,
	.mmap = avc_mmap,
	.release = amvenc_avc_release,
	.unlocked_ioctl = amvenc_avc_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amvenc_avc_compat_ioctl,
#endif
	.poll = amvenc_avc_poll,
};
3433 | |
3434 | /* work queue function */ |
/*
 * encode_process_request() - run one queued encode request to
 * completion on the hardware.
 * @manager: global encoder manager (irq/status state).
 * @pitem:   queue item holding the request and its owning work queue.
 *
 * Starts the command, waits (optionally with timeout) for the irq-side
 * completion status, then harvests the result: SPS/PPS sizes for
 * sequence/picture commands, bitstream size plus requested cache
 * flushes for frame commands, or a light reset on timeout/error.
 * Finally signals pollers via request_ready/request_complete.
 */
static s32 encode_process_request(struct encode_manager_s *manager,
	struct encode_queue_item_s *pitem)
{
	s32 ret = 0;
	struct encode_wq_s *wq = pitem->request.parent;
	struct encode_request_s *request = &pitem->request;
	/* timeout 0 means "one jiffy", not "forever" */
	u32 timeout = (request->timeout == 0) ?
		1 : msecs_to_jiffies(request->timeout);
	u32 buf_start = 0;
	u32 size = 0;
	/* full reference-frame size: mb-aligned width x height, NV12 */
	u32 flush_size = ((wq->pic.encoder_width + 31) >> 5 << 5) *
		((wq->pic.encoder_height + 15) >> 4 << 4) * 3 / 2;

	struct enc_dma_cfg *cfg = NULL;
	int i = 0;

#ifdef H264_ENC_CBR
	/* push the CBR rate-control table to the device before encoding */
	if (request->cmd == ENCODER_IDR || request->cmd == ENCODER_NON_IDR) {
		if (request->flush_flag & AMVENC_FLUSH_FLAG_CBR
			&& get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) {
			void *vaddr = wq->mem.cbr_info_ddr_virt_addr;
			ConvertTable2Risc(vaddr, 0xa00);
			buf_start = getbuffer(wq, ENCODER_BUFFER_CBR);
			codec_mm_dma_flush(vaddr, wq->mem.cbr_info_ddr_size, DMA_TO_DEVICE);
		}
	}
#endif

Again:
	amvenc_avc_start_cmd(wq, request);

	if (no_timeout) {
		wait_event_interruptible(manager->event.hw_complete,
			(manager->encode_hw_status == ENCODER_IDR_DONE
			|| manager->encode_hw_status == ENCODER_NON_IDR_DONE
			|| manager->encode_hw_status == ENCODER_SEQUENCE_DONE
			|| manager->encode_hw_status == ENCODER_PICTURE_DONE));
	} else {
		wait_event_interruptible_timeout(manager->event.hw_complete,
			((manager->encode_hw_status == ENCODER_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_NON_IDR_DONE)
			|| (manager->encode_hw_status == ENCODER_SEQUENCE_DONE)
			|| (manager->encode_hw_status == ENCODER_PICTURE_DONE)),
			timeout);
	}

	/* SEQUENCE done: record SPS size and chain into PICTURE (PPS) */
	if ((request->cmd == ENCODER_SEQUENCE) &&
		(manager->encode_hw_status == ENCODER_SEQUENCE_DONE)) {
		wq->sps_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
		wq->hw_status = manager->encode_hw_status;
		request->cmd = ENCODER_PICTURE;
		goto Again;
	} else if ((request->cmd == ENCODER_PICTURE) &&
		(manager->encode_hw_status == ENCODER_PICTURE_DONE)) {
		wq->pps_size =
			READ_HREG(HCODEC_VLC_TOTAL_BYTES) - wq->sps_size;
		wq->hw_status = manager->encode_hw_status;
		if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
			buf_start = getbuffer(wq, ENCODER_BUFFER_OUTPUT);
			cache_flush(buf_start,
				wq->sps_size + wq->pps_size);
		}
		/* pack SPS size in high 16 bits, PPS size in low 16 */
		wq->output_size = (wq->sps_size << 16) | wq->pps_size;
	} else {
		wq->hw_status = manager->encode_hw_status;
		if ((manager->encode_hw_status == ENCODER_IDR_DONE) ||
			(manager->encode_hw_status == ENCODER_NON_IDR_DONE)) {
			/* frame finished: read bitstream size and honor the
			 * caller-requested cache invalidations
			 */
			wq->output_size = READ_HREG(HCODEC_VLC_TOTAL_BYTES);
			if (request->flush_flag & AMVENC_FLUSH_FLAG_OUTPUT) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_OUTPUT);
				cache_flush(buf_start, wq->output_size);
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_DUMP) {
				buf_start = getbuffer(wq,
					ENCODER_BUFFER_DUMP);
				size = wq->mem.dump_info_ddr_size;
				cache_flush(buf_start, size);
				//enc_pr(LOG_DEBUG, "CBR flush dump_info done");
			}
			if (request->flush_flag &
				AMVENC_FLUSH_FLAG_REFERENCE) {
				u32 ref_id = ENCODER_BUFFER_REF0;

				if ((wq->mem.ref_buf_canvas & 0xff) ==
					(ENC_CANVAS_OFFSET))
					ref_id = ENCODER_BUFFER_REF0;
				else
					ref_id = ENCODER_BUFFER_REF1;
				buf_start = getbuffer(wq, ref_id);
				cache_flush(buf_start, flush_size);
			}
		} else {
			/* timeout or unexpected status: dump diagnostics and
			 * light-reset the encoder
			 */
			manager->encode_hw_status = ENCODER_ERROR;
			enc_pr(LOG_DEBUG, "avc encode light reset --- ");
			enc_pr(LOG_DEBUG,
				"frame type: %s, size: %dx%d, wq: %p\n",
				(request->cmd == ENCODER_IDR) ? "IDR" : "P",
				wq->pic.encoder_width,
				wq->pic.encoder_height, (void *)wq);
			enc_pr(LOG_DEBUG,
				"mb info: 0x%x, encode status: 0x%x, dct status: 0x%x ",
				READ_HREG(HCODEC_VLC_MB_INFO),
				READ_HREG(ENCODER_STATUS),
				READ_HREG(HCODEC_QDCT_STATUS_CTRL));
			enc_pr(LOG_DEBUG,
				"vlc status: 0x%x, me status: 0x%x, risc pc:0x%x, debug:0x%x\n",
				READ_HREG(HCODEC_VLC_STATUS_CTRL),
				READ_HREG(HCODEC_ME_STATUS),
				READ_HREG(HCODEC_MPC_E),
				READ_HREG(DEBUG_REG));
			amvenc_avc_light_reset(wq, 30);
		}
		/* release any imported dma-buf input planes */
		for (i = 0; i < request->plane_num; i++) {
			cfg = &request->dma_cfg[i];
			enc_pr(LOG_INFO, "request vaddr %p, paddr %p\n",
				cfg->vaddr, cfg->paddr);
			if (cfg->fd >= 0 && cfg->vaddr != NULL)
				enc_dma_buf_unmap(cfg);
		}
	}
	/* wake pollers: one completed request is ready for harvest */
	atomic_inc(&wq->request_ready);
	wake_up_interruptible(&wq->request_complete);
	return ret;
}
3561 | |
/*
 * encode_wq_add_request() - queue the work queue's pending request for
 * processing by the encoder thread.
 *
 * Under the manager lock: verifies @wq is registered, takes a free
 * queue item, copies the request into it, clears the wq-side staging
 * request, and moves the item onto the process queue. Signals the
 * encoder thread on success.
 *
 * Returns 0 on success, -1 on failure.
 */
s32 encode_wq_add_request(struct encode_wq_s *wq)
{
	struct encode_queue_item_s *pitem = NULL;
	struct list_head *head = NULL;
	struct encode_wq_s *tmp = NULL;
	bool find = false;

	spin_lock(&encode_manager.event.sem_lock);

	/* the wq must be registered with the manager */
	head = &encode_manager.wq;
	list_for_each_entry(tmp, head, list) {
		if ((wq == tmp) && (wq != NULL)) {
			find = true;
			break;
		}
	}

	if (find == false) {
		enc_pr(LOG_ERROR, "current wq (%p) doesn't register.\n",
			(void *)wq);
		goto error;
	}

	if (list_empty(&encode_manager.free_queue)) {
		enc_pr(LOG_ERROR, "work queue no space, wq:%p.\n",
			(void *)wq);
		goto error;
	}

	pitem = list_entry(encode_manager.free_queue.next,
		struct encode_queue_item_s, list);
	/* NOTE(review): list_entry() never yields an ERR_PTR, so this
	 * IS_ERR() check is effectively dead code — kept as-is.
	 */
	if (IS_ERR(pitem))
		goto error;

	memcpy(&pitem->request, &wq->request, sizeof(struct encode_request_s));

	enc_pr(LOG_INFO, "new work request %p, vaddr %p, paddr %p\n", &pitem->request,
		pitem->request.dma_cfg[0].vaddr,pitem->request.dma_cfg[0].paddr);

	/* reset the staging request; fd = -1 marks unused dma planes */
	memset(&wq->request, 0, sizeof(struct encode_request_s));
	wq->request.dma_cfg[0].fd = -1;
	wq->request.dma_cfg[1].fd = -1;
	wq->request.dma_cfg[2].fd = -1;
	wq->hw_status = 0;
	wq->output_size = 0;
	pitem->request.parent = wq;
	list_move_tail(&pitem->list, &encode_manager.process_queue);
	spin_unlock(&encode_manager.event.sem_lock);

	enc_pr(LOG_INFO,
		"add new work ok, cmd:%d, ucode mode: %d, wq:%p.\n",
		pitem->request.cmd, pitem->request.ucode_mode,
		(void *)wq);
	complete(&encode_manager.event.request_in_com);/* new cmd come in */
	return 0;
error:
	spin_unlock(&encode_manager.event.sem_lock);
	return -1;
}
3621 | |
3622 | struct encode_wq_s *create_encode_work_queue(void) |
3623 | { |
3624 | struct encode_wq_s *encode_work_queue = NULL; |
3625 | bool done = false; |
3626 | u32 i, max_instance; |
3627 | struct Buff_s *reserve_buff; |
3628 | |
3629 | encode_work_queue = kzalloc(sizeof(struct encode_wq_s), GFP_KERNEL); |
3630 | if (IS_ERR(encode_work_queue)) { |
3631 | enc_pr(LOG_ERROR, "can't create work queue\n"); |
3632 | return NULL; |
3633 | } |
3634 | max_instance = encode_manager.max_instance; |
3635 | encode_work_queue->pic.init_qppicture = 26; |
3636 | encode_work_queue->pic.log2_max_frame_num = 4; |
3637 | encode_work_queue->pic.log2_max_pic_order_cnt_lsb = 4; |
3638 | encode_work_queue->pic.idr_pic_id = 0; |
3639 | encode_work_queue->pic.frame_number = 0; |
3640 | encode_work_queue->pic.pic_order_cnt_lsb = 0; |
3641 | #ifdef H264_ENC_SVC |
3642 | /* Get settings from the global*/ |
3643 | encode_work_queue->pic.enable_svc = svc_enable; |
3644 | encode_work_queue->pic.non_ref_limit = svc_ref_conf; |
3645 | encode_work_queue->pic.non_ref_cnt = 0; |
3646 | enc_pr(LOG_INFO, "svc conf enable %d, duration %d\n", |
3647 | encode_work_queue->pic.enable_svc, |
3648 | encode_work_queue->pic.non_ref_limit); |
3649 | #endif |
3650 | encode_work_queue->ucode_index = UCODE_MODE_FULL; |
3651 | |
3652 | #ifdef H264_ENC_CBR |
3653 | encode_work_queue->cbr_info.block_w = 16; |
3654 | encode_work_queue->cbr_info.block_h = 9; |
3655 | encode_work_queue->cbr_info.long_th = CBR_LONG_THRESH; |
3656 | encode_work_queue->cbr_info.start_tbl_id = START_TABLE_ID; |
3657 | encode_work_queue->cbr_info.short_shift = CBR_SHORT_SHIFT; |
3658 | encode_work_queue->cbr_info.long_mb_num = CBR_LONG_MB_NUM; |
3659 | #endif |
3660 | init_waitqueue_head(&encode_work_queue->request_complete); |
3661 | atomic_set(&encode_work_queue->request_ready, 0); |
3662 | spin_lock(&encode_manager.event.sem_lock); |
3663 | if (encode_manager.wq_count < encode_manager.max_instance) { |
3664 | list_add_tail(&encode_work_queue->list, &encode_manager.wq); |
3665 | encode_manager.wq_count++; |
3666 | if (encode_manager.use_reserve == true) { |
3667 | for (i = 0; i < max_instance; i++) { |
3668 | reserve_buff = &encode_manager.reserve_buff[i]; |
3669 | if (reserve_buff->used == false) { |
3670 | encode_work_queue->mem.buf_start = |
3671 | reserve_buff->buf_start; |
3672 | encode_work_queue->mem.buf_size = |
3673 | reserve_buff->buf_size; |
3674 | reserve_buff->used = true; |
3675 | done = true; |
3676 | break; |
3677 | } |
3678 | } |
3679 | } else |
3680 | done = true; |
3681 | } |
3682 | spin_unlock(&encode_manager.event.sem_lock); |
3683 | if (done == false) { |
3684 | kfree(encode_work_queue); |
3685 | encode_work_queue = NULL; |
3686 | enc_pr(LOG_ERROR, "too many work queue!\n"); |
3687 | } |
3688 | return encode_work_queue; /* find it */ |
3689 | } |
3690 | |
3691 | static void _destroy_encode_work_queue(struct encode_manager_s *manager, |
3692 | struct encode_wq_s **wq, |
3693 | struct encode_wq_s *encode_work_queue, |
3694 | bool *find) |
3695 | { |
3696 | struct list_head *head; |
3697 | struct encode_wq_s *wp_tmp = NULL; |
3698 | u32 i, max_instance; |
3699 | struct Buff_s *reserve_buff; |
3700 | u32 buf_start = encode_work_queue->mem.buf_start; |
3701 | |
3702 | max_instance = manager->max_instance; |
3703 | head = &manager->wq; |
3704 | list_for_each_entry_safe((*wq), wp_tmp, head, list) { |
3705 | if ((*wq) && (*wq == encode_work_queue)) { |
3706 | list_del(&(*wq)->list); |
3707 | if (manager->use_reserve == true) { |
3708 | for (i = 0; i < max_instance; i++) { |
3709 | reserve_buff = |
3710 | &manager->reserve_buff[i]; |
3711 | if (reserve_buff->used == true && |
3712 | buf_start == |
3713 | reserve_buff->buf_start) { |
3714 | reserve_buff->used = false; |
3715 | break; |
3716 | } |
3717 | } |
3718 | } |
3719 | *find = true; |
3720 | manager->wq_count--; |
3721 | enc_pr(LOG_DEBUG, |
3722 | "remove encode_work_queue %p success, %s line %d.\n", |
3723 | (void *)encode_work_queue, |
3724 | __func__, __LINE__); |
3725 | break; |
3726 | } |
3727 | } |
3728 | } |
3729 | |
3730 | s32 destroy_encode_work_queue(struct encode_wq_s *encode_work_queue) |
3731 | { |
3732 | struct encode_queue_item_s *pitem, *tmp; |
3733 | struct encode_wq_s *wq = NULL; |
3734 | bool find = false; |
3735 | |
3736 | struct list_head *head; |
3737 | |
3738 | if (encode_work_queue) { |
3739 | spin_lock(&encode_manager.event.sem_lock); |
3740 | if (encode_manager.current_wq == encode_work_queue) { |
3741 | encode_manager.remove_flag = true; |
3742 | spin_unlock(&encode_manager.event.sem_lock); |
3743 | enc_pr(LOG_DEBUG, |
3744 | "warning--Destroy the running queue, should not be here.\n"); |
3745 | wait_for_completion( |
3746 | &encode_manager.event.process_complete); |
3747 | spin_lock(&encode_manager.event.sem_lock); |
3748 | } /* else we can delete it safely. */ |
3749 | |
3750 | head = &encode_manager.process_queue; |
3751 | list_for_each_entry_safe(pitem, tmp, head, list) { |
3752 | if (pitem && pitem->request.parent == |
3753 | encode_work_queue) { |
3754 | pitem->request.parent = NULL; |
3755 | enc_pr(LOG_DEBUG, |
3756 | "warning--remove not process request, should not be here.\n"); |
3757 | list_move_tail(&pitem->list, |
3758 | &encode_manager.free_queue); |
3759 | } |
3760 | } |
3761 | |
3762 | _destroy_encode_work_queue(&encode_manager, &wq, |
3763 | encode_work_queue, &find); |
3764 | spin_unlock(&encode_manager.event.sem_lock); |
3765 | #ifdef CONFIG_CMA |
3766 | if (encode_work_queue->mem.buf_start) { |
3767 | if (wq->mem.cbr_info_ddr_virt_addr != NULL) { |
3768 | codec_mm_unmap_phyaddr(wq->mem.cbr_info_ddr_virt_addr); |
3769 | wq->mem.cbr_info_ddr_virt_addr = NULL; |
3770 | } |
3771 | codec_mm_free_for_dma( |
3772 | ENCODE_NAME, |
3773 | encode_work_queue->mem.buf_start); |
3774 | encode_work_queue->mem.buf_start = 0; |
3775 | |
3776 | } |
3777 | #endif |
3778 | kfree(encode_work_queue); |
3779 | complete(&encode_manager.event.request_in_com); |
3780 | } |
3781 | return 0; |
3782 | } |
3783 | |
/*
 * Monitor kthread: serializes all hardware encode work.
 *
 * Sleeps on event.request_in_com; each wakeup either (a) performs first-time
 * hardware init with the first registered wq, (b) powers the encoder down
 * when the last wq is gone, or (c) pops one request from process_queue and
 * runs it.  manager state (current_wq/current_item/lists) is only touched
 * under event.sem_lock; the actual encode runs unlocked.
 * Returns 0 when the manager is stopped.
 */
static s32 encode_monitor_thread(void *data)
{
	struct encode_manager_s *manager = (struct encode_manager_s *)data;
	struct encode_queue_item_s *pitem = NULL;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
	s32 ret = 0;

	enc_pr(LOG_DEBUG, "encode workqueue monitor start.\n");
	/* run at top real-time FIFO priority so encoding is not starved */
	sched_setscheduler(current, SCHED_FIFO, &param);
	/* SIGTERM (sent by encode_stop_monitor) interrupts the wait below */
	allow_signal(SIGTERM);
	/* setup current_wq here. */
	while (manager->process_queue_state != ENCODE_PROCESS_QUEUE_STOP) {
		if (kthread_should_stop())
			break;

		/* block until a request/queue change is signalled */
		ret = wait_for_completion_interruptible(
				&manager->event.request_in_com);

		if (ret == -ERESTARTSYS)
			break;

		if (kthread_should_stop())
			break;
		if (manager->inited == false) {
			/* hardware not initialized yet: use the first wq
			 * in the list to bring it up once */
			spin_lock(&manager->event.sem_lock);
			if (!list_empty(&manager->wq)) {
				struct encode_wq_s *first_wq =
					list_entry(manager->wq.next,
					struct encode_wq_s, list);
				manager->current_wq = first_wq;
				spin_unlock(&manager->event.sem_lock);
				if (first_wq) {
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
					if (!manager->context)
						manager->context =
						create_ge2d_work_queue();
#endif
					avc_init(first_wq);
					manager->inited = true;
				}
				spin_lock(&manager->event.sem_lock);
				manager->current_wq = NULL;
				spin_unlock(&manager->event.sem_lock);
				/* a destroy raced with init: release waiter */
				if (manager->remove_flag) {
					complete(
						&manager
						->event.process_complete);
					manager->remove_flag = false;
				}
			} else
				spin_unlock(&manager->event.sem_lock);
			continue;
		}

		spin_lock(&manager->event.sem_lock);
		pitem = NULL;
		if (list_empty(&manager->wq)) {
			/* last wq destroyed: power down; next wq re-inits */
			spin_unlock(&manager->event.sem_lock);
			manager->inited = false;
			amvenc_avc_stop();
#ifdef CONFIG_AMLOGIC_MEDIA_GE2D
			if (manager->context) {
				destroy_ge2d_work_queue(manager->context);
				manager->context = NULL;
			}
#endif
			enc_pr(LOG_DEBUG, "power off encode.\n");
			continue;
		} else if (!list_empty(&manager->process_queue)) {
			/* pop the oldest pending request */
			pitem = list_entry(manager->process_queue.next,
				struct encode_queue_item_s, list);
			list_del(&pitem->list);
			manager->current_item = pitem;
			manager->current_wq = pitem->request.parent;
		}
		spin_unlock(&manager->event.sem_lock);

		if (pitem) {
			/* runs unlocked; may block on the hardware */
			encode_process_request(manager, pitem);
			spin_lock(&manager->event.sem_lock);
			list_add_tail(&pitem->list, &manager->free_queue);
			manager->current_item = NULL;
			manager->last_wq = manager->current_wq;
			manager->current_wq = NULL;
			spin_unlock(&manager->event.sem_lock);
		}
		/* unblock destroy_encode_work_queue() waiting on us */
		if (manager->remove_flag) {
			complete(&manager->event.process_complete);
			manager->remove_flag = false;
		}
	}
	/* park until kthread_stop() is actually called */
	while (!kthread_should_stop())
		msleep(20);

	enc_pr(LOG_DEBUG, "exit encode_monitor_thread.\n");
	return 0;
}
3881 | |
3882 | static s32 encode_start_monitor(void) |
3883 | { |
3884 | s32 ret = 0; |
3885 | |
3886 | if (get_cpu_type() >= MESON_CPU_MAJOR_ID_GXTVBB) { |
3887 | y_tnr_mot2alp_nrm_gain = 216; |
3888 | y_tnr_mot2alp_dis_gain = 144; |
3889 | c_tnr_mot2alp_nrm_gain = 216; |
3890 | c_tnr_mot2alp_dis_gain = 144; |
3891 | } else { |
3892 | /* more tnr */ |
3893 | y_tnr_mot2alp_nrm_gain = 144; |
3894 | y_tnr_mot2alp_dis_gain = 96; |
3895 | c_tnr_mot2alp_nrm_gain = 144; |
3896 | c_tnr_mot2alp_dis_gain = 96; |
3897 | } |
3898 | |
3899 | enc_pr(LOG_DEBUG, "encode start monitor.\n"); |
3900 | encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_START; |
3901 | encode_manager.encode_thread = kthread_run(encode_monitor_thread, |
3902 | &encode_manager, "encode_monitor"); |
3903 | if (IS_ERR(encode_manager.encode_thread)) { |
3904 | ret = PTR_ERR(encode_manager.encode_thread); |
3905 | encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP; |
3906 | enc_pr(LOG_ERROR, |
3907 | "encode monitor : failed to start kthread (%d)\n", ret); |
3908 | } |
3909 | return ret; |
3910 | } |
3911 | |
3912 | static s32 encode_stop_monitor(void) |
3913 | { |
3914 | enc_pr(LOG_DEBUG, "stop encode monitor thread\n"); |
3915 | if (encode_manager.encode_thread) { |
3916 | spin_lock(&encode_manager.event.sem_lock); |
3917 | if (!list_empty(&encode_manager.wq)) { |
3918 | u32 count = encode_manager.wq_count; |
3919 | |
3920 | spin_unlock(&encode_manager.event.sem_lock); |
3921 | enc_pr(LOG_ERROR, |
3922 | "stop encode monitor thread error, active wq (%d) is not 0.\n", |
3923 | count); |
3924 | return -1; |
3925 | } |
3926 | spin_unlock(&encode_manager.event.sem_lock); |
3927 | encode_manager.process_queue_state = ENCODE_PROCESS_QUEUE_STOP; |
3928 | send_sig(SIGTERM, encode_manager.encode_thread, 1); |
3929 | complete(&encode_manager.event.request_in_com); |
3930 | kthread_stop(encode_manager.encode_thread); |
3931 | encode_manager.encode_thread = NULL; |
3932 | kfree(mc_addr); |
3933 | mc_addr = NULL; |
3934 | } |
3935 | return 0; |
3936 | } |
3937 | |
3938 | static s32 encode_wq_init(void) |
3939 | { |
3940 | u32 i = 0; |
3941 | struct encode_queue_item_s *pitem = NULL; |
3942 | |
3943 | enc_pr(LOG_DEBUG, "encode_wq_init.\n"); |
3944 | encode_manager.irq_requested = false; |
3945 | |
3946 | spin_lock_init(&encode_manager.event.sem_lock); |
3947 | init_completion(&encode_manager.event.request_in_com); |
3948 | init_waitqueue_head(&encode_manager.event.hw_complete); |
3949 | init_completion(&encode_manager.event.process_complete); |
3950 | INIT_LIST_HEAD(&encode_manager.process_queue); |
3951 | INIT_LIST_HEAD(&encode_manager.free_queue); |
3952 | INIT_LIST_HEAD(&encode_manager.wq); |
3953 | |
3954 | tasklet_init(&encode_manager.encode_tasklet, |
3955 | encode_isr_tasklet, |
3956 | (ulong)&encode_manager); |
3957 | |
3958 | for (i = 0; i < MAX_ENCODE_REQUEST; i++) { |
3959 | pitem = kcalloc(1, |
3960 | sizeof(struct encode_queue_item_s), |
3961 | GFP_KERNEL); |
3962 | if (IS_ERR(pitem)) { |
3963 | enc_pr(LOG_ERROR, "can't request queue item memory.\n"); |
3964 | return -1; |
3965 | } |
3966 | pitem->request.parent = NULL; |
3967 | list_add_tail(&pitem->list, &encode_manager.free_queue); |
3968 | } |
3969 | encode_manager.current_wq = NULL; |
3970 | encode_manager.last_wq = NULL; |
3971 | encode_manager.encode_thread = NULL; |
3972 | encode_manager.current_item = NULL; |
3973 | encode_manager.wq_count = 0; |
3974 | encode_manager.remove_flag = false; |
3975 | InitEncodeWeight(); |
3976 | if (encode_start_monitor()) { |
3977 | enc_pr(LOG_ERROR, "encode create thread error.\n"); |
3978 | return -1; |
3979 | } |
3980 | return 0; |
3981 | } |
3982 | |
3983 | static s32 encode_wq_uninit(void) |
3984 | { |
3985 | struct encode_queue_item_s *pitem, *tmp; |
3986 | struct list_head *head; |
3987 | u32 count = 0; |
3988 | s32 r = -1; |
3989 | |
3990 | enc_pr(LOG_DEBUG, "uninit encode wq.\n"); |
3991 | if (encode_stop_monitor() == 0) { |
3992 | if ((encode_manager.irq_num >= 0) && |
3993 | (encode_manager.irq_requested == true)) { |
3994 | free_irq(encode_manager.irq_num, &encode_manager); |
3995 | encode_manager.irq_requested = false; |
3996 | } |
3997 | spin_lock(&encode_manager.event.sem_lock); |
3998 | head = &encode_manager.process_queue; |
3999 | list_for_each_entry_safe(pitem, tmp, head, list) { |
4000 | if (pitem) { |
4001 | list_del(&pitem->list); |
4002 | kfree(pitem); |
4003 | count++; |
4004 | } |
4005 | } |
4006 | head = &encode_manager.free_queue; |
4007 | list_for_each_entry_safe(pitem, tmp, head, list) { |
4008 | if (pitem) { |
4009 | list_del(&pitem->list); |
4010 | kfree(pitem); |
4011 | count++; |
4012 | } |
4013 | } |
4014 | spin_unlock(&encode_manager.event.sem_lock); |
4015 | if (count == MAX_ENCODE_REQUEST) |
4016 | r = 0; |
4017 | else { |
4018 | enc_pr(LOG_ERROR, "lost some request item %d.\n", |
4019 | MAX_ENCODE_REQUEST - count); |
4020 | } |
4021 | } |
4022 | return r; |
4023 | } |
4024 | |
4025 | static ssize_t encode_status_show(struct class *cla, |
4026 | struct class_attribute *attr, char *buf) |
4027 | { |
4028 | u32 process_count = 0; |
4029 | u32 free_count = 0; |
4030 | struct encode_queue_item_s *pitem = NULL; |
4031 | struct encode_wq_s *current_wq = NULL; |
4032 | struct encode_wq_s *last_wq = NULL; |
4033 | struct list_head *head = NULL; |
4034 | s32 irq_num = 0; |
4035 | u32 hw_status = 0; |
4036 | u32 process_queue_state = 0; |
4037 | u32 wq_count = 0; |
4038 | u32 ucode_index; |
4039 | bool need_reset; |
4040 | bool process_irq; |
4041 | bool inited; |
4042 | bool use_reserve; |
4043 | struct Buff_s reserve_mem; |
4044 | u32 max_instance; |
4045 | #ifdef CONFIG_CMA |
4046 | bool check_cma = false; |
4047 | #endif |
4048 | |
4049 | spin_lock(&encode_manager.event.sem_lock); |
4050 | head = &encode_manager.free_queue; |
4051 | list_for_each_entry(pitem, head, list) { |
4052 | free_count++; |
4053 | if (free_count > MAX_ENCODE_REQUEST) |
4054 | break; |
4055 | } |
4056 | |
4057 | head = &encode_manager.process_queue; |
4058 | list_for_each_entry(pitem, head, list) { |
4059 | process_count++; |
4060 | if (free_count > MAX_ENCODE_REQUEST) |
4061 | break; |
4062 | } |
4063 | |
4064 | current_wq = encode_manager.current_wq; |
4065 | last_wq = encode_manager.last_wq; |
4066 | pitem = encode_manager.current_item; |
4067 | irq_num = encode_manager.irq_num; |
4068 | hw_status = encode_manager.encode_hw_status; |
4069 | process_queue_state = encode_manager.process_queue_state; |
4070 | wq_count = encode_manager.wq_count; |
4071 | ucode_index = encode_manager.ucode_index; |
4072 | need_reset = encode_manager.need_reset; |
4073 | process_irq = encode_manager.process_irq; |
4074 | inited = encode_manager.inited; |
4075 | use_reserve = encode_manager.use_reserve; |
4076 | reserve_mem.buf_start = encode_manager.reserve_mem.buf_start; |
4077 | reserve_mem.buf_size = encode_manager.reserve_mem.buf_size; |
4078 | |
4079 | max_instance = encode_manager.max_instance; |
4080 | #ifdef CONFIG_CMA |
4081 | check_cma = encode_manager.check_cma; |
4082 | #endif |
4083 | |
4084 | spin_unlock(&encode_manager.event.sem_lock); |
4085 | |
4086 | enc_pr(LOG_DEBUG, |
4087 | "encode process queue count: %d, free queue count: %d.\n", |
4088 | process_count, free_count); |
4089 | enc_pr(LOG_DEBUG, |
4090 | "encode curent wq: %p, last wq: %p, wq count: %d, max_instance: %d.\n", |
4091 | current_wq, last_wq, wq_count, max_instance); |
4092 | if (current_wq) |
4093 | enc_pr(LOG_DEBUG, |
4094 | "encode curent wq -- encode width: %d, encode height: %d.\n", |
4095 | current_wq->pic.encoder_width, |
4096 | current_wq->pic.encoder_height); |
4097 | enc_pr(LOG_DEBUG, |
4098 | "encode curent pitem: %p, ucode_index: %d, hw_status: %d, need_reset: %s, process_irq: %s.\n", |
4099 | pitem, ucode_index, hw_status, need_reset ? "true" : "false", |
4100 | process_irq ? "true" : "false"); |
4101 | enc_pr(LOG_DEBUG, |
4102 | "encode irq num: %d, inited: %s, process_queue_state: %d.\n", |
4103 | irq_num, inited ? "true" : "false", process_queue_state); |
4104 | if (use_reserve) { |
4105 | enc_pr(LOG_DEBUG, |
4106 | "encode use reserve memory, buffer start: 0x%x, size: %d MB.\n", |
4107 | reserve_mem.buf_start, |
4108 | reserve_mem.buf_size / SZ_1M); |
4109 | } else { |
4110 | #ifdef CONFIG_CMA |
4111 | enc_pr(LOG_DEBUG, "encode check cma: %s.\n", |
4112 | check_cma ? "true" : "false"); |
4113 | #endif |
4114 | } |
4115 | return snprintf(buf, 40, "encode max instance: %d\n", max_instance); |
4116 | } |
4117 | |
4118 | static struct class_attribute amvenc_class_attrs[] = { |
4119 | __ATTR(encode_status, |
4120 | S_IRUGO | S_IWUSR, |
4121 | encode_status_show, |
4122 | NULL), |
4123 | __ATTR_NULL |
4124 | }; |
4125 | |
/* Device class registered by init_avc_device(); exposes the attributes
 * above under /sys/class/<CLASS_NAME>/. */
static struct class amvenc_avc_class = {
	.name = CLASS_NAME,
	.class_attrs = amvenc_class_attrs,
};
4130 | |
4131 | s32 init_avc_device(void) |
4132 | { |
4133 | s32 r = 0; |
4134 | |
4135 | r = register_chrdev(0, DEVICE_NAME, &amvenc_avc_fops); |
4136 | if (r <= 0) { |
4137 | enc_pr(LOG_ERROR, "register amvenc_avc device error.\n"); |
4138 | return r; |
4139 | } |
4140 | avc_device_major = r; |
4141 | |
4142 | r = class_register(&amvenc_avc_class); |
4143 | if (r < 0) { |
4144 | enc_pr(LOG_ERROR, "error create amvenc_avc class.\n"); |
4145 | return r; |
4146 | } |
4147 | |
4148 | amvenc_avc_dev = device_create(&amvenc_avc_class, NULL, |
4149 | MKDEV(avc_device_major, 0), NULL, |
4150 | DEVICE_NAME); |
4151 | |
4152 | if (IS_ERR(amvenc_avc_dev)) { |
4153 | enc_pr(LOG_ERROR, "create amvenc_avc device error.\n"); |
4154 | class_unregister(&amvenc_avc_class); |
4155 | return -1; |
4156 | } |
4157 | return r; |
4158 | } |
4159 | |
4160 | s32 uninit_avc_device(void) |
4161 | { |
4162 | if (amvenc_avc_dev) |
4163 | device_destroy(&amvenc_avc_class, MKDEV(avc_device_major, 0)); |
4164 | |
4165 | class_destroy(&amvenc_avc_class); |
4166 | |
4167 | unregister_chrdev(avc_device_major, DEVICE_NAME); |
4168 | return 0; |
4169 | } |
4170 | |
4171 | static s32 avc_mem_device_init(struct reserved_mem *rmem, struct device *dev) |
4172 | { |
4173 | s32 r; |
4174 | struct resource res; |
4175 | |
4176 | if (!rmem) { |
4177 | enc_pr(LOG_ERROR, |
4178 | "Can not obtain I/O memory, and will allocate avc buffer!\n"); |
4179 | r = -EFAULT; |
4180 | return r; |
4181 | } |
4182 | res.start = (phys_addr_t)rmem->base; |
4183 | res.end = res.start + (phys_addr_t)rmem->size - 1; |
4184 | encode_manager.reserve_mem.buf_start = res.start; |
4185 | encode_manager.reserve_mem.buf_size = res.end - res.start + 1; |
4186 | |
4187 | if (encode_manager.reserve_mem.buf_size >= |
4188 | amvenc_buffspec[0].min_buffsize) { |
4189 | encode_manager.max_instance = |
4190 | encode_manager.reserve_mem.buf_size / |
4191 | amvenc_buffspec[0].min_buffsize; |
4192 | if (encode_manager.max_instance > MAX_ENCODE_INSTANCE) |
4193 | encode_manager.max_instance = MAX_ENCODE_INSTANCE; |
4194 | encode_manager.reserve_buff = kzalloc( |
4195 | encode_manager.max_instance * |
4196 | sizeof(struct Buff_s), GFP_KERNEL); |
4197 | if (encode_manager.reserve_buff) { |
4198 | u32 i; |
4199 | struct Buff_s *reserve_buff; |
4200 | u32 max_instance = encode_manager.max_instance; |
4201 | |
4202 | for (i = 0; i < max_instance; i++) { |
4203 | reserve_buff = &encode_manager.reserve_buff[i]; |
4204 | reserve_buff->buf_start = |
4205 | i * |
4206 | amvenc_buffspec[0] |
4207 | .min_buffsize + |
4208 | encode_manager.reserve_mem.buf_start; |
4209 | reserve_buff->buf_size = |
4210 | encode_manager.reserve_mem.buf_start; |
4211 | reserve_buff->used = false; |
4212 | } |
4213 | encode_manager.use_reserve = true; |
4214 | r = 0; |
4215 | enc_pr(LOG_DEBUG, |
4216 | "amvenc_avc use reserve memory, buff start: 0x%x, size: 0x%x, max instance is %d\n", |
4217 | encode_manager.reserve_mem.buf_start, |
4218 | encode_manager.reserve_mem.buf_size, |
4219 | encode_manager.max_instance); |
4220 | } else { |
4221 | enc_pr(LOG_ERROR, |
4222 | "amvenc_avc alloc reserve buffer pointer fail. max instance is %d.\n", |
4223 | encode_manager.max_instance); |
4224 | encode_manager.max_instance = 0; |
4225 | encode_manager.reserve_mem.buf_start = 0; |
4226 | encode_manager.reserve_mem.buf_size = 0; |
4227 | r = -ENOMEM; |
4228 | } |
4229 | } else { |
4230 | enc_pr(LOG_ERROR, |
4231 | "amvenc_avc memory resource too small, size is 0x%x. Need 0x%x bytes at least.\n", |
4232 | encode_manager.reserve_mem.buf_size, |
4233 | amvenc_buffspec[0] |
4234 | .min_buffsize); |
4235 | encode_manager.reserve_mem.buf_start = 0; |
4236 | encode_manager.reserve_mem.buf_size = 0; |
4237 | r = -ENOMEM; |
4238 | } |
4239 | return r; |
4240 | } |
4241 | |
4242 | static s32 amvenc_avc_probe(struct platform_device *pdev) |
4243 | { |
4244 | /* struct resource mem; */ |
4245 | s32 res_irq; |
4246 | s32 idx; |
4247 | s32 r; |
4248 | |
4249 | enc_pr(LOG_INFO, "amvenc_avc probe start.\n"); |
4250 | |
4251 | encode_manager.this_pdev = pdev; |
4252 | #ifdef CONFIG_CMA |
4253 | encode_manager.check_cma = false; |
4254 | #endif |
4255 | encode_manager.reserve_mem.buf_start = 0; |
4256 | encode_manager.reserve_mem.buf_size = 0; |
4257 | encode_manager.use_reserve = false; |
4258 | encode_manager.max_instance = 0; |
4259 | encode_manager.reserve_buff = NULL; |
4260 | |
4261 | idx = of_reserved_mem_device_init(&pdev->dev); |
4262 | if (idx != 0) { |
4263 | enc_pr(LOG_DEBUG, |
4264 | "amvenc_avc_probe -- reserved memory config fail.\n"); |
4265 | } |
4266 | |
4267 | if (encode_manager.use_reserve == false) { |
4268 | #ifndef CONFIG_CMA |
4269 | enc_pr(LOG_ERROR, |
4270 | "amvenc_avc memory is invaild, probe fail!\n"); |
4271 | return -EFAULT; |
4272 | #else |
4273 | encode_manager.cma_pool_size = |
4274 | (codec_mm_get_total_size() > (MIN_SIZE * 3)) ? |
4275 | (MIN_SIZE * 3) : codec_mm_get_total_size(); |
4276 | enc_pr(LOG_DEBUG, |
4277 | "amvenc_avc - cma memory pool size: %d MB\n", |
4278 | (u32)encode_manager.cma_pool_size / SZ_1M); |
4279 | #endif |
4280 | } |
4281 | |
4282 | res_irq = platform_get_irq(pdev, 0); |
4283 | if (res_irq < 0) { |
4284 | enc_pr(LOG_ERROR, "[%s] get irq error!", __func__); |
4285 | return -EINVAL; |
4286 | } |
4287 | |
4288 | encode_manager.irq_num = res_irq; |
4289 | if (encode_wq_init()) { |
4290 | kfree(encode_manager.reserve_buff); |
4291 | encode_manager.reserve_buff = NULL; |
4292 | enc_pr(LOG_ERROR, "encode work queue init error.\n"); |
4293 | return -EFAULT; |
4294 | } |
4295 | |
4296 | r = init_avc_device(); |
4297 | enc_pr(LOG_INFO, "amvenc_avc probe end.\n"); |
4298 | return r; |
4299 | } |
4300 | |
4301 | static s32 amvenc_avc_remove(struct platform_device *pdev) |
4302 | { |
4303 | kfree(encode_manager.reserve_buff); |
4304 | encode_manager.reserve_buff = NULL; |
4305 | if (encode_wq_uninit()) |
4306 | enc_pr(LOG_ERROR, "encode work queue uninit error.\n"); |
4307 | uninit_avc_device(); |
4308 | enc_pr(LOG_INFO, "amvenc_avc remove.\n"); |
4309 | return 0; |
4310 | } |
4311 | |
/* Device-tree match table: binds this driver to "amlogic, amvenc_avc"
 * nodes (note: the compatible string contains a space as shipped in the
 * vendor DTS). */
static const struct of_device_id amlogic_avcenc_dt_match[] = {
	{
		.compatible = "amlogic, amvenc_avc",
	},
	{},
};
4318 | |
/* Platform driver glue; registered from the module init below. */
static struct platform_driver amvenc_avc_driver = {
	.probe = amvenc_avc_probe,
	.remove = amvenc_avc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = amlogic_avcenc_dt_match,
	}
};
4327 | |
/* Advertises the "avc" codec capability via vcodec_profile_register(). */
static struct codec_profile_t amvenc_avc_profile = {
	.name = "avc",
	.profile = ""
};
4332 | |
4333 | static s32 __init amvenc_avc_driver_init_module(void) |
4334 | { |
4335 | enc_pr(LOG_INFO, "amvenc_avc module init\n"); |
4336 | |
4337 | if (platform_driver_register(&amvenc_avc_driver)) { |
4338 | enc_pr(LOG_ERROR, |
4339 | "failed to register amvenc_avc driver\n"); |
4340 | return -ENODEV; |
4341 | } |
4342 | vcodec_profile_register(&amvenc_avc_profile); |
4343 | return 0; |
4344 | } |
4345 | |
4346 | static void __exit amvenc_avc_driver_remove_module(void) |
4347 | { |
4348 | enc_pr(LOG_INFO, "amvenc_avc module remove.\n"); |
4349 | |
4350 | platform_driver_unregister(&amvenc_avc_driver); |
4351 | } |
4352 | |
/* reserved_mem hooks installed by avc_mem_setup(); device_init runs when
 * the probe calls of_reserved_mem_device_init(). */
static const struct reserved_mem_ops rmem_avc_ops = {
	.device_init = avc_mem_device_init,
};
4356 | |
4357 | static s32 __init avc_mem_setup(struct reserved_mem *rmem) |
4358 | { |
4359 | rmem->ops = &rmem_avc_ops; |
4360 | enc_pr(LOG_DEBUG, "amvenc_avc reserved mem setup.\n"); |
4361 | return 0; |
4362 | } |
4363 | |
4364 | static int enc_dma_buf_map(struct enc_dma_cfg *cfg) |
4365 | { |
4366 | long ret = -1; |
4367 | int fd = -1; |
4368 | struct dma_buf *dbuf = NULL; |
4369 | struct dma_buf_attachment *d_att = NULL; |
4370 | struct sg_table *sg = NULL; |
4371 | void *vaddr = NULL; |
4372 | struct device *dev = NULL; |
4373 | enum dma_data_direction dir; |
4374 | |
4375 | if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) { |
4376 | enc_pr(LOG_ERROR, "error input param\n"); |
4377 | return -EINVAL; |
4378 | } |
4379 | enc_pr(LOG_INFO, "enc_dma_buf_map, fd %d\n", cfg->fd); |
4380 | |
4381 | fd = cfg->fd; |
4382 | dev = cfg->dev; |
4383 | dir = cfg->dir; |
4384 | enc_pr(LOG_INFO, "enc_dma_buffer_map fd %d\n", fd); |
4385 | |
4386 | dbuf = dma_buf_get(fd); |
4387 | if (dbuf == NULL) { |
4388 | enc_pr(LOG_ERROR, "failed to get dma buffer,fd %d\n",fd); |
4389 | return -EINVAL; |
4390 | } |
4391 | |
4392 | d_att = dma_buf_attach(dbuf, dev); |
4393 | if (d_att == NULL) { |
4394 | enc_pr(LOG_ERROR, "failed to set dma attach\n"); |
4395 | goto attach_err; |
4396 | } |
4397 | |
4398 | sg = dma_buf_map_attachment(d_att, dir); |
4399 | if (sg == NULL) { |
4400 | enc_pr(LOG_ERROR, "failed to get dma sg\n"); |
4401 | goto map_attach_err; |
4402 | } |
4403 | |
4404 | ret = dma_buf_begin_cpu_access(dbuf, dir); |
4405 | if (ret != 0) { |
4406 | enc_pr(LOG_ERROR, "failed to access dma buff\n"); |
4407 | goto access_err; |
4408 | } |
4409 | |
4410 | vaddr = dma_buf_vmap(dbuf); |
4411 | if (vaddr == NULL) { |
4412 | enc_pr(LOG_ERROR, "failed to vmap dma buf\n"); |
4413 | goto vmap_err; |
4414 | } |
4415 | cfg->dbuf = dbuf; |
4416 | cfg->attach = d_att; |
4417 | cfg->vaddr = vaddr; |
4418 | cfg->sg = sg; |
4419 | |
4420 | return ret; |
4421 | |
4422 | vmap_err: |
4423 | dma_buf_end_cpu_access(dbuf, dir); |
4424 | |
4425 | access_err: |
4426 | dma_buf_unmap_attachment(d_att, sg, dir); |
4427 | |
4428 | map_attach_err: |
4429 | dma_buf_detach(dbuf, d_att); |
4430 | |
4431 | attach_err: |
4432 | dma_buf_put(dbuf); |
4433 | |
4434 | return ret; |
4435 | } |
4436 | |
4437 | static int enc_dma_buf_get_phys(struct enc_dma_cfg *cfg, unsigned long *addr) |
4438 | { |
4439 | struct sg_table *sg_table; |
4440 | struct page *page; |
4441 | int ret; |
4442 | enc_pr(LOG_INFO, "enc_dma_buf_get_phys in\n"); |
4443 | |
4444 | ret = enc_dma_buf_map(cfg); |
4445 | if (ret < 0) { |
4446 | enc_pr(LOG_ERROR, "gdc_dma_buf_map failed\n"); |
4447 | return ret; |
4448 | } |
4449 | if (cfg->sg) { |
4450 | sg_table = cfg->sg; |
4451 | page = sg_page(sg_table->sgl); |
4452 | *addr = PFN_PHYS(page_to_pfn(page)); |
4453 | ret = 0; |
4454 | } |
4455 | enc_pr(LOG_INFO, "enc_dma_buf_get_phys 0x%lx\n", *addr); |
4456 | return ret; |
4457 | } |
4458 | |
4459 | static void enc_dma_buf_unmap(struct enc_dma_cfg *cfg) |
4460 | { |
4461 | int fd = -1; |
4462 | struct dma_buf *dbuf = NULL; |
4463 | struct dma_buf_attachment *d_att = NULL; |
4464 | struct sg_table *sg = NULL; |
4465 | void *vaddr = NULL; |
4466 | struct device *dev = NULL; |
4467 | enum dma_data_direction dir; |
4468 | |
4469 | if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL |
4470 | || cfg->dbuf == NULL || cfg->vaddr == NULL |
4471 | || cfg->attach == NULL || cfg->sg == NULL) { |
4472 | enc_pr(LOG_ERROR, "Error input param\n"); |
4473 | return; |
4474 | } |
4475 | |
4476 | fd = cfg->fd; |
4477 | dev = cfg->dev; |
4478 | dir = cfg->dir; |
4479 | dbuf = cfg->dbuf; |
4480 | vaddr = cfg->vaddr; |
4481 | d_att = cfg->attach; |
4482 | sg = cfg->sg; |
4483 | |
4484 | dma_buf_vunmap(dbuf, vaddr); |
4485 | |
4486 | dma_buf_end_cpu_access(dbuf, dir); |
4487 | |
4488 | dma_buf_unmap_attachment(d_att, sg, dir); |
4489 | |
4490 | dma_buf_detach(dbuf, d_att); |
4491 | |
4492 | dma_buf_put(dbuf); |
4493 | enc_pr(LOG_DEBUG, "enc_dma_buffer_unmap vaddr %p\n",(unsigned *)vaddr); |
4494 | } |
4495 | |
4496 | |
/* ---- module parameters: runtime-tunable via /sys/module/.../parameters ---- */

module_param(fixed_slice_cfg, uint, 0664);
MODULE_PARM_DESC(fixed_slice_cfg, "\n fixed_slice_cfg\n");

module_param(clock_level, uint, 0664);
MODULE_PARM_DESC(clock_level, "\n clock_level\n");

module_param(encode_print_level, uint, 0664);
MODULE_PARM_DESC(encode_print_level, "\n encode_print_level\n");

module_param(no_timeout, uint, 0664);
MODULE_PARM_DESC(no_timeout, "\n no_timeout flag for process request\n");

module_param(nr_mode, int, 0664);
MODULE_PARM_DESC(nr_mode, "\n nr_mode option\n");

module_param(qp_table_debug, uint, 0664);
MODULE_PARM_DESC(qp_table_debug, "\n print qp table\n");

#ifdef H264_ENC_SVC
/* SVC (scalable video coding) knobs, consumed when a wq is created */
module_param(svc_enable, uint, 0664);
MODULE_PARM_DESC(svc_enable, "\n svc enable\n");
module_param(svc_ref_conf, uint, 0664);
MODULE_PARM_DESC(svc_ref_conf, "\n svc reference duration config\n");
#endif
4521 | |
4522 | #ifdef MORE_MODULE_PARAM |
4523 | module_param(me_mv_merge_ctl, uint, 0664); |
4524 | MODULE_PARM_DESC(me_mv_merge_ctl, "\n me_mv_merge_ctl\n"); |
4525 | |
4526 | module_param(me_step0_close_mv, uint, 0664); |
4527 | MODULE_PARM_DESC(me_step0_close_mv, "\n me_step0_close_mv\n"); |
4528 | |
4529 | module_param(me_f_skip_sad, uint, 0664); |
4530 | MODULE_PARM_DESC(me_f_skip_sad, "\n me_f_skip_sad\n"); |
4531 | |
4532 | module_param(me_f_skip_weight, uint, 0664); |
4533 | MODULE_PARM_DESC(me_f_skip_weight, "\n me_f_skip_weight\n"); |
4534 | |
4535 | module_param(me_mv_weight_01, uint, 0664); |
4536 | MODULE_PARM_DESC(me_mv_weight_01, "\n me_mv_weight_01\n"); |
4537 | |
4538 | module_param(me_mv_weight_23, uint, 0664); |
4539 | MODULE_PARM_DESC(me_mv_weight_23, "\n me_mv_weight_23\n"); |
4540 | |
4541 | module_param(me_sad_range_inc, uint, 0664); |
4542 | MODULE_PARM_DESC(me_sad_range_inc, "\n me_sad_range_inc\n"); |
4543 | |
4544 | module_param(me_sad_enough_01, uint, 0664); |
4545 | MODULE_PARM_DESC(me_sad_enough_01, "\n me_sad_enough_01\n"); |
4546 | |
4547 | module_param(me_sad_enough_23, uint, 0664); |
4548 | MODULE_PARM_DESC(me_sad_enough_23, "\n me_sad_enough_23\n"); |
4549 | |
4550 | module_param(y_tnr_mc_en, uint, 0664); |
4551 | MODULE_PARM_DESC(y_tnr_mc_en, "\n y_tnr_mc_en option\n"); |
4552 | module_param(y_tnr_txt_mode, uint, 0664); |
4553 | MODULE_PARM_DESC(y_tnr_txt_mode, "\n y_tnr_txt_mode option\n"); |
4554 | module_param(y_tnr_mot_sad_margin, uint, 0664); |
4555 | MODULE_PARM_DESC(y_tnr_mot_sad_margin, "\n y_tnr_mot_sad_margin option\n"); |
4556 | module_param(y_tnr_mot_cortxt_rate, uint, 0664); |
4557 | MODULE_PARM_DESC(y_tnr_mot_cortxt_rate, "\n y_tnr_mot_cortxt_rate option\n"); |
4558 | module_param(y_tnr_mot_distxt_ofst, uint, 0664); |
4559 | MODULE_PARM_DESC(y_tnr_mot_distxt_ofst, "\n y_tnr_mot_distxt_ofst option\n"); |
4560 | module_param(y_tnr_mot_distxt_rate, uint, 0664); |
4561 | MODULE_PARM_DESC(y_tnr_mot_distxt_rate, "\n y_tnr_mot_distxt_rate option\n"); |
4562 | module_param(y_tnr_mot_dismot_ofst, uint, 0664); |
4563 | MODULE_PARM_DESC(y_tnr_mot_dismot_ofst, "\n y_tnr_mot_dismot_ofst option\n"); |
4564 | module_param(y_tnr_mot_frcsad_lock, uint, 0664); |
4565 | MODULE_PARM_DESC(y_tnr_mot_frcsad_lock, "\n y_tnr_mot_frcsad_lock option\n"); |
4566 | module_param(y_tnr_mot2alp_frc_gain, uint, 0664); |
4567 | MODULE_PARM_DESC(y_tnr_mot2alp_frc_gain, "\n y_tnr_mot2alp_frc_gain option\n"); |
4568 | module_param(y_tnr_mot2alp_nrm_gain, uint, 0664); |
4569 | MODULE_PARM_DESC(y_tnr_mot2alp_nrm_gain, "\n y_tnr_mot2alp_nrm_gain option\n"); |
4570 | module_param(y_tnr_mot2alp_dis_gain, uint, 0664); |
4571 | MODULE_PARM_DESC(y_tnr_mot2alp_dis_gain, "\n y_tnr_mot2alp_dis_gain option\n"); |
4572 | module_param(y_tnr_mot2alp_dis_ofst, uint, 0664); |
4573 | MODULE_PARM_DESC(y_tnr_mot2alp_dis_ofst, "\n y_tnr_mot2alp_dis_ofst option\n"); |
4574 | module_param(y_tnr_alpha_min, uint, 0664); |
4575 | MODULE_PARM_DESC(y_tnr_alpha_min, "\n y_tnr_alpha_min option\n"); |
4576 | module_param(y_tnr_alpha_max, uint, 0664); |
4577 | MODULE_PARM_DESC(y_tnr_alpha_max, "\n y_tnr_alpha_max option\n"); |
4578 | module_param(y_tnr_deghost_os, uint, 0664); |
4579 | MODULE_PARM_DESC(y_tnr_deghost_os, "\n y_tnr_deghost_os option\n"); |
4580 | |
/*
 * Chroma temporal-noise-reduction (c_tnr) tuning knobs — the chroma
 * counterparts of the y_tnr parameters above, exported with the same
 * 0664 sysfs permissions.
 */
module_param(c_tnr_mc_en, uint, 0664);
MODULE_PARM_DESC(c_tnr_mc_en, "\n c_tnr_mc_en option\n");
module_param(c_tnr_txt_mode, uint, 0664);
MODULE_PARM_DESC(c_tnr_txt_mode, "\n c_tnr_txt_mode option\n");
module_param(c_tnr_mot_sad_margin, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_sad_margin, "\n c_tnr_mot_sad_margin option\n");
module_param(c_tnr_mot_cortxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_cortxt_rate, "\n c_tnr_mot_cortxt_rate option\n");
module_param(c_tnr_mot_distxt_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_ofst, "\n c_tnr_mot_distxt_ofst option\n");
module_param(c_tnr_mot_distxt_rate, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_distxt_rate, "\n c_tnr_mot_distxt_rate option\n");
module_param(c_tnr_mot_dismot_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_dismot_ofst, "\n c_tnr_mot_dismot_ofst option\n");
module_param(c_tnr_mot_frcsad_lock, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot_frcsad_lock, "\n c_tnr_mot_frcsad_lock option\n");
module_param(c_tnr_mot2alp_frc_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_frc_gain, "\n c_tnr_mot2alp_frc_gain option\n");
module_param(c_tnr_mot2alp_nrm_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_nrm_gain, "\n c_tnr_mot2alp_nrm_gain option\n");
module_param(c_tnr_mot2alp_dis_gain, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_gain, "\n c_tnr_mot2alp_dis_gain option\n");
module_param(c_tnr_mot2alp_dis_ofst, uint, 0664);
MODULE_PARM_DESC(c_tnr_mot2alp_dis_ofst, "\n c_tnr_mot2alp_dis_ofst option\n");
/* Clamp limits for the temporal blend alpha on chroma. */
module_param(c_tnr_alpha_min, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_min, "\n c_tnr_alpha_min option\n");
module_param(c_tnr_alpha_max, uint, 0664);
MODULE_PARM_DESC(c_tnr_alpha_max, "\n c_tnr_alpha_max option\n");
module_param(c_tnr_deghost_os, uint, 0664);
MODULE_PARM_DESC(c_tnr_deghost_os, "\n c_tnr_deghost_os option\n");
4611 | |
/*
 * Luma spatial-noise-reduction (y_snr) and blend (y_bld) tuning knobs.
 * Note: y_snr_gau_bld_ofst is declared with type "int" (signed offset),
 * unlike its siblings which are "uint".
 */
module_param(y_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(y_snr_err_norm, "\n y_snr_err_norm option\n");
module_param(y_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_core, "\n y_snr_gau_bld_core option\n");
module_param(y_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_ofst, "\n y_snr_gau_bld_ofst option\n");
module_param(y_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_bld_rate, "\n y_snr_gau_bld_rate option\n");
module_param(y_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_min, "\n y_snr_gau_alp0_min option\n");
module_param(y_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(y_snr_gau_alp0_max, "\n y_snr_gau_alp0_max option\n");
module_param(y_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(y_bld_beta2alp_rate, "\n y_bld_beta2alp_rate option\n");
module_param(y_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_min, "\n y_bld_beta_min option\n");
module_param(y_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(y_bld_beta_max, "\n y_bld_beta_max option\n");
4630 | |
/*
 * Chroma spatial-noise-reduction (c_snr) and blend (c_bld) tuning knobs —
 * chroma counterparts of the y_snr/y_bld parameters above.
 * c_snr_gau_bld_ofst is signed ("int"), matching y_snr_gau_bld_ofst.
 */
module_param(c_snr_err_norm, uint, 0664);
MODULE_PARM_DESC(c_snr_err_norm, "\n c_snr_err_norm option\n");
module_param(c_snr_gau_bld_core, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_core, "\n c_snr_gau_bld_core option\n");
module_param(c_snr_gau_bld_ofst, int, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_ofst, "\n c_snr_gau_bld_ofst option\n");
module_param(c_snr_gau_bld_rate, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_bld_rate, "\n c_snr_gau_bld_rate option\n");
module_param(c_snr_gau_alp0_min, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_min, "\n c_snr_gau_alp0_min option\n");
module_param(c_snr_gau_alp0_max, uint, 0664);
MODULE_PARM_DESC(c_snr_gau_alp0_max, "\n c_snr_gau_alp0_max option\n");
module_param(c_bld_beta2alp_rate, uint, 0664);
MODULE_PARM_DESC(c_bld_beta2alp_rate, "\n c_bld_beta2alp_rate option\n");
module_param(c_bld_beta_min, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_min, "\n c_bld_beta_min option\n");
module_param(c_bld_beta_max, uint, 0664);
MODULE_PARM_DESC(c_bld_beta_max, "\n c_bld_beta_max option\n");
/* Closes a conditional-compilation block opened earlier in the file. */
#endif
4650 | |
/* Module entry/exit points for the AVC hardware encoder driver. */
module_init(amvenc_avc_driver_init_module);
module_exit(amvenc_avc_driver_remove_module);
/*
 * Bind the "amvenc-memory" reserved-memory region from the device tree to
 * avc_mem_setup so the encoder gets its dedicated carveout at early boot.
 * NOTE(review): the compatible string contains a space after the comma
 * ("amlogic, amvenc-memory"), which deviates from the usual
 * "vendor,device" DT convention — it must match the vendor DTS exactly,
 * so verify against the platform device tree before "fixing" it.
 */
RESERVEDMEM_OF_DECLARE(amvenc_avc, "amlogic, amvenc-memory", avc_mem_setup);

MODULE_DESCRIPTION("AMLOGIC AVC Video Encoder Driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("simon.zheng <simon.zheng@amlogic.com>");
4658 |