path: root/libavcodec/libopenh264enc.c (plain)
blob: 9c22bf4f30cf195a8635025b053027a86b52c747
/*
 * OpenH264 video encoder
 * Copyright (C) 2014 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <wels/codec_api.h>
#include <wels/codec_ver.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

#include "avcodec.h"
#include "internal.h"
#include "libopenh264.h"

#if !OPENH264_VER_AT_LEAST(1, 6)
#define SM_SIZELIMITED_SLICE SM_DYN_SLICE
#endif

typedef struct SVCContext {
    const AVClass *av_class;
    ISVCEncoder *encoder;
    int slice_mode;
    int loopfilter;
    char *profile;
    int max_nal_size;
    int skip_frames;
    int skipped;
    int cabac;
} SVCContext;

#define OFFSET(x) offsetof(SVCContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
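/*
 * Illustrative only (not used by the code below): these private options are
 * exposed on the ffmpeg command line when FFmpeg is configured with
 * --enable-libopenh264, e.g. (file names and bitrates are placeholder
 * example values)
 *
 *     ffmpeg -i in.mp4 -c:v libopenh264 -b:v 2M -maxrate 3M \
 *            -slice_mode fixed -slices 4 -allow_skip_frames 1 out.mkv
 */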
static const AVOption options[] = {
#if OPENH264_VER_AT_LEAST(1, 6)
    { "slice_mode", "set slice mode", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_FIXEDSLCNUM_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE, "slice_mode" },
#else
    { "slice_mode", "set slice mode", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_AUTO_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE, "slice_mode" },
#endif
    { "fixed", "a fixed number of slices", 0, AV_OPT_TYPE_CONST, { .i64 = SM_FIXEDSLCNUM_SLICE }, 0, 0, VE, "slice_mode" },
#if OPENH264_VER_AT_LEAST(1, 6)
    { "dyn", "Size limited (compatibility name)", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
    { "sizelimited", "Size limited", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
#else
    { "rowmb", "one slice per row of macroblocks", 0, AV_OPT_TYPE_CONST, { .i64 = SM_ROWMB_SLICE }, 0, 0, VE, "slice_mode" },
    { "auto", "automatic number of slices according to number of threads", 0, AV_OPT_TYPE_CONST, { .i64 = SM_AUTO_SLICE }, 0, 0, VE, "slice_mode" },
    { "dyn", "Dynamic slicing", 0, AV_OPT_TYPE_CONST, { .i64 = SM_DYN_SLICE }, 0, 0, VE, "slice_mode" },
#endif
    { "loopfilter", "enable loop filter", OFFSET(loopfilter), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
    { "profile", "set profile restrictions", OFFSET(profile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VE },
    { "max_nal_size", "set maximum NAL size in bytes", OFFSET(max_nal_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { "allow_skip_frames", "allow skipping frames to hit the target bitrate", OFFSET(skip_frames), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "cabac", "enable CABAC", OFFSET(cabac), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { NULL }
};

static const AVClass class = {
    "libopenh264enc", av_default_item_name, options, LIBAVUTIL_VERSION_INT
};

static av_cold int svc_encode_close(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;

    if (s->encoder)
        WelsDestroySVCEncoder(s->encoder);
    if (s->skipped > 0)
        av_log(avctx, AV_LOG_WARNING, "%d frames skipped\n", s->skipped);
    return 0;
}

static av_cold int svc_encode_init(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;
    SEncParamExt param = { 0 };
    int err;
    int log_level;
    WelsTraceCallback callback_function;
    AVCPBProperties *props;

    if ((err = ff_libopenh264_check_version(avctx)) < 0)
        return err;

    if (WelsCreateSVCEncoder(&s->encoder)) {
        av_log(avctx, AV_LOG_ERROR, "Unable to create encoder\n");
        return AVERROR_UNKNOWN;
    }

    // Pass all libopenh264 messages to our callback, to allow ourselves to filter them.
    log_level = WELS_LOG_DETAIL;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_LEVEL, &log_level);

    // Set the logging callback function to one that uses av_log() (implemented in libopenh264.c).
    callback_function = (WelsTraceCallback) ff_libopenh264_trace_callback;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK, &callback_function);

    // Set the AVCodecContext as the libopenh264 callback context so that it can be passed to av_log().
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, &avctx);

    (*s->encoder)->GetDefaultParams(s->encoder, &param);

#if FF_API_CODER_TYPE
FF_DISABLE_DEPRECATION_WARNINGS
    if (!s->cabac)
        s->cabac = avctx->coder_type == FF_CODER_TYPE_AC;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

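    /*
     * Fill in the libopenh264 parameter set from the AVCodecContext: the
     * target and maximum bitrate come from bit_rate/rc_max_rate, and the
     * frame rate is derived from time_base (assumed here to be the inverse
     * of the frame rate).
     */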
    param.fMaxFrameRate = 1/av_q2d(avctx->time_base);
    param.iPicWidth = avctx->width;
    param.iPicHeight = avctx->height;
    param.iTargetBitrate = avctx->bit_rate;
    param.iMaxBitrate = FFMAX(avctx->rc_max_rate, avctx->bit_rate);
    param.iRCMode = RC_QUALITY_MODE;
    param.iTemporalLayerNum = 1;
    param.iSpatialLayerNum = 1;
    param.bEnableDenoise = 0;
    param.bEnableBackgroundDetection = 1;
    param.bEnableAdaptiveQuant = 1;
    param.bEnableFrameSkip = s->skip_frames;
    param.bEnableLongTermReference = 0;
    param.iLtrMarkPeriod = 30;
    param.uiIntraPeriod = avctx->gop_size;
#if OPENH264_VER_AT_LEAST(1, 4)
    param.eSpsPpsIdStrategy = CONSTANT_ID;
#else
    param.bEnableSpsPpsIdAddition = 0;
#endif
    param.bPrefixNalAddingCtrl = 0;
    param.iLoopFilterDisableIdc = !s->loopfilter;
    param.iEntropyCodingModeFlag = 0;
    param.iMultipleThreadIdc = avctx->thread_count;
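    /*
     * iEntropyCodingModeFlag selects CABAC (1) or CAVLC (0). CABAC is only
     * allowed from the Main profile upwards, so it is turned on when the
     * "main" profile is requested, or when -cabac is set without a profile.
     */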
    if (s->profile && !strcmp(s->profile, "main"))
        param.iEntropyCodingModeFlag = 1;
    else if (!s->profile && s->cabac)
        param.iEntropyCodingModeFlag = 1;

    param.sSpatialLayers[0].iVideoWidth = param.iPicWidth;
    param.sSpatialLayers[0].iVideoHeight = param.iPicHeight;
    param.sSpatialLayers[0].fFrameRate = param.fMaxFrameRate;
    param.sSpatialLayers[0].iSpatialBitrate = param.iTargetBitrate;
    param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate;

    if ((avctx->slices > 1) && (s->max_nal_size)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid combination -slices %d and -max_nal_size %d.\n",
               avctx->slices, s->max_nal_size);
        return AVERROR(EINVAL);
    }

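    /* A user-supplied slice count or NAL size limit overrides slice_mode:
     * -slices selects a fixed number of slices per frame, while
     * -max_nal_size switches to size-limited slices. */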
    if (avctx->slices > 1)
        s->slice_mode = SM_FIXEDSLCNUM_SLICE;

    if (s->max_nal_size)
        s->slice_mode = SM_SIZELIMITED_SLICE;

#if OPENH264_VER_AT_LEAST(1, 6)
    param.sSpatialLayers[0].sSliceArgument.uiSliceMode = s->slice_mode;
    param.sSpatialLayers[0].sSliceArgument.uiSliceNum = avctx->slices;
#else
    param.sSpatialLayers[0].sSliceCfg.uiSliceMode = s->slice_mode;
    param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices;
#endif

    if (s->slice_mode == SM_SIZELIMITED_SLICE) {
        if (s->max_nal_size) {
            param.uiMaxNalSize = s->max_nal_size;
#if OPENH264_VER_AT_LEAST(1, 6)
            param.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#else
            param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#endif
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid -max_nal_size, "
                   "specify a valid max_nal_size to use -slice_mode dyn\n");
            return AVERROR(EINVAL);
        }
    }

    if ((*s->encoder)->InitializeExt(s->encoder, &param) != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "Initialize failed\n");
        return AVERROR_UNKNOWN;
    }

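    /* With AV_CODEC_FLAG_GLOBAL_HEADER, export the SPS/PPS NAL units through
     * extradata rather than in-band; svc_encode_frame() then drops the
     * corresponding parameter-set layer from IDR packets. */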
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        SFrameBSInfo fbi = { 0 };
        int i, size = 0;
        (*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
        for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++)
            size += fbi.sLayerInfo[0].pNalLengthInByte[i];
        avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = size;
        memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size);
    }

    props = ff_add_cpb_side_data(avctx);
    if (!props)
        return AVERROR(ENOMEM);
    props->max_bitrate = param.iMaxBitrate;
    props->avg_bitrate = param.iTargetBitrate;

    return 0;
}

static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet)
{
    SVCContext *s = avctx->priv_data;
    SFrameBSInfo fbi = { 0 };
    int i, ret;
    int encoded;
    SSourcePicture sp = { 0 };
    int size = 0, layer, first_layer = 0;
    int layer_size[MAX_LAYER_NUM_OF_FRAME] = { 0 };

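    /* Wrap the input AVFrame in an SSourcePicture; only I420 input is
     * supported (see .pix_fmts below), so the three planes map directly. */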
    sp.iColorFormat = videoFormatI420;
    for (i = 0; i < 3; i++) {
        sp.iStride[i] = frame->linesize[i];
        sp.pData[i] = frame->data[i];
    }
    sp.iPicWidth = avctx->width;
    sp.iPicHeight = avctx->height;

    encoded = (*s->encoder)->EncodeFrame(s->encoder, &sp, &fbi);
    if (encoded != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed\n");
        return AVERROR_UNKNOWN;
    }
    if (fbi.eFrameType == videoFrameTypeSkip) {
        s->skipped++;
        av_log(avctx, AV_LOG_DEBUG, "frame skipped\n");
        return 0;
    }
    first_layer = 0;
    // Normal frames are returned with one single layer, while IDR
    // frames have two layers, where the first layer contains the SPS/PPS.
    // If using global headers, don't include the SPS/PPS in the returned
    // packet - thus, only return one layer.
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
        first_layer = fbi.iLayerNum - 1;

    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        for (i = 0; i < fbi.sLayerInfo[layer].iNalCount; i++)
            layer_size[layer] += fbi.sLayerInfo[layer].pNalLengthInByte[i];
        size += layer_size[layer];
    }
    av_log(avctx, AV_LOG_DEBUG, "%d slices\n", fbi.sLayerInfo[fbi.iLayerNum - 1].iNalCount);

    if ((ret = ff_alloc_packet2(avctx, avpkt, size, size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    size = 0;
    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        memcpy(avpkt->data + size, fbi.sLayerInfo[layer].pBsBuf, layer_size[layer]);
        size += layer_size[layer];
    }
    avpkt->pts = frame->pts;
    if (fbi.eFrameType == videoFrameTypeIDR)
        avpkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}

AVCodec ff_libopenh264_encoder = {
    .name           = "libopenh264",
    .long_name      = NULL_IF_CONFIG_SMALL("OpenH264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(SVCContext),
    .init           = svc_encode_init,
    .encode2        = svc_encode_frame,
    .close          = svc_encode_close,
    .capabilities   = AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NONE },
    .priv_class     = &class,
};

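/*
 * A minimal usage sketch (not part of this file): opening this encoder
 * through the public libavcodec API. Error handling is omitted and the
 * resolution, frame rate and bitrate are arbitrary example values.
 *
 *     AVCodec *codec = avcodec_find_encoder_by_name("libopenh264");
 *     AVCodecContext *ctx = avcodec_alloc_context3(codec);
 *     ctx->width     = 1280;
 *     ctx->height    = 720;
 *     ctx->time_base = (AVRational){ 1, 25 };
 *     ctx->pix_fmt   = AV_PIX_FMT_YUV420P;
 *     ctx->bit_rate  = 2000000;
 *     av_opt_set(ctx->priv_data, "profile", "main", 0);
 *     avcodec_open2(ctx, codec, NULL);
 */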