blob: d415f0fb20c18c43acc75723e42407c3a54d3243
1 | /* |
2 | * RTMP network protocol |
3 | * Copyright (c) 2009 Konstantin Shishkov |
4 | * |
5 | * This file is part of FFmpeg. |
6 | * |
7 | * FFmpeg is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2.1 of the License, or (at your option) any later version. |
11 | * |
12 | * FFmpeg is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with FFmpeg; if not, write to the Free Software |
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | */ |
21 | |
22 | /** |
23 | * @file |
24 | * RTMP protocol |
25 | */ |
26 | |
27 | #include "libavcodec/bytestream.h" |
28 | #include "libavutil/avstring.h" |
29 | #include "libavutil/base64.h" |
30 | #include "libavutil/hmac.h" |
31 | #include "libavutil/intfloat.h" |
32 | #include "libavutil/lfg.h" |
33 | #include "libavutil/md5.h" |
34 | #include "libavutil/opt.h" |
35 | #include "libavutil/random_seed.h" |
36 | #include "avformat.h" |
37 | #include "internal.h" |
38 | |
39 | #include "network.h" |
40 | |
41 | #include "flv.h" |
42 | #include "rtmp.h" |
43 | #include "rtmpcrypt.h" |
44 | #include "rtmppkt.h" |
45 | #include "url.h" |
46 | |
47 | #if CONFIG_ZLIB |
48 | #include <zlib.h> |
49 | #endif |
50 | |
#define APP_MAX_LENGTH 1024            ///< buffer size reserved for the "app" string
#define PLAYPATH_MAX_LENGTH 512        ///< buffer size reserved for the stream playpath
#define TCURL_MAX_LENGTH 1024          ///< buffer size reserved for the target stream URL ("tcUrl")
#define FLASHVER_MAX_LENGTH 64         ///< buffer size reserved for the flash version string
#define RTMP_PKTDATA_DEFAULT_SIZE 4096 ///< default payload size for locally created packets
#define RTMP_HEADER 11                 ///< size of the buffered FLV packet header (see RTMPContext.flv_header)
57 | |
/** RTMP protocol handler state; roughly progresses from STATE_START through
 *  handshake to the playing/publishing states, ending in STATE_STOPPED. */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_SEEKING,    ///< client has started the seek operation. Back on STATE_PLAYING when the time comes
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_RECEIVING,  ///< received a publish command (for input)
    STATE_SENDING,    ///< received a play command (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;
70 | |
/** One invoke sent to the peer whose reply we are still waiting for. */
typedef struct TrackedMethod {
    char *name; ///< method name of the pending invoke (strdup'd copy)
    int id;     ///< transaction id the matching reply will carry
} TrackedMethod;
75 | |
/** protocol handler context */
typedef struct RTMPContext {
    const AVClass *class;
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    *prev_pkt[2];               ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
    int           nb_prev_pkt[2];             ///< number of elements in prev_pkt
    int           in_chunk_size;              ///< size of the chunks incoming RTMP packets are divided into
    int           out_chunk_size;             ///< size of the chunks outgoing RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          *playpath;                  ///< stream identifier to play (with possible "mp4:" prefix)
    int           live;                       ///< 0: recorded, -1: live, -2: both
    char          *app;                       ///< name of application
    char          *conn;                      ///< append arbitrary AMF data to the Connect message
    ClientState   state;                      ///< current state
    int           stream_id;                  ///< ID assigned by the server for the stream
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    int           flv_nb_packets;             ///< number of flv packets published
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
    uint32_t      client_report_size;         ///< number of bytes after which client should report to server
    uint64_t      bytes_read;                 ///< number of bytes read from server
    uint64_t      last_bytes_read;            ///< number of bytes read last reported to server
    uint32_t      last_timestamp;             ///< last timestamp received in a packet
    int           skip_bytes;                 ///< number of bytes to skip from the input FLV stream in the next write call
    int           has_audio;                  ///< presence of audio data
    int           has_video;                  ///< presence of video data
    int           received_metadata;          ///< Indicates if we have received metadata about the streams
    uint8_t       flv_header[RTMP_HEADER];    ///< partial incoming flv packet header
    int           flv_header_bytes;           ///< number of initialized bytes in flv_header
    int           nb_invokes;                 ///< keeps track of invoke messages
    char*         tcurl;                      ///< url of the target stream
    char*         flashver;                   ///< version of the flash plugin
    char*         swfhash;                    ///< SHA256 hash of the decompressed SWF file (32 bytes)
    int           swfhash_len;                ///< length of the SHA256 hash
    int           swfsize;                    ///< size of the decompressed SWF file
    char*         swfurl;                     ///< url of the swf player
    char*         swfverify;                  ///< URL to player swf file, compute hash/size automatically
    char          swfverification[42];        ///< hash of the SWF verification
    char*         pageurl;                    ///< url of the web page
    char*         subscribe;                  ///< name of live stream to subscribe
    int           server_bw;                  ///< server bandwidth
    int           client_buffer_time;         ///< client buffer time in ms
    int           flush_interval;             ///< number of packets flushed in the same request (RTMPT only)
    int           encrypted;                  ///< use an encrypted connection (RTMPE only)
    TrackedMethod*tracked_methods;            ///< tracked methods buffer
    int           nb_tracked_methods;         ///< number of tracked methods
    int           tracked_methods_size;       ///< size of the tracked methods buffer
    int           listen;                     ///< listen mode flag
    int           listen_timeout;             ///< listen timeout to wait for new connections
    int           nb_streamid;                ///< The next stream id to return on createStream calls
    double        duration;                   ///< Duration of the stream in seconds as returned by the server (only valid if non-zero)
    char          username[50];               ///< user name for authentication
    char          password[50];               ///< password for authentication
    char          auth_params[500];           ///< auth string appended to the app/tcUrl connect fields (see gen_connect)
    int           do_reconnect;               ///< NOTE(review): appears to request re-connection (e.g. after auth) — confirm against callers
    int           auth_tried;                 ///< whether an authentication attempt was already made
} RTMPContext;
134 | |
#define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
/** Client key used for digest signing: a 30-byte ASCII prefix (used alone for
 *  the first digest) followed by a 32-byte binary tail forming the full key. */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
145 | |
#define SERVER_KEY_OPEN_PART_LEN 36 ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing: a 36-byte ASCII prefix (used alone
 *  for the first digest) followed by a 32-byte binary tail forming the full key. */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
157 | |
/* Forward declarations of the protocol control message handlers used by
 * read_connect() before their definitions appear below. */
static int handle_chunk_size(URLContext *s, RTMPPacket *pkt);
static int handle_server_bw(URLContext *s, RTMPPacket *pkt);
static int handle_client_bw(URLContext *s, RTMPPacket *pkt);
161 | |
162 | static int add_tracked_method(RTMPContext *rt, const char *name, int id) |
163 | { |
164 | int err; |
165 | |
166 | if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) { |
167 | rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2; |
168 | if ((err = av_reallocp(&rt->tracked_methods, rt->tracked_methods_size * |
169 | sizeof(*rt->tracked_methods))) < 0) { |
170 | rt->nb_tracked_methods = 0; |
171 | rt->tracked_methods_size = 0; |
172 | return err; |
173 | } |
174 | } |
175 | |
176 | rt->tracked_methods[rt->nb_tracked_methods].name = av_strdup(name); |
177 | if (!rt->tracked_methods[rt->nb_tracked_methods].name) |
178 | return AVERROR(ENOMEM); |
179 | rt->tracked_methods[rt->nb_tracked_methods].id = id; |
180 | rt->nb_tracked_methods++; |
181 | |
182 | return 0; |
183 | } |
184 | |
185 | static void del_tracked_method(RTMPContext *rt, int index) |
186 | { |
187 | memmove(&rt->tracked_methods[index], &rt->tracked_methods[index + 1], |
188 | sizeof(*rt->tracked_methods) * (rt->nb_tracked_methods - index - 1)); |
189 | rt->nb_tracked_methods--; |
190 | } |
191 | |
192 | static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset, |
193 | char **tracked_method) |
194 | { |
195 | RTMPContext *rt = s->priv_data; |
196 | GetByteContext gbc; |
197 | double pkt_id; |
198 | int ret; |
199 | int i; |
200 | |
201 | bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset); |
202 | if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0) |
203 | return ret; |
204 | |
205 | for (i = 0; i < rt->nb_tracked_methods; i++) { |
206 | if (rt->tracked_methods[i].id != pkt_id) |
207 | continue; |
208 | |
209 | *tracked_method = rt->tracked_methods[i].name; |
210 | del_tracked_method(rt, i); |
211 | break; |
212 | } |
213 | |
214 | return 0; |
215 | } |
216 | |
217 | static void free_tracked_methods(RTMPContext *rt) |
218 | { |
219 | int i; |
220 | |
221 | for (i = 0; i < rt->nb_tracked_methods; i ++) |
222 | av_freep(&rt->tracked_methods[i].name); |
223 | av_freep(&rt->tracked_methods); |
224 | rt->tracked_methods_size = 0; |
225 | rt->nb_tracked_methods = 0; |
226 | } |
227 | |
/**
 * Send a packet to the peer and destroy it afterwards.
 *
 * If the packet is an invoke and tracking is requested, the method name and
 * transaction id are parsed back out of the serialized AMF payload and
 * recorded (add_tracked_method()) so the matching reply can be identified.
 *
 * @param rt    RTMP protocol context
 * @param pkt   packet to send; always destroyed before returning
 * @param track whether to remember the invoke's transaction id
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
{
    int ret;

    if (pkt->type == RTMP_PT_INVOKE && track) {
        GetByteContext gbc;
        char name[128];
        double pkt_id;
        int len;

        // re-read name and transaction id from the already-serialized payload
        bytestream2_init(&gbc, pkt->data, pkt->size);
        if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
            goto fail;

        if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
            goto fail;

        if ((ret = add_tracked_method(rt, name, pkt_id)) < 0)
            goto fail;
    }

    ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
fail:
    // destroy on every path — the caller never reuses the packet
    ff_rtmp_packet_destroy(pkt);
    return ret;
}
255 | |
/**
 * Serialize one user-supplied "conn" parameter into AMF at *p.
 *
 * @param s     URLContext, used only for logging
 * @param param parameter description; modified in place when it carries a
 *              field name (the ':' separator is overwritten with '\0')
 * @param p     output position, advanced past the written AMF data
 * @return 0 on success, AVERROR(EINVAL) on malformed input
 */
static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
{
    char *field, *value;
    char type;

    /* The type must be B for Boolean, N for number, S for string, O for
     * object, or Z for null. For Booleans the data must be either 0 or 1 for
     * FALSE or TRUE, respectively. Likewise for Objects the data must be
     * 0 or 1 to end or begin an object, respectively. Data items in subobjects
     * may be named, by prefixing the type with 'N' and specifying the name
     * before the value (ie. NB:myFlag:1). This option may be used multiple times
     * to construct arbitrary AMF sequences. */
    if (param[0] && param[1] == ':') {
        // unnamed value: "<type>:<value>"
        type = param[0];
        value = param + 2;
    } else if (param[0] == 'N' && param[1] && param[2] == ':') {
        // named value: "N<type>:<field>:<value>"
        type = param[1];
        field = param + 3;
        value = strchr(field, ':');
        if (!value)
            goto fail;
        *value = '\0';
        value++;

        ff_amf_write_field_name(p, field);
    } else {
        goto fail;
    }

    switch (type) {
    case 'B':
        ff_amf_write_bool(p, value[0] != '0');
        break;
    case 'S':
        ff_amf_write_string(p, value);
        break;
    case 'N':
        ff_amf_write_number(p, strtod(value, NULL));
        break;
    case 'Z':
        ff_amf_write_null(p);
        break;
    case 'O':
        // "O:1" opens an object, "O:0" closes it
        if (value[0] != '0')
            ff_amf_write_object_start(p);
        else
            ff_amf_write_object_end(p);
        break;
    default:
        goto fail;
        break;
    }

    return 0;

fail:
    av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
    return AVERROR(EINVAL);
}
315 | |
/**
 * Generate 'connect' call and send it to the server.
 *
 * Builds the AMF command object (app, flashVer, tcUrl, ... — the order of
 * the writes below defines the wire format), optionally appends arbitrary
 * user-supplied AMF data from rt->conn, then sends the invoke with reply
 * tracking enabled.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int gen_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 4096 + APP_MAX_LENGTH)) < 0)
        return ret;

    p = pkt.data;

    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    // auth_params (if any) are appended to the app string
    ff_amf_write_string2(&p, rt->app, rt->auth_params);

    if (!rt->is_input) {
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, rt->flashver);

    if (rt->swfurl) {
        ff_amf_write_field_name(&p, "swfUrl");
        ff_amf_write_string(&p, rt->swfurl);
    }

    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string2(&p, rt->tcurl, rt->auth_params);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);

        /* Tell the server we support all the audio codecs except
         * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
         * which are unused in the RTMP protocol implementation. */
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 4071.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);

        if (rt->pageurl) {
            ff_amf_write_field_name(&p, "pageUrl");
            ff_amf_write_string(&p, rt->pageurl);
        }
    }
    ff_amf_write_object_end(&p);

    if (rt->conn) {
        char *param = rt->conn;

        // Write arbitrary AMF data to the Connect message.
        // rt->conn holds space-separated parameters; tokenized in place.
        while (param) {
            char *sep;
            param += strspn(param, " ");
            if (!*param)
                break;
            sep = strchr(param, ' ');
            if (sep)
                *sep = '\0';
            if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
                // Invalid AMF parameter.
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }

            if (sep)
                param = sep + 1;
            else
                break;
        }
    }

    // shrink the packet to the number of bytes actually written
    pkt.size = p - pkt.data;

    return rtmp_send_packet(rt, &pkt, 1);
}
403 | |
404 | |
#define RTMP_CTRL_ABORT_MESSAGE (2)

/**
 * Listen-mode counterpart of gen_connect(): read packets until the client's
 * 'connect' invoke arrives (handling protocol control messages on the way),
 * validate it, then send the standard server reply sequence — window
 * acknowledgement size, peer bandwidth, Stream Begin ping, chunk size,
 * the _result for connect, and onBWDone.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int read_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt = { 0 };
    uint8_t *p;
    const uint8_t *cp;
    int ret;
    char command[64];
    int stringlen;
    double seqnum;
    uint8_t tmpstr[256];
    GetByteContext gbc;

    // handle RTMP Protocol Control Messages
    for (;;) {
        if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
                                       &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
            return ret;
#ifdef DEBUG
        ff_rtmp_packet_dump(s, &pkt);
#endif
        if (pkt.type == RTMP_PT_CHUNK_SIZE) {
            if ((ret = handle_chunk_size(s, &pkt)) < 0) {
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }
        } else if (pkt.type == RTMP_CTRL_ABORT_MESSAGE) {
            av_log(s, AV_LOG_ERROR, "received abort message\n");
            ff_rtmp_packet_destroy(&pkt);
            return AVERROR_UNKNOWN;
        } else if (pkt.type == RTMP_PT_BYTES_READ) {
            av_log(s, AV_LOG_TRACE, "received acknowledgement\n");
        } else if (pkt.type == RTMP_PT_SERVER_BW) {
            if ((ret = handle_server_bw(s, &pkt)) < 0) {
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }
        } else if (pkt.type == RTMP_PT_CLIENT_BW) {
            if ((ret = handle_client_bw(s, &pkt)) < 0) {
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }
        } else if (pkt.type == RTMP_PT_INVOKE) {
            // received RTMP Command Message
            break;
        } else {
            av_log(s, AV_LOG_ERROR, "Unknown control message type (%d)\n", pkt.type);
        }
        ff_rtmp_packet_destroy(&pkt);
    }

    // the invoke must be "connect <seqnum> { ... app ... }"
    cp = pkt.data;
    bytestream2_init(&gbc, cp, pkt.size);
    if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
        av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
        ff_rtmp_packet_destroy(&pkt);
        return AVERROR_INVALIDDATA;
    }
    if (strcmp(command, "connect")) {
        av_log(s, AV_LOG_ERROR, "Expecting connect, got %s\n", command);
        ff_rtmp_packet_destroy(&pkt);
        return AVERROR_INVALIDDATA;
    }
    ret = ff_amf_read_number(&gbc, &seqnum);
    if (ret)
        av_log(s, AV_LOG_WARNING, "SeqNum not found\n");
    /* Here one could parse an AMF Object with data as flashVers and others. */
    ret = ff_amf_get_field_value(gbc.buffer,
                                 gbc.buffer + bytestream2_get_bytes_left(&gbc),
                                 "app", tmpstr, sizeof(tmpstr));
    if (ret)
        av_log(s, AV_LOG_WARNING, "App field not found in connect\n");
    // mismatch is only warned about, not treated as fatal
    if (!ret && strcmp(tmpstr, rt->app))
        av_log(s, AV_LOG_WARNING, "App field don't match up: %s <-> %s\n",
               tmpstr, rt->app);
    ff_rtmp_packet_destroy(&pkt);

    // Send Window Acknowledgement Size (as defined in specification)
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_SERVER_BW, 0, 4)) < 0)
        return ret;
    p = pkt.data;
    bytestream_put_be32(&p, rt->server_bw);
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;
    // Send Peer Bandwidth
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_CLIENT_BW, 0, 5)) < 0)
        return ret;
    p = pkt.data;
    bytestream_put_be32(&p, rt->server_bw);
    bytestream_put_byte(&p, 2); // dynamic
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Ping request
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_PING, 0, 6)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be16(&p, 0); // 0 -> Stream Begin
    bytestream_put_be32(&p, 0);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Chunk size
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
                                     RTMP_PT_CHUNK_SIZE, 0, 4)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be32(&p, rt->out_chunk_size);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    // Send _result NetConnection.Connect.Success to connect
    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
                                     RTMP_PT_INVOKE, 0,
                                     RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
        return ret;

    p = pkt.data;
    ff_amf_write_string(&p, "_result");
    ff_amf_write_number(&p, seqnum); // echo the client's transaction id

    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "fmsVer");
    ff_amf_write_string(&p, "FMS/3,0,1,123");
    ff_amf_write_field_name(&p, "capabilities");
    ff_amf_write_number(&p, 31);
    ff_amf_write_object_end(&p);

    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "level");
    ff_amf_write_string(&p, "status");
    ff_amf_write_field_name(&p, "code");
    ff_amf_write_string(&p, "NetConnection.Connect.Success");
    ff_amf_write_field_name(&p, "description");
    ff_amf_write_string(&p, "Connection succeeded.");
    ff_amf_write_field_name(&p, "objectEncoding");
    ff_amf_write_number(&p, 0);
    ff_amf_write_object_end(&p);

    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
    if (ret < 0)
        return ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
                                     RTMP_PT_INVOKE, 0, 30)) < 0)
        return ret;
    p = pkt.data;
    ff_amf_write_string(&p, "onBWDone");
    ff_amf_write_number(&p, 0);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, 8192);
    pkt.size = p - pkt.data;
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}
586 | |
587 | /** |
588 | * Generate 'releaseStream' call and send it to the server. It should make |
589 | * the server release some channel for media streams. |
590 | */ |
591 | static int gen_release_stream(URLContext *s, RTMPContext *rt) |
592 | { |
593 | RTMPPacket pkt; |
594 | uint8_t *p; |
595 | int ret; |
596 | |
597 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
598 | 0, 29 + strlen(rt->playpath))) < 0) |
599 | return ret; |
600 | |
601 | av_log(s, AV_LOG_DEBUG, "Releasing stream...\n"); |
602 | p = pkt.data; |
603 | ff_amf_write_string(&p, "releaseStream"); |
604 | ff_amf_write_number(&p, ++rt->nb_invokes); |
605 | ff_amf_write_null(&p); |
606 | ff_amf_write_string(&p, rt->playpath); |
607 | |
608 | return rtmp_send_packet(rt, &pkt, 1); |
609 | } |
610 | |
611 | /** |
612 | * Generate 'FCPublish' call and send it to the server. It should make |
613 | * the server prepare for receiving media streams. |
614 | */ |
615 | static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt) |
616 | { |
617 | RTMPPacket pkt; |
618 | uint8_t *p; |
619 | int ret; |
620 | |
621 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
622 | 0, 25 + strlen(rt->playpath))) < 0) |
623 | return ret; |
624 | |
625 | av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n"); |
626 | p = pkt.data; |
627 | ff_amf_write_string(&p, "FCPublish"); |
628 | ff_amf_write_number(&p, ++rt->nb_invokes); |
629 | ff_amf_write_null(&p); |
630 | ff_amf_write_string(&p, rt->playpath); |
631 | |
632 | return rtmp_send_packet(rt, &pkt, 1); |
633 | } |
634 | |
635 | /** |
636 | * Generate 'FCUnpublish' call and send it to the server. It should make |
637 | * the server destroy stream. |
638 | */ |
639 | static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt) |
640 | { |
641 | RTMPPacket pkt; |
642 | uint8_t *p; |
643 | int ret; |
644 | |
645 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
646 | 0, 27 + strlen(rt->playpath))) < 0) |
647 | return ret; |
648 | |
649 | av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n"); |
650 | p = pkt.data; |
651 | ff_amf_write_string(&p, "FCUnpublish"); |
652 | ff_amf_write_number(&p, ++rt->nb_invokes); |
653 | ff_amf_write_null(&p); |
654 | ff_amf_write_string(&p, rt->playpath); |
655 | |
656 | return rtmp_send_packet(rt, &pkt, 0); |
657 | } |
658 | |
659 | /** |
660 | * Generate 'createStream' call and send it to the server. It should make |
661 | * the server allocate some channel for media streams. |
662 | */ |
663 | static int gen_create_stream(URLContext *s, RTMPContext *rt) |
664 | { |
665 | RTMPPacket pkt; |
666 | uint8_t *p; |
667 | int ret; |
668 | |
669 | av_log(s, AV_LOG_DEBUG, "Creating stream...\n"); |
670 | |
671 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
672 | 0, 25)) < 0) |
673 | return ret; |
674 | |
675 | p = pkt.data; |
676 | ff_amf_write_string(&p, "createStream"); |
677 | ff_amf_write_number(&p, ++rt->nb_invokes); |
678 | ff_amf_write_null(&p); |
679 | |
680 | return rtmp_send_packet(rt, &pkt, 1); |
681 | } |
682 | |
683 | |
684 | /** |
685 | * Generate 'deleteStream' call and send it to the server. It should make |
686 | * the server remove some channel for media streams. |
687 | */ |
688 | static int gen_delete_stream(URLContext *s, RTMPContext *rt) |
689 | { |
690 | RTMPPacket pkt; |
691 | uint8_t *p; |
692 | int ret; |
693 | |
694 | av_log(s, AV_LOG_DEBUG, "Deleting stream...\n"); |
695 | |
696 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
697 | 0, 34)) < 0) |
698 | return ret; |
699 | |
700 | p = pkt.data; |
701 | ff_amf_write_string(&p, "deleteStream"); |
702 | ff_amf_write_number(&p, ++rt->nb_invokes); |
703 | ff_amf_write_null(&p); |
704 | ff_amf_write_number(&p, rt->stream_id); |
705 | |
706 | return rtmp_send_packet(rt, &pkt, 0); |
707 | } |
708 | |
709 | /** |
710 | * Generate 'getStreamLength' call and send it to the server. If the server |
711 | * knows the duration of the selected stream, it will reply with the duration |
712 | * in seconds. |
713 | */ |
714 | static int gen_get_stream_length(URLContext *s, RTMPContext *rt) |
715 | { |
716 | RTMPPacket pkt; |
717 | uint8_t *p; |
718 | int ret; |
719 | |
720 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, |
721 | 0, 31 + strlen(rt->playpath))) < 0) |
722 | return ret; |
723 | |
724 | p = pkt.data; |
725 | ff_amf_write_string(&p, "getStreamLength"); |
726 | ff_amf_write_number(&p, ++rt->nb_invokes); |
727 | ff_amf_write_null(&p); |
728 | ff_amf_write_string(&p, rt->playpath); |
729 | |
730 | return rtmp_send_packet(rt, &pkt, 1); |
731 | } |
732 | |
733 | /** |
734 | * Generate client buffer time and send it to the server. |
735 | */ |
736 | static int gen_buffer_time(URLContext *s, RTMPContext *rt) |
737 | { |
738 | RTMPPacket pkt; |
739 | uint8_t *p; |
740 | int ret; |
741 | |
742 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, |
743 | 1, 10)) < 0) |
744 | return ret; |
745 | |
746 | p = pkt.data; |
747 | bytestream_put_be16(&p, 3); |
748 | bytestream_put_be32(&p, rt->stream_id); |
749 | bytestream_put_be32(&p, rt->client_buffer_time); |
750 | |
751 | return rtmp_send_packet(rt, &pkt, 0); |
752 | } |
753 | |
754 | /** |
755 | * Generate 'play' call and send it to the server, then ping the server |
756 | * to start actual playing. |
757 | */ |
758 | static int gen_play(URLContext *s, RTMPContext *rt) |
759 | { |
760 | RTMPPacket pkt; |
761 | uint8_t *p; |
762 | int ret; |
763 | |
764 | av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath); |
765 | |
766 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, |
767 | 0, 29 + strlen(rt->playpath))) < 0) |
768 | return ret; |
769 | |
770 | pkt.extra = rt->stream_id; |
771 | |
772 | p = pkt.data; |
773 | ff_amf_write_string(&p, "play"); |
774 | ff_amf_write_number(&p, ++rt->nb_invokes); |
775 | ff_amf_write_null(&p); |
776 | ff_amf_write_string(&p, rt->playpath); |
777 | ff_amf_write_number(&p, rt->live * 1000); |
778 | |
779 | return rtmp_send_packet(rt, &pkt, 1); |
780 | } |
781 | |
782 | static int gen_seek(URLContext *s, RTMPContext *rt, int64_t timestamp) |
783 | { |
784 | RTMPPacket pkt; |
785 | uint8_t *p; |
786 | int ret; |
787 | |
788 | av_log(s, AV_LOG_DEBUG, "Sending seek command for timestamp %"PRId64"\n", |
789 | timestamp); |
790 | |
791 | if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 26)) < 0) |
792 | return ret; |
793 | |
794 | pkt.extra = rt->stream_id; |
795 | |
796 | p = pkt.data; |
797 | ff_amf_write_string(&p, "seek"); |
798 | ff_amf_write_number(&p, 0); //no tracking back responses |
799 | ff_amf_write_null(&p); //as usual, the first null param |
800 | ff_amf_write_number(&p, timestamp); //where we want to jump |
801 | |
802 | return rtmp_send_packet(rt, &pkt, 1); |
803 | } |
804 | |
805 | /** |
806 | * Generate a pause packet that either pauses or unpauses the current stream. |
807 | */ |
808 | static int gen_pause(URLContext *s, RTMPContext *rt, int pause, uint32_t timestamp) |
809 | { |
810 | RTMPPacket pkt; |
811 | uint8_t *p; |
812 | int ret; |
813 | |
814 | av_log(s, AV_LOG_DEBUG, "Sending pause command for timestamp %d\n", |
815 | timestamp); |
816 | |
817 | if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 29)) < 0) |
818 | return ret; |
819 | |
820 | pkt.extra = rt->stream_id; |
821 | |
822 | p = pkt.data; |
823 | ff_amf_write_string(&p, "pause"); |
824 | ff_amf_write_number(&p, 0); //no tracking back responses |
825 | ff_amf_write_null(&p); //as usual, the first null param |
826 | ff_amf_write_bool(&p, pause); // pause or unpause |
827 | ff_amf_write_number(&p, timestamp); //where we pause the stream |
828 | |
829 | return rtmp_send_packet(rt, &pkt, 1); |
830 | } |
831 | |
832 | /** |
833 | * Generate 'publish' call and send it to the server. |
834 | */ |
835 | static int gen_publish(URLContext *s, RTMPContext *rt) |
836 | { |
837 | RTMPPacket pkt; |
838 | uint8_t *p; |
839 | int ret; |
840 | |
841 | av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath); |
842 | |
843 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, |
844 | 0, 30 + strlen(rt->playpath))) < 0) |
845 | return ret; |
846 | |
847 | pkt.extra = rt->stream_id; |
848 | |
849 | p = pkt.data; |
850 | ff_amf_write_string(&p, "publish"); |
851 | ff_amf_write_number(&p, ++rt->nb_invokes); |
852 | ff_amf_write_null(&p); |
853 | ff_amf_write_string(&p, rt->playpath); |
854 | ff_amf_write_string(&p, "live"); |
855 | |
856 | return rtmp_send_packet(rt, &pkt, 1); |
857 | } |
858 | |
859 | /** |
860 | * Generate ping reply and send it to the server. |
861 | */ |
862 | static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt) |
863 | { |
864 | RTMPPacket pkt; |
865 | uint8_t *p; |
866 | int ret; |
867 | |
868 | if (ppkt->size < 6) { |
869 | av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n", |
870 | ppkt->size); |
871 | return AVERROR_INVALIDDATA; |
872 | } |
873 | |
874 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, |
875 | ppkt->timestamp + 1, 6)) < 0) |
876 | return ret; |
877 | |
878 | p = pkt.data; |
879 | bytestream_put_be16(&p, 7); |
880 | bytestream_put_be32(&p, AV_RB32(ppkt->data+2)); |
881 | |
882 | return rtmp_send_packet(rt, &pkt, 0); |
883 | } |
884 | |
885 | /** |
886 | * Generate SWF verification message and send it to the server. |
887 | */ |
888 | static int gen_swf_verification(URLContext *s, RTMPContext *rt) |
889 | { |
890 | RTMPPacket pkt; |
891 | uint8_t *p; |
892 | int ret; |
893 | |
894 | av_log(s, AV_LOG_DEBUG, "Sending SWF verification...\n"); |
895 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, |
896 | 0, 44)) < 0) |
897 | return ret; |
898 | |
899 | p = pkt.data; |
900 | bytestream_put_be16(&p, 27); |
901 | memcpy(p, rt->swfverification, 42); |
902 | |
903 | return rtmp_send_packet(rt, &pkt, 0); |
904 | } |
905 | |
906 | /** |
907 | * Generate server bandwidth message and send it to the server. |
908 | */ |
909 | static int gen_server_bw(URLContext *s, RTMPContext *rt) |
910 | { |
911 | RTMPPacket pkt; |
912 | uint8_t *p; |
913 | int ret; |
914 | |
915 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW, |
916 | 0, 4)) < 0) |
917 | return ret; |
918 | |
919 | p = pkt.data; |
920 | bytestream_put_be32(&p, rt->server_bw); |
921 | |
922 | return rtmp_send_packet(rt, &pkt, 0); |
923 | } |
924 | |
925 | /** |
926 | * Generate check bandwidth message and send it to the server. |
927 | */ |
928 | static int gen_check_bw(URLContext *s, RTMPContext *rt) |
929 | { |
930 | RTMPPacket pkt; |
931 | uint8_t *p; |
932 | int ret; |
933 | |
934 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
935 | 0, 21)) < 0) |
936 | return ret; |
937 | |
938 | p = pkt.data; |
939 | ff_amf_write_string(&p, "_checkbw"); |
940 | ff_amf_write_number(&p, ++rt->nb_invokes); |
941 | ff_amf_write_null(&p); |
942 | |
943 | return rtmp_send_packet(rt, &pkt, 1); |
944 | } |
945 | |
946 | /** |
947 | * Generate report on bytes read so far and send it to the server. |
948 | */ |
949 | static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts) |
950 | { |
951 | RTMPPacket pkt; |
952 | uint8_t *p; |
953 | int ret; |
954 | |
955 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ, |
956 | ts, 4)) < 0) |
957 | return ret; |
958 | |
959 | p = pkt.data; |
960 | bytestream_put_be32(&p, rt->bytes_read); |
961 | |
962 | return rtmp_send_packet(rt, &pkt, 0); |
963 | } |
964 | |
965 | static int gen_fcsubscribe_stream(URLContext *s, RTMPContext *rt, |
966 | const char *subscribe) |
967 | { |
968 | RTMPPacket pkt; |
969 | uint8_t *p; |
970 | int ret; |
971 | |
972 | if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, |
973 | 0, 27 + strlen(subscribe))) < 0) |
974 | return ret; |
975 | |
976 | p = pkt.data; |
977 | ff_amf_write_string(&p, "FCSubscribe"); |
978 | ff_amf_write_number(&p, ++rt->nb_invokes); |
979 | ff_amf_write_null(&p); |
980 | ff_amf_write_string(&p, subscribe); |
981 | |
982 | return rtmp_send_packet(rt, &pkt, 1); |
983 | } |
984 | |
985 | int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap, |
986 | const uint8_t *key, int keylen, uint8_t *dst) |
987 | { |
988 | AVHMAC *hmac; |
989 | |
990 | hmac = av_hmac_alloc(AV_HMAC_SHA256); |
991 | if (!hmac) |
992 | return AVERROR(ENOMEM); |
993 | |
994 | av_hmac_init(hmac, key, keylen); |
995 | if (gap <= 0) { |
996 | av_hmac_update(hmac, src, len); |
997 | } else { //skip 32 bytes used for storing digest |
998 | av_hmac_update(hmac, src, gap); |
999 | av_hmac_update(hmac, src + gap + 32, len - gap - 32); |
1000 | } |
1001 | av_hmac_final(hmac, dst, 32); |
1002 | |
1003 | av_hmac_free(hmac); |
1004 | |
1005 | return 0; |
1006 | } |
1007 | |
/**
 * Derive the digest offset used by the RTMP handshake scheme:
 * the sum of four bytes at buf[off..off+3], reduced modulo mod_val,
 * plus add_val.
 */
int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
                            int add_val)
{
    const uint8_t *p = buf + off;
    int sum = p[0] + p[1] + p[2] + p[3];

    return sum % mod_val + add_val;
}
1019 | |
1020 | /** |
1021 | * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest |
1022 | * will be stored) into that packet. |
1023 | * |
1024 | * @param buf handshake data (1536 bytes) |
1025 | * @param encrypted use an encrypted connection (RTMPE) |
1026 | * @return offset to the digest inside input data |
1027 | */ |
1028 | static int rtmp_handshake_imprint_with_digest(uint8_t *buf, int encrypted) |
1029 | { |
1030 | int ret, digest_pos; |
1031 | |
1032 | if (encrypted) |
1033 | digest_pos = ff_rtmp_calc_digest_pos(buf, 772, 728, 776); |
1034 | else |
1035 | digest_pos = ff_rtmp_calc_digest_pos(buf, 8, 728, 12); |
1036 | |
1037 | ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos, |
1038 | rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN, |
1039 | buf + digest_pos); |
1040 | if (ret < 0) |
1041 | return ret; |
1042 | |
1043 | return digest_pos; |
1044 | } |
1045 | |
1046 | /** |
1047 | * Verify that the received server response has the expected digest value. |
1048 | * |
1049 | * @param buf handshake data received from the server (1536 bytes) |
1050 | * @param off position to search digest offset from |
1051 | * @return 0 if digest is valid, digest position otherwise |
1052 | */ |
1053 | static int rtmp_validate_digest(uint8_t *buf, int off) |
1054 | { |
1055 | uint8_t digest[32]; |
1056 | int ret, digest_pos; |
1057 | |
1058 | digest_pos = ff_rtmp_calc_digest_pos(buf, off, 728, off + 4); |
1059 | |
1060 | ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos, |
1061 | rtmp_server_key, SERVER_KEY_OPEN_PART_LEN, |
1062 | digest); |
1063 | if (ret < 0) |
1064 | return ret; |
1065 | |
1066 | if (!memcmp(digest, buf + digest_pos, 32)) |
1067 | return digest_pos; |
1068 | return 0; |
1069 | } |
1070 | |
1071 | static int rtmp_calc_swf_verification(URLContext *s, RTMPContext *rt, |
1072 | uint8_t *buf) |
1073 | { |
1074 | uint8_t *p; |
1075 | int ret; |
1076 | |
1077 | if (rt->swfhash_len != 32) { |
1078 | av_log(s, AV_LOG_ERROR, |
1079 | "Hash of the decompressed SWF file is not 32 bytes long.\n"); |
1080 | return AVERROR(EINVAL); |
1081 | } |
1082 | |
1083 | p = &rt->swfverification[0]; |
1084 | bytestream_put_byte(&p, 1); |
1085 | bytestream_put_byte(&p, 1); |
1086 | bytestream_put_be32(&p, rt->swfsize); |
1087 | bytestream_put_be32(&p, rt->swfsize); |
1088 | |
1089 | if ((ret = ff_rtmp_calc_digest(rt->swfhash, 32, 0, buf, 32, p)) < 0) |
1090 | return ret; |
1091 | |
1092 | return 0; |
1093 | } |
1094 | |
#if CONFIG_ZLIB
/**
 * Inflate a zlib-compressed SWF player body into a growing heap buffer.
 *
 * @param in_data  compressed payload (everything after the 8-byte SWF header)
 * @param in_size  number of compressed input bytes
 * @param out_data in/out: heap buffer that is reallocated and appended to;
 *                 caller owns and frees it
 * @param out_size in/out: current/updated byte count in *out_data
 * @return a non-negative zlib status on success, negative AVERROR on failure
 *         (callers only treat < 0 as error)
 */
static int rtmp_uncompress_swfplayer(uint8_t *in_data, int64_t in_size,
                                     uint8_t **out_data, int64_t *out_size)
{
    z_stream zs = { 0 };
    void *ptr;
    int size;
    int ret = 0;

    zs.avail_in = in_size;
    zs.next_in = in_data;
    ret = inflateInit(&zs);
    if (ret != Z_OK)
        return AVERROR_UNKNOWN;

    /* Decompress in 16 KiB steps; a full output window means there may be
     * more data, so loop until inflate leaves avail_out non-zero. */
    do {
        uint8_t tmp_buf[16384];

        zs.avail_out = sizeof(tmp_buf);
        zs.next_out = tmp_buf;

        ret = inflate(&zs, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_UNKNOWN;
            goto fail;
        }

        size = sizeof(tmp_buf) - zs.avail_out;
        /* Grow the caller's buffer and append this chunk. */
        if (!(ptr = av_realloc(*out_data, *out_size + size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        *out_data = ptr;

        memcpy(*out_data + *out_size, tmp_buf, size);
        *out_size += size;
    } while (zs.avail_out == 0);

fail:
    /* Always release zlib state, on success and failure alike. */
    inflateEnd(&zs);
    return ret;
}
#endif
1138 | |
/**
 * Download the SWF player referenced by rt->swfverify, decompress it if it
 * carries the zlib-compressed "CWS" signature, compute its HMAC-SHA256
 * (keyed with "Genuine Adobe Flash Player 001") and store hash and size in
 * the context options for later SWFVerification replies.
 *
 * @return 0 on success, negative AVERROR otherwise
 */
static int rtmp_calc_swfhash(URLContext *s)
{
    RTMPContext *rt = s->priv_data;
    uint8_t *in_data = NULL, *out_data = NULL, *swfdata;
    int64_t in_size;
    URLContext *stream;
    char swfhash[32];
    int swfsize;
    int ret = 0;

    /* Get the SWF player file. */
    /* NOTE(review): on open failure this jumps to fail, which calls
     * ffurl_close(stream) — presumably the open leaves stream NULL on
     * error; confirm against ffurl_open_whitelist(). */
    if ((ret = ffurl_open_whitelist(&stream, rt->swfverify, AVIO_FLAG_READ,
                                    &s->interrupt_callback, NULL,
                                    s->protocol_whitelist, s->protocol_blacklist, s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot open connection %s.\n", rt->swfverify);
        goto fail;
    }

    /* Use the reported resource size to slurp the whole file at once. */
    if ((in_size = ffurl_seek(stream, 0, AVSEEK_SIZE)) < 0) {
        ret = AVERROR(EIO);
        goto fail;
    }

    if (!(in_data = av_malloc(in_size))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ffurl_read_complete(stream, in_data, in_size)) < 0)
        goto fail;

    /* Need at least the 3-byte signature ("FWS"/"CWS") to proceed. */
    if (in_size < 3) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    if (!memcmp(in_data, "CWS", 3)) {
#if CONFIG_ZLIB
        int64_t out_size;
        /* Decompress the SWF player file using Zlib. */
        if (!(out_data = av_malloc(8))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        /* The decompressed file keeps the 8-byte header, but with the
         * signature rewritten from "CWS" to "FWS". */
        *in_data = 'F'; // magic stuff
        memcpy(out_data, in_data, 8);
        out_size = 8;

        if ((ret = rtmp_uncompress_swfplayer(in_data + 8, in_size - 8,
                                             &out_data, &out_size)) < 0)
            goto fail;
        swfsize = out_size;
        swfdata = out_data;
#else
        av_log(s, AV_LOG_ERROR,
               "Zlib is required for decompressing the SWF player file.\n");
        ret = AVERROR(EINVAL);
        goto fail;
#endif
    } else {
        /* Already uncompressed ("FWS" or anything else): hash as-is. */
        swfsize = in_size;
        swfdata = in_data;
    }

    /* Compute the SHA256 hash of the SWF player file. */
    if ((ret = ff_rtmp_calc_digest(swfdata, swfsize, 0,
                                   "Genuine Adobe Flash Player 001", 30,
                                   swfhash)) < 0)
        goto fail;

    /* Set SWFVerification parameters. */
    av_opt_set_bin(rt, "rtmp_swfhash", swfhash, 32, 0);
    rt->swfsize = swfsize;

fail:
    av_freep(&in_data);
    av_freep(&out_data);
    ffurl_close(stream);
    return ret;
}
1219 | |
1220 | /** |
1221 | * Perform handshake with the server by means of exchanging pseudorandom data |
1222 | * signed with HMAC-SHA2 digest. |
1223 | * |
1224 | * @return 0 if handshake succeeds, negative value otherwise |
1225 | */ |
1226 | static int rtmp_handshake(URLContext *s, RTMPContext *rt) |
1227 | { |
1228 | AVLFG rnd; |
1229 | uint8_t tosend [RTMP_HANDSHAKE_PACKET_SIZE+1] = { |
1230 | 3, // unencrypted data |
1231 | 0, 0, 0, 0, // client uptime |
1232 | RTMP_CLIENT_VER1, |
1233 | RTMP_CLIENT_VER2, |
1234 | RTMP_CLIENT_VER3, |
1235 | RTMP_CLIENT_VER4, |
1236 | }; |
1237 | uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE]; |
1238 | uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1]; |
1239 | int i; |
1240 | int server_pos, client_pos; |
1241 | uint8_t digest[32], signature[32]; |
1242 | int ret, type = 0; |
1243 | |
1244 | av_log(s, AV_LOG_DEBUG, "Handshaking...\n"); |
1245 | |
1246 | av_lfg_init(&rnd, 0xDEADC0DE); |
1247 | // generate handshake packet - 1536 bytes of pseudorandom data |
1248 | for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++) |
1249 | tosend[i] = av_lfg_get(&rnd) >> 24; |
1250 | |
1251 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1252 | /* When the client wants to use RTMPE, we have to change the command |
1253 | * byte to 0x06 which means to use encrypted data and we have to set |
1254 | * the flash version to at least 9.0.115.0. */ |
1255 | tosend[0] = 6; |
1256 | tosend[5] = 128; |
1257 | tosend[6] = 0; |
1258 | tosend[7] = 3; |
1259 | tosend[8] = 2; |
1260 | |
1261 | /* Initialize the Diffie-Hellmann context and generate the public key |
1262 | * to send to the server. */ |
1263 | if ((ret = ff_rtmpe_gen_pub_key(rt->stream, tosend + 1)) < 0) |
1264 | return ret; |
1265 | } |
1266 | |
1267 | client_pos = rtmp_handshake_imprint_with_digest(tosend + 1, rt->encrypted); |
1268 | if (client_pos < 0) |
1269 | return client_pos; |
1270 | |
1271 | if ((ret = ffurl_write(rt->stream, tosend, |
1272 | RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) { |
1273 | av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n"); |
1274 | return ret; |
1275 | } |
1276 | |
1277 | if ((ret = ffurl_read_complete(rt->stream, serverdata, |
1278 | RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) { |
1279 | av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n"); |
1280 | return ret; |
1281 | } |
1282 | |
1283 | if ((ret = ffurl_read_complete(rt->stream, clientdata, |
1284 | RTMP_HANDSHAKE_PACKET_SIZE)) < 0) { |
1285 | av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n"); |
1286 | return ret; |
1287 | } |
1288 | |
1289 | av_log(s, AV_LOG_DEBUG, "Type answer %d\n", serverdata[0]); |
1290 | av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n", |
1291 | serverdata[5], serverdata[6], serverdata[7], serverdata[8]); |
1292 | |
1293 | if (rt->is_input && serverdata[5] >= 3) { |
1294 | server_pos = rtmp_validate_digest(serverdata + 1, 772); |
1295 | if (server_pos < 0) |
1296 | return server_pos; |
1297 | |
1298 | if (!server_pos) { |
1299 | type = 1; |
1300 | server_pos = rtmp_validate_digest(serverdata + 1, 8); |
1301 | if (server_pos < 0) |
1302 | return server_pos; |
1303 | |
1304 | if (!server_pos) { |
1305 | av_log(s, AV_LOG_ERROR, "Server response validating failed\n"); |
1306 | return AVERROR(EIO); |
1307 | } |
1308 | } |
1309 | |
1310 | /* Generate SWFVerification token (SHA256 HMAC hash of decompressed SWF, |
1311 | * key are the last 32 bytes of the server handshake. */ |
1312 | if (rt->swfsize) { |
1313 | if ((ret = rtmp_calc_swf_verification(s, rt, serverdata + 1 + |
1314 | RTMP_HANDSHAKE_PACKET_SIZE - 32)) < 0) |
1315 | return ret; |
1316 | } |
1317 | |
1318 | ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0, |
1319 | rtmp_server_key, sizeof(rtmp_server_key), |
1320 | digest); |
1321 | if (ret < 0) |
1322 | return ret; |
1323 | |
1324 | ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32, |
1325 | 0, digest, 32, signature); |
1326 | if (ret < 0) |
1327 | return ret; |
1328 | |
1329 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1330 | /* Compute the shared secret key sent by the server and initialize |
1331 | * the RC4 encryption. */ |
1332 | if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1, |
1333 | tosend + 1, type)) < 0) |
1334 | return ret; |
1335 | |
1336 | /* Encrypt the signature received by the server. */ |
1337 | ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]); |
1338 | } |
1339 | |
1340 | if (memcmp(signature, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) { |
1341 | av_log(s, AV_LOG_ERROR, "Signature mismatch\n"); |
1342 | return AVERROR(EIO); |
1343 | } |
1344 | |
1345 | for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++) |
1346 | tosend[i] = av_lfg_get(&rnd) >> 24; |
1347 | ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0, |
1348 | rtmp_player_key, sizeof(rtmp_player_key), |
1349 | digest); |
1350 | if (ret < 0) |
1351 | return ret; |
1352 | |
1353 | ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0, |
1354 | digest, 32, |
1355 | tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32); |
1356 | if (ret < 0) |
1357 | return ret; |
1358 | |
1359 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1360 | /* Encrypt the signature to be send to the server. */ |
1361 | ff_rtmpe_encrypt_sig(rt->stream, tosend + |
1362 | RTMP_HANDSHAKE_PACKET_SIZE - 32, digest, |
1363 | serverdata[0]); |
1364 | } |
1365 | |
1366 | // write reply back to the server |
1367 | if ((ret = ffurl_write(rt->stream, tosend, |
1368 | RTMP_HANDSHAKE_PACKET_SIZE)) < 0) |
1369 | return ret; |
1370 | |
1371 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1372 | /* Set RC4 keys for encryption and update the keystreams. */ |
1373 | if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0) |
1374 | return ret; |
1375 | } |
1376 | } else { |
1377 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1378 | /* Compute the shared secret key sent by the server and initialize |
1379 | * the RC4 encryption. */ |
1380 | if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1, |
1381 | tosend + 1, 1)) < 0) |
1382 | return ret; |
1383 | |
1384 | if (serverdata[0] == 9) { |
1385 | /* Encrypt the signature received by the server. */ |
1386 | ff_rtmpe_encrypt_sig(rt->stream, signature, digest, |
1387 | serverdata[0]); |
1388 | } |
1389 | } |
1390 | |
1391 | if ((ret = ffurl_write(rt->stream, serverdata + 1, |
1392 | RTMP_HANDSHAKE_PACKET_SIZE)) < 0) |
1393 | return ret; |
1394 | |
1395 | if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) { |
1396 | /* Set RC4 keys for encryption and update the keystreams. */ |
1397 | if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0) |
1398 | return ret; |
1399 | } |
1400 | } |
1401 | |
1402 | return 0; |
1403 | } |
1404 | |
1405 | static int rtmp_receive_hs_packet(RTMPContext* rt, uint32_t *first_int, |
1406 | uint32_t *second_int, char *arraydata, |
1407 | int size) |
1408 | { |
1409 | int inoutsize; |
1410 | |
1411 | inoutsize = ffurl_read_complete(rt->stream, arraydata, |
1412 | RTMP_HANDSHAKE_PACKET_SIZE); |
1413 | if (inoutsize <= 0) |
1414 | return AVERROR(EIO); |
1415 | if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) { |
1416 | av_log(rt, AV_LOG_ERROR, "Erroneous Message size %d" |
1417 | " not following standard\n", (int)inoutsize); |
1418 | return AVERROR(EINVAL); |
1419 | } |
1420 | |
1421 | *first_int = AV_RB32(arraydata); |
1422 | *second_int = AV_RB32(arraydata + 4); |
1423 | return 0; |
1424 | } |
1425 | |
1426 | static int rtmp_send_hs_packet(RTMPContext* rt, uint32_t first_int, |
1427 | uint32_t second_int, char *arraydata, int size) |
1428 | { |
1429 | int inoutsize; |
1430 | |
1431 | AV_WB32(arraydata, first_int); |
1432 | AV_WB32(arraydata + 4, second_int); |
1433 | inoutsize = ffurl_write(rt->stream, arraydata, |
1434 | RTMP_HANDSHAKE_PACKET_SIZE); |
1435 | if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) { |
1436 | av_log(rt, AV_LOG_ERROR, "Unable to write answer\n"); |
1437 | return AVERROR(EIO); |
1438 | } |
1439 | |
1440 | return 0; |
1441 | } |
1442 | |
1443 | /** |
1444 | * rtmp handshake server side |
1445 | */ |
1446 | static int rtmp_server_handshake(URLContext *s, RTMPContext *rt) |
1447 | { |
1448 | uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE]; |
1449 | uint32_t hs_epoch; |
1450 | uint32_t hs_my_epoch; |
1451 | uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE]; |
1452 | uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE]; |
1453 | uint32_t zeroes; |
1454 | uint32_t temp = 0; |
1455 | int randomidx = 0; |
1456 | int inoutsize = 0; |
1457 | int ret; |
1458 | |
1459 | inoutsize = ffurl_read_complete(rt->stream, buffer, 1); // Receive C0 |
1460 | if (inoutsize <= 0) { |
1461 | av_log(s, AV_LOG_ERROR, "Unable to read handshake\n"); |
1462 | return AVERROR(EIO); |
1463 | } |
1464 | // Check Version |
1465 | if (buffer[0] != 3) { |
1466 | av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n"); |
1467 | return AVERROR(EIO); |
1468 | } |
1469 | if (ffurl_write(rt->stream, buffer, 1) <= 0) { // Send S0 |
1470 | av_log(s, AV_LOG_ERROR, |
1471 | "Unable to write answer - RTMP S0\n"); |
1472 | return AVERROR(EIO); |
1473 | } |
1474 | /* Receive C1 */ |
1475 | ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1, |
1476 | RTMP_HANDSHAKE_PACKET_SIZE); |
1477 | if (ret) { |
1478 | av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n"); |
1479 | return ret; |
1480 | } |
1481 | /* Send S1 */ |
1482 | /* By now same epoch will be sent */ |
1483 | hs_my_epoch = hs_epoch; |
1484 | /* Generate random */ |
1485 | for (randomidx = 8; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE); |
1486 | randomidx += 4) |
1487 | AV_WB32(hs_s1 + randomidx, av_get_random_seed()); |
1488 | |
1489 | ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1, |
1490 | RTMP_HANDSHAKE_PACKET_SIZE); |
1491 | if (ret) { |
1492 | av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n"); |
1493 | return ret; |
1494 | } |
1495 | /* Send S2 */ |
1496 | ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1, |
1497 | RTMP_HANDSHAKE_PACKET_SIZE); |
1498 | if (ret) { |
1499 | av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n"); |
1500 | return ret; |
1501 | } |
1502 | /* Receive C2 */ |
1503 | ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer, |
1504 | RTMP_HANDSHAKE_PACKET_SIZE); |
1505 | if (ret) { |
1506 | av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n"); |
1507 | return ret; |
1508 | } |
1509 | if (temp != hs_my_epoch) |
1510 | av_log(s, AV_LOG_WARNING, |
1511 | "Erroneous C2 Message epoch does not match up with C1 epoch\n"); |
1512 | if (memcmp(buffer + 8, hs_s1 + 8, |
1513 | RTMP_HANDSHAKE_PACKET_SIZE - 8)) |
1514 | av_log(s, AV_LOG_WARNING, |
1515 | "Erroneous C2 Message random does not match up\n"); |
1516 | |
1517 | return 0; |
1518 | } |
1519 | |
1520 | static int handle_chunk_size(URLContext *s, RTMPPacket *pkt) |
1521 | { |
1522 | RTMPContext *rt = s->priv_data; |
1523 | int ret; |
1524 | |
1525 | if (pkt->size < 4) { |
1526 | av_log(s, AV_LOG_ERROR, |
1527 | "Too short chunk size change packet (%d)\n", |
1528 | pkt->size); |
1529 | return AVERROR_INVALIDDATA; |
1530 | } |
1531 | |
1532 | if (!rt->is_input) { |
1533 | /* Send the same chunk size change packet back to the server, |
1534 | * setting the outgoing chunk size to the same as the incoming one. */ |
1535 | if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size, |
1536 | &rt->prev_pkt[1], &rt->nb_prev_pkt[1])) < 0) |
1537 | return ret; |
1538 | rt->out_chunk_size = AV_RB32(pkt->data); |
1539 | } |
1540 | |
1541 | rt->in_chunk_size = AV_RB32(pkt->data); |
1542 | if (rt->in_chunk_size <= 0) { |
1543 | av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n", |
1544 | rt->in_chunk_size); |
1545 | return AVERROR_INVALIDDATA; |
1546 | } |
1547 | av_log(s, AV_LOG_DEBUG, "New incoming chunk size = %d\n", |
1548 | rt->in_chunk_size); |
1549 | |
1550 | return 0; |
1551 | } |
1552 | |
1553 | static int handle_ping(URLContext *s, RTMPPacket *pkt) |
1554 | { |
1555 | RTMPContext *rt = s->priv_data; |
1556 | int t, ret; |
1557 | |
1558 | if (pkt->size < 2) { |
1559 | av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n", |
1560 | pkt->size); |
1561 | return AVERROR_INVALIDDATA; |
1562 | } |
1563 | |
1564 | t = AV_RB16(pkt->data); |
1565 | if (t == 6) { |
1566 | if ((ret = gen_pong(s, rt, pkt)) < 0) |
1567 | return ret; |
1568 | } else if (t == 26) { |
1569 | if (rt->swfsize) { |
1570 | if ((ret = gen_swf_verification(s, rt)) < 0) |
1571 | return ret; |
1572 | } else { |
1573 | av_log(s, AV_LOG_WARNING, "Ignoring SWFVerification request.\n"); |
1574 | } |
1575 | } |
1576 | |
1577 | return 0; |
1578 | } |
1579 | |
1580 | static int handle_client_bw(URLContext *s, RTMPPacket *pkt) |
1581 | { |
1582 | RTMPContext *rt = s->priv_data; |
1583 | |
1584 | if (pkt->size < 4) { |
1585 | av_log(s, AV_LOG_ERROR, |
1586 | "Client bandwidth report packet is less than 4 bytes long (%d)\n", |
1587 | pkt->size); |
1588 | return AVERROR_INVALIDDATA; |
1589 | } |
1590 | |
1591 | rt->client_report_size = AV_RB32(pkt->data); |
1592 | if (rt->client_report_size <= 0) { |
1593 | av_log(s, AV_LOG_ERROR, "Incorrect client bandwidth %d\n", |
1594 | rt->client_report_size); |
1595 | return AVERROR_INVALIDDATA; |
1596 | |
1597 | } |
1598 | av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", rt->client_report_size); |
1599 | rt->client_report_size >>= 1; |
1600 | |
1601 | return 0; |
1602 | } |
1603 | |
1604 | static int handle_server_bw(URLContext *s, RTMPPacket *pkt) |
1605 | { |
1606 | RTMPContext *rt = s->priv_data; |
1607 | |
1608 | if (pkt->size < 4) { |
1609 | av_log(s, AV_LOG_ERROR, |
1610 | "Too short server bandwidth report packet (%d)\n", |
1611 | pkt->size); |
1612 | return AVERROR_INVALIDDATA; |
1613 | } |
1614 | |
1615 | rt->server_bw = AV_RB32(pkt->data); |
1616 | if (rt->server_bw <= 0) { |
1617 | av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n", |
1618 | rt->server_bw); |
1619 | return AVERROR_INVALIDDATA; |
1620 | } |
1621 | av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw); |
1622 | |
1623 | return 0; |
1624 | } |
1625 | |
/**
 * Perform Adobe-style authentication and store the resulting query string
 * in rt->auth_params: response = base64(MD5(base64(MD5(user + salt +
 * password)) + opaque-or-challenge + client-challenge)).
 *
 * @param user      username from the server's auth parameters
 * @param salt      server-supplied salt
 * @param opaque    optional opaque token (takes precedence over challenge)
 * @param challenge optional server challenge, used when no opaque is given
 * @return 0 on success, AVERROR(ENOMEM) if the MD5 context cannot be allocated
 */
static int do_adobe_auth(RTMPContext *rt, const char *user, const char *salt,
                         const char *opaque, const char *challenge)
{
    uint8_t hash[16];
    char hashstr[AV_BASE64_SIZE(sizeof(hash))], challenge2[10];
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);

    /* Client-side challenge: 8 random hex digits. */
    snprintf(challenge2, sizeof(challenge2), "%08x", av_get_random_seed());

    /* Round 1: base64(MD5(user + salt + password)). */
    av_md5_init(md5);
    av_md5_update(md5, user, strlen(user));
    av_md5_update(md5, salt, strlen(salt));
    av_md5_update(md5, rt->password, strlen(rt->password));
    av_md5_final(md5, hash);
    av_base64_encode(hashstr, sizeof(hashstr), hash,
                     sizeof(hash));
    /* Round 2: base64(MD5(round1 + opaque-or-challenge + challenge2)). */
    av_md5_init(md5);
    av_md5_update(md5, hashstr, strlen(hashstr));
    if (opaque)
        av_md5_update(md5, opaque, strlen(opaque));
    else if (challenge)
        av_md5_update(md5, challenge, strlen(challenge));
    av_md5_update(md5, challenge2, strlen(challenge2));
    av_md5_final(md5, hash);
    av_base64_encode(hashstr, sizeof(hashstr), hash,
                     sizeof(hash));
    snprintf(rt->auth_params, sizeof(rt->auth_params),
             "?authmod=%s&user=%s&challenge=%s&response=%s",
             "adobe", user, challenge2, hashstr);
    if (opaque)
        av_strlcatf(rt->auth_params, sizeof(rt->auth_params),
                    "&opaque=%s", opaque);

    av_free(md5);
    return 0;
}
1664 | |
/**
 * Perform Limelight ("llnw") authentication, which mirrors HTTP digest
 * authentication: response = MD5(HA1:nonce:nc:cnonce:qop:HA2) with
 * HA1 = MD5(user:realm:password) and HA2 = MD5(method:/app[/_definst_]).
 * The resulting query string is stored in rt->auth_params.
 *
 * @param user  username from the server's auth parameters
 * @param nonce server-supplied nonce (may be NULL)
 * @return 0 on success, AVERROR(ENOMEM) if the MD5 context cannot be allocated
 */
static int do_llnw_auth(RTMPContext *rt, const char *user, const char *nonce)
{
    uint8_t hash[16];
    char hashstr1[33], hashstr2[33];
    const char *realm = "live";
    const char *method = "publish";
    const char *qop = "auth";
    const char *nc = "00000001";
    char cnonce[10];
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);

    /* Client nonce: 8 random hex digits. */
    snprintf(cnonce, sizeof(cnonce), "%08x", av_get_random_seed());

    /* HA1 = MD5(user:realm:password) */
    av_md5_init(md5);
    av_md5_update(md5, user, strlen(user));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, realm, strlen(realm));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, rt->password, strlen(rt->password));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr1, hash, 16, 1);
    hashstr1[32] = '\0';

    /* HA2 = MD5(method:/app), with an implicit /_definst_ instance
     * appended when the app name has no instance component. */
    av_md5_init(md5);
    av_md5_update(md5, method, strlen(method));
    av_md5_update(md5, ":/", 2);
    av_md5_update(md5, rt->app, strlen(rt->app));
    if (!strchr(rt->app, '/'))
        av_md5_update(md5, "/_definst_", strlen("/_definst_"));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr2, hash, 16, 1);
    hashstr2[32] = '\0';

    /* response = MD5(HA1:nonce:nc:cnonce:qop:HA2) */
    av_md5_init(md5);
    av_md5_update(md5, hashstr1, strlen(hashstr1));
    av_md5_update(md5, ":", 1);
    if (nonce)
        av_md5_update(md5, nonce, strlen(nonce));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, nc, strlen(nc));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, cnonce, strlen(cnonce));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, qop, strlen(qop));
    av_md5_update(md5, ":", 1);
    av_md5_update(md5, hashstr2, strlen(hashstr2));
    av_md5_final(md5, hash);
    ff_data_to_hex(hashstr1, hash, 16, 1);

    snprintf(rt->auth_params, sizeof(rt->auth_params),
             "?authmod=%s&user=%s&nonce=%s&cnonce=%s&nc=%s&response=%s",
             "llnw", user, nonce, cnonce, nc, hashstr1);

    av_free(md5);
    return 0;
}
1723 | |
/**
 * Examine a failed "connect" reply's description string: detect the
 * authentication method ("adobe" or "llnw"), parse the server's auth
 * parameters out of the "?reason=needauth..." query string, and fill
 * rt->auth_params for the reconnect attempt.
 *
 * @param desc the "description" field of the server's error reply
 * @return 0 when a retry with auth parameters should be attempted,
 *         negative AVERROR otherwise
 */
static int handle_connect_error(URLContext *s, const char *desc)
{
    RTMPContext *rt = s->priv_data;
    char buf[300], *ptr, authmod[15];
    int i = 0, ret = 0;
    const char *user = "", *salt = "", *opaque = NULL,
               *challenge = NULL, *cptr = NULL, *nonce = NULL;

    if (!(cptr = strstr(desc, "authmod=adobe")) &&
        !(cptr = strstr(desc, "authmod=llnw"))) {
        av_log(s, AV_LOG_ERROR,
               "Unknown connect error (unsupported authentication method?)\n");
        return AVERROR_UNKNOWN;
    }
    /* Copy the method name (up to the next space) into authmod. */
    cptr += strlen("authmod=");
    while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1)
        authmod[i++] = *cptr++;
    authmod[i] = '\0';

    if (!rt->username[0] || !rt->password[0]) {
        av_log(s, AV_LOG_ERROR, "No credentials set\n");
        return AVERROR_UNKNOWN;
    }

    if (strstr(desc, "?reason=authfailed")) {
        av_log(s, AV_LOG_ERROR, "Incorrect username/password\n");
        return AVERROR_UNKNOWN;
    } else if (strstr(desc, "?reason=nosuchuser")) {
        av_log(s, AV_LOG_ERROR, "Incorrect username\n");
        return AVERROR_UNKNOWN;
    }

    /* Only one auth round-trip is attempted; a second failure is final. */
    if (rt->auth_tried) {
        av_log(s, AV_LOG_ERROR, "Authentication failed\n");
        return AVERROR_UNKNOWN;
    }

    rt->auth_params[0] = '\0';

    /* First phase: the server only wants to know the user; resend the
     * connect with user info so it can issue a challenge. */
    if (strstr(desc, "code=403 need auth")) {
        snprintf(rt->auth_params, sizeof(rt->auth_params),
                 "?authmod=%s&user=%s", authmod, rt->username);
        return 0;
    }

    if (!(cptr = strstr(desc, "?reason=needauth"))) {
        av_log(s, AV_LOG_ERROR, "No auth parameters found\n");
        return AVERROR_UNKNOWN;
    }

    /* Split the query string ("key=value&key=value...") in place. */
    av_strlcpy(buf, cptr + 1, sizeof(buf));
    ptr = buf;

    while (ptr) {
        char *next = strchr(ptr, '&');
        char *value = strchr(ptr, '=');
        if (next)
            *next++ = '\0';
        if (value) {
            *value++ = '\0';
            if (!strcmp(ptr, "user")) {
                user = value;
            } else if (!strcmp(ptr, "salt")) {
                salt = value;
            } else if (!strcmp(ptr, "opaque")) {
                opaque = value;
            } else if (!strcmp(ptr, "challenge")) {
                challenge = value;
            } else if (!strcmp(ptr, "nonce")) {
                nonce = value;
            } else {
                av_log(s, AV_LOG_INFO, "Ignoring unsupported var %s\n", ptr);
            }
        } else {
            av_log(s, AV_LOG_WARNING, "Variable %s has NULL value\n", ptr);
        }
        ptr = next;
    }

    /* Compute the method-specific response into rt->auth_params. */
    if (!strcmp(authmod, "adobe")) {
        if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0)
            return ret;
    } else {
        if ((ret = do_llnw_auth(rt, user, nonce)) < 0)
            return ret;
    }

    rt->auth_tried = 1;
    return 0;
}
1814 | |
/**
 * Handle an "_error" invoke from the server: match it to the tracked
 * method it answers and decide whether it is fatal, ignorable (historical
 * Adobe artifacts), or an authentication challenge that triggers a
 * reconnect.
 *
 * @return 0 when the error can be ignored or a reconnect was prepared,
 *         negative AVERROR otherwise
 */
static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    const uint8_t *data_end = pkt->data + pkt->size;
    char *tracked_method = NULL;
    int level = AV_LOG_ERROR;
    uint8_t tmpstr[256];
    int ret;

    /* The transaction id at offset 9 links this reply to our invoke. */
    if ((ret = find_tracked_method(s, pkt, 9, &tracked_method)) < 0)
        return ret;

    if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                "description", tmpstr, sizeof(tmpstr))) {
        if (tracked_method && (!strcmp(tracked_method, "_checkbw") ||
                               !strcmp(tracked_method, "releaseStream") ||
                               !strcmp(tracked_method, "FCSubscribe")  ||
                               !strcmp(tracked_method, "FCPublish"))) {
            /* Gracefully ignore Adobe-specific historical artifact errors. */
            level = AV_LOG_WARNING;
            ret = 0;
        } else if (tracked_method && !strcmp(tracked_method, "getStreamLength")) {
            /* Length queries routinely fail for live streams. */
            level = rt->live ? AV_LOG_DEBUG : AV_LOG_WARNING;
            ret = 0;
        } else if (tracked_method && !strcmp(tracked_method, "connect")) {
            /* May be an auth challenge; a zero return requests reconnect. */
            ret = handle_connect_error(s, tmpstr);
            if (!ret) {
                rt->do_reconnect = 1;
                level = AV_LOG_VERBOSE;
            }
        } else
            ret = AVERROR_UNKNOWN;
        av_log(s, level, "Server error: %s\n", tmpstr);
    }

    av_free(tracked_method);
    return ret;
}
1853 | |
1854 | static int write_begin(URLContext *s) |
1855 | { |
1856 | RTMPContext *rt = s->priv_data; |
1857 | PutByteContext pbc; |
1858 | RTMPPacket spkt = { 0 }; |
1859 | int ret; |
1860 | |
1861 | // Send Stream Begin 1 |
1862 | if ((ret = ff_rtmp_packet_create(&spkt, RTMP_NETWORK_CHANNEL, |
1863 | RTMP_PT_PING, 0, 6)) < 0) { |
1864 | av_log(s, AV_LOG_ERROR, "Unable to create response packet\n"); |
1865 | return ret; |
1866 | } |
1867 | |
1868 | bytestream2_init_writer(&pbc, spkt.data, spkt.size); |
1869 | bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin |
1870 | bytestream2_put_be32(&pbc, rt->nb_streamid); |
1871 | |
1872 | ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size, |
1873 | &rt->prev_pkt[1], &rt->nb_prev_pkt[1]); |
1874 | |
1875 | ff_rtmp_packet_destroy(&spkt); |
1876 | |
1877 | return ret; |
1878 | } |
1879 | |
1880 | static int write_status(URLContext *s, RTMPPacket *pkt, |
1881 | const char *status, const char *filename) |
1882 | { |
1883 | RTMPContext *rt = s->priv_data; |
1884 | RTMPPacket spkt = { 0 }; |
1885 | char statusmsg[128]; |
1886 | uint8_t *pp; |
1887 | int ret; |
1888 | |
1889 | if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL, |
1890 | RTMP_PT_INVOKE, 0, |
1891 | RTMP_PKTDATA_DEFAULT_SIZE)) < 0) { |
1892 | av_log(s, AV_LOG_ERROR, "Unable to create response packet\n"); |
1893 | return ret; |
1894 | } |
1895 | |
1896 | pp = spkt.data; |
1897 | spkt.extra = pkt->extra; |
1898 | ff_amf_write_string(&pp, "onStatus"); |
1899 | ff_amf_write_number(&pp, 0); |
1900 | ff_amf_write_null(&pp); |
1901 | |
1902 | ff_amf_write_object_start(&pp); |
1903 | ff_amf_write_field_name(&pp, "level"); |
1904 | ff_amf_write_string(&pp, "status"); |
1905 | ff_amf_write_field_name(&pp, "code"); |
1906 | ff_amf_write_string(&pp, status); |
1907 | ff_amf_write_field_name(&pp, "description"); |
1908 | snprintf(statusmsg, sizeof(statusmsg), |
1909 | "%s is now published", filename); |
1910 | ff_amf_write_string(&pp, statusmsg); |
1911 | ff_amf_write_field_name(&pp, "details"); |
1912 | ff_amf_write_string(&pp, filename); |
1913 | ff_amf_write_object_end(&pp); |
1914 | |
1915 | spkt.size = pp - spkt.data; |
1916 | ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size, |
1917 | &rt->prev_pkt[1], &rt->nb_prev_pkt[1]); |
1918 | ff_rtmp_packet_destroy(&spkt); |
1919 | |
1920 | return ret; |
1921 | } |
1922 | |
1923 | static int send_invoke_response(URLContext *s, RTMPPacket *pkt) |
1924 | { |
1925 | RTMPContext *rt = s->priv_data; |
1926 | double seqnum; |
1927 | char filename[128]; |
1928 | char command[64]; |
1929 | int stringlen; |
1930 | char *pchar; |
1931 | const uint8_t *p = pkt->data; |
1932 | uint8_t *pp = NULL; |
1933 | RTMPPacket spkt = { 0 }; |
1934 | GetByteContext gbc; |
1935 | int ret; |
1936 | |
1937 | bytestream2_init(&gbc, p, pkt->size); |
1938 | if (ff_amf_read_string(&gbc, command, sizeof(command), |
1939 | &stringlen)) { |
1940 | av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n"); |
1941 | return AVERROR_INVALIDDATA; |
1942 | } |
1943 | |
1944 | ret = ff_amf_read_number(&gbc, &seqnum); |
1945 | if (ret) |
1946 | return ret; |
1947 | ret = ff_amf_read_null(&gbc); |
1948 | if (ret) |
1949 | return ret; |
1950 | if (!strcmp(command, "FCPublish") || |
1951 | !strcmp(command, "publish")) { |
1952 | ret = ff_amf_read_string(&gbc, filename, |
1953 | sizeof(filename), &stringlen); |
1954 | if (ret) { |
1955 | if (ret == AVERROR(EINVAL)) |
1956 | av_log(s, AV_LOG_ERROR, "Unable to parse stream name - name too long?\n"); |
1957 | else |
1958 | av_log(s, AV_LOG_ERROR, "Unable to parse stream name\n"); |
1959 | return ret; |
1960 | } |
1961 | // check with url |
1962 | if (s->filename) { |
1963 | pchar = strrchr(s->filename, '/'); |
1964 | if (!pchar) { |
1965 | av_log(s, AV_LOG_WARNING, |
1966 | "Unable to find / in url %s, bad format\n", |
1967 | s->filename); |
1968 | pchar = s->filename; |
1969 | } |
1970 | pchar++; |
1971 | if (strcmp(pchar, filename)) |
1972 | av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting" |
1973 | " %s\n", filename, pchar); |
1974 | } |
1975 | rt->state = STATE_RECEIVING; |
1976 | } |
1977 | |
1978 | if (!strcmp(command, "FCPublish")) { |
1979 | if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL, |
1980 | RTMP_PT_INVOKE, 0, |
1981 | RTMP_PKTDATA_DEFAULT_SIZE)) < 0) { |
1982 | av_log(s, AV_LOG_ERROR, "Unable to create response packet\n"); |
1983 | return ret; |
1984 | } |
1985 | pp = spkt.data; |
1986 | ff_amf_write_string(&pp, "onFCPublish"); |
1987 | } else if (!strcmp(command, "publish")) { |
1988 | ret = write_begin(s); |
1989 | if (ret < 0) |
1990 | return ret; |
1991 | |
1992 | // Send onStatus(NetStream.Publish.Start) |
1993 | return write_status(s, pkt, "NetStream.Publish.Start", |
1994 | filename); |
1995 | } else if (!strcmp(command, "play")) { |
1996 | ret = write_begin(s); |
1997 | if (ret < 0) |
1998 | return ret; |
1999 | rt->state = STATE_SENDING; |
2000 | return write_status(s, pkt, "NetStream.Play.Start", |
2001 | filename); |
2002 | } else { |
2003 | if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL, |
2004 | RTMP_PT_INVOKE, 0, |
2005 | RTMP_PKTDATA_DEFAULT_SIZE)) < 0) { |
2006 | av_log(s, AV_LOG_ERROR, "Unable to create response packet\n"); |
2007 | return ret; |
2008 | } |
2009 | pp = spkt.data; |
2010 | ff_amf_write_string(&pp, "_result"); |
2011 | ff_amf_write_number(&pp, seqnum); |
2012 | ff_amf_write_null(&pp); |
2013 | if (!strcmp(command, "createStream")) { |
2014 | rt->nb_streamid++; |
2015 | if (rt->nb_streamid == 0 || rt->nb_streamid == 2) |
2016 | rt->nb_streamid++; /* Values 0 and 2 are reserved */ |
2017 | ff_amf_write_number(&pp, rt->nb_streamid); |
2018 | /* By now we don't control which streams are removed in |
2019 | * deleteStream. There is no stream creation control |
2020 | * if a client creates more than 2^32 - 2 streams. */ |
2021 | } |
2022 | } |
2023 | spkt.size = pp - spkt.data; |
2024 | ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size, |
2025 | &rt->prev_pkt[1], &rt->nb_prev_pkt[1]); |
2026 | ff_rtmp_packet_destroy(&spkt); |
2027 | return ret; |
2028 | } |
2029 | |
2030 | /** |
2031 | * Read the AMF_NUMBER response ("_result") to a function call |
2032 | * (e.g. createStream()). This response should be made up of the AMF_STRING |
 * "_result", a NULL object and then the response encoded as AMF_NUMBER. On a
 * successful response, the value is stored in *number (otherwise *number is
 * left unchanged).
2036 | * |
2037 | * @return 0 if reading the value succeeds, negative value otherwise |
2038 | */ |
2039 | static int read_number_result(RTMPPacket *pkt, double *number) |
2040 | { |
2041 | // We only need to fit "_result" in this. |
2042 | uint8_t strbuffer[8]; |
2043 | int stringlen; |
2044 | double numbuffer; |
2045 | GetByteContext gbc; |
2046 | |
2047 | bytestream2_init(&gbc, pkt->data, pkt->size); |
2048 | |
2049 | // Value 1/4: "_result" as AMF_STRING |
2050 | if (ff_amf_read_string(&gbc, strbuffer, sizeof(strbuffer), &stringlen)) |
2051 | return AVERROR_INVALIDDATA; |
2052 | if (strcmp(strbuffer, "_result")) |
2053 | return AVERROR_INVALIDDATA; |
2054 | // Value 2/4: The callee reference number |
2055 | if (ff_amf_read_number(&gbc, &numbuffer)) |
2056 | return AVERROR_INVALIDDATA; |
2057 | // Value 3/4: Null |
2058 | if (ff_amf_read_null(&gbc)) |
2059 | return AVERROR_INVALIDDATA; |
2060 | // Value 4/4: The response as AMF_NUMBER |
2061 | if (ff_amf_read_number(&gbc, &numbuffer)) |
2062 | return AVERROR_INVALIDDATA; |
2063 | else |
2064 | *number = numbuffer; |
2065 | |
2066 | return 0; |
2067 | } |
2068 | |
2069 | static int handle_invoke_result(URLContext *s, RTMPPacket *pkt) |
2070 | { |
2071 | RTMPContext *rt = s->priv_data; |
2072 | char *tracked_method = NULL; |
2073 | int ret = 0; |
2074 | |
2075 | if ((ret = find_tracked_method(s, pkt, 10, &tracked_method)) < 0) |
2076 | return ret; |
2077 | |
2078 | if (!tracked_method) { |
2079 | /* Ignore this reply when the current method is not tracked. */ |
2080 | return ret; |
2081 | } |
2082 | |
2083 | if (!strcmp(tracked_method, "connect")) { |
2084 | if (!rt->is_input) { |
2085 | if ((ret = gen_release_stream(s, rt)) < 0) |
2086 | goto fail; |
2087 | |
2088 | if ((ret = gen_fcpublish_stream(s, rt)) < 0) |
2089 | goto fail; |
2090 | } else { |
2091 | if ((ret = gen_server_bw(s, rt)) < 0) |
2092 | goto fail; |
2093 | } |
2094 | |
2095 | if ((ret = gen_create_stream(s, rt)) < 0) |
2096 | goto fail; |
2097 | |
2098 | if (rt->is_input) { |
2099 | /* Send the FCSubscribe command when the name of live |
2100 | * stream is defined by the user or if it's a live stream. */ |
2101 | if (rt->subscribe) { |
2102 | if ((ret = gen_fcsubscribe_stream(s, rt, rt->subscribe)) < 0) |
2103 | goto fail; |
2104 | } else if (rt->live == -1) { |
2105 | if ((ret = gen_fcsubscribe_stream(s, rt, rt->playpath)) < 0) |
2106 | goto fail; |
2107 | } |
2108 | } |
2109 | } else if (!strcmp(tracked_method, "createStream")) { |
2110 | double stream_id; |
2111 | if (read_number_result(pkt, &stream_id)) { |
2112 | av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n"); |
2113 | } else { |
2114 | rt->stream_id = stream_id; |
2115 | } |
2116 | |
2117 | if (!rt->is_input) { |
2118 | if ((ret = gen_publish(s, rt)) < 0) |
2119 | goto fail; |
2120 | } else { |
2121 | if (rt->live != -1) { |
2122 | if ((ret = gen_get_stream_length(s, rt)) < 0) |
2123 | goto fail; |
2124 | } |
2125 | if ((ret = gen_play(s, rt)) < 0) |
2126 | goto fail; |
2127 | if ((ret = gen_buffer_time(s, rt)) < 0) |
2128 | goto fail; |
2129 | } |
2130 | } else if (!strcmp(tracked_method, "getStreamLength")) { |
2131 | if (read_number_result(pkt, &rt->duration)) { |
2132 | av_log(s, AV_LOG_WARNING, "Unexpected reply on getStreamLength()\n"); |
2133 | } |
2134 | } |
2135 | |
2136 | fail: |
2137 | av_free(tracked_method); |
2138 | return ret; |
2139 | } |
2140 | |
2141 | static int handle_invoke_status(URLContext *s, RTMPPacket *pkt) |
2142 | { |
2143 | RTMPContext *rt = s->priv_data; |
2144 | const uint8_t *data_end = pkt->data + pkt->size; |
2145 | const uint8_t *ptr = pkt->data + RTMP_HEADER; |
2146 | uint8_t tmpstr[256]; |
2147 | int i, t; |
2148 | |
2149 | for (i = 0; i < 2; i++) { |
2150 | t = ff_amf_tag_size(ptr, data_end); |
2151 | if (t < 0) |
2152 | return 1; |
2153 | ptr += t; |
2154 | } |
2155 | |
2156 | t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr)); |
2157 | if (!t && !strcmp(tmpstr, "error")) { |
2158 | t = ff_amf_get_field_value(ptr, data_end, |
2159 | "description", tmpstr, sizeof(tmpstr)); |
2160 | if (t || !tmpstr[0]) |
2161 | t = ff_amf_get_field_value(ptr, data_end, "code", |
2162 | tmpstr, sizeof(tmpstr)); |
2163 | if (!t) |
2164 | av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr); |
2165 | return -1; |
2166 | } |
2167 | |
2168 | t = ff_amf_get_field_value(ptr, data_end, "code", tmpstr, sizeof(tmpstr)); |
2169 | if (!t && !strcmp(tmpstr, "NetStream.Play.Start")) rt->state = STATE_PLAYING; |
2170 | if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED; |
2171 | if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED; |
2172 | if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING; |
2173 | if (!t && !strcmp(tmpstr, "NetStream.Seek.Notify")) rt->state = STATE_PLAYING; |
2174 | |
2175 | return 0; |
2176 | } |
2177 | |
2178 | static int handle_invoke(URLContext *s, RTMPPacket *pkt) |
2179 | { |
2180 | RTMPContext *rt = s->priv_data; |
2181 | int ret = 0; |
2182 | |
2183 | //TODO: check for the messages sent for wrong state? |
2184 | if (ff_amf_match_string(pkt->data, pkt->size, "_error")) { |
2185 | if ((ret = handle_invoke_error(s, pkt)) < 0) |
2186 | return ret; |
2187 | } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) { |
2188 | if ((ret = handle_invoke_result(s, pkt)) < 0) |
2189 | return ret; |
2190 | } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) { |
2191 | if ((ret = handle_invoke_status(s, pkt)) < 0) |
2192 | return ret; |
2193 | } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) { |
2194 | if ((ret = gen_check_bw(s, rt)) < 0) |
2195 | return ret; |
2196 | } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") || |
2197 | ff_amf_match_string(pkt->data, pkt->size, "FCPublish") || |
2198 | ff_amf_match_string(pkt->data, pkt->size, "publish") || |
2199 | ff_amf_match_string(pkt->data, pkt->size, "play") || |
2200 | ff_amf_match_string(pkt->data, pkt->size, "_checkbw") || |
2201 | ff_amf_match_string(pkt->data, pkt->size, "createStream")) { |
2202 | if ((ret = send_invoke_response(s, pkt)) < 0) |
2203 | return ret; |
2204 | } |
2205 | |
2206 | return ret; |
2207 | } |
2208 | |
2209 | static int update_offset(RTMPContext *rt, int size) |
2210 | { |
2211 | int old_flv_size; |
2212 | |
2213 | // generate packet header and put data into buffer for FLV demuxer |
2214 | if (rt->flv_off < rt->flv_size) { |
2215 | // There is old unread data in the buffer, thus append at the end |
2216 | old_flv_size = rt->flv_size; |
2217 | rt->flv_size += size; |
2218 | } else { |
2219 | // All data has been read, write the new data at the start of the buffer |
2220 | old_flv_size = 0; |
2221 | rt->flv_size = size; |
2222 | rt->flv_off = 0; |
2223 | } |
2224 | |
2225 | return old_flv_size; |
2226 | } |
2227 | |
/**
 * Append one media packet to rt->flv_data as a complete FLV tag
 * (11-byte tag header + payload + 4-byte trailing tag size) so the
 * FLV demuxer can consume it later.
 *
 * @param rt   RTMP protocol context owning the FLV buffer
 * @param pkt  received RTMP packet whose payload is appended
 * @param skip number of leading payload bytes to drop (e.g. the
 *             "@setDataFrame" wrapper stripped in handle_notify())
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int append_flv_data(RTMPContext *rt, RTMPPacket *pkt, int skip)
{
    int old_flv_size, ret;
    PutByteContext pbc;
    const uint8_t *data = pkt->data + skip;
    const int size = pkt->size - skip;
    uint32_t ts = pkt->timestamp;

    // Record which stream types were actually observed.
    if (pkt->type == RTMP_PT_AUDIO) {
        rt->has_audio = 1;
    } else if (pkt->type == RTMP_PT_VIDEO) {
        rt->has_video = 1;
    }

    // 15 extra bytes: 11 for the FLV tag header, 4 for the trailing size.
    old_flv_size = update_offset(rt, size + 15);

    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
        rt->flv_size = rt->flv_off = 0;
        return ret;
    }
    bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
    bytestream2_skip_p(&pbc, old_flv_size);         // preserve unread data
    bytestream2_put_byte(&pbc, pkt->type);          // FLV tag type
    bytestream2_put_be24(&pbc, size);               // payload size
    bytestream2_put_be24(&pbc, ts);                 // timestamp, low 24 bits
    bytestream2_put_byte(&pbc, ts >> 24);           // timestamp extension
    bytestream2_put_be24(&pbc, 0);                  // stream id, always 0
    bytestream2_put_buffer(&pbc, data, size);
    bytestream2_put_be32(&pbc, size + RTMP_HEADER); // previous tag size

    return 0;
}
2260 | |
/**
 * Process an RTMP_PT_NOTIFY packet: scan an onMetaData mixed array for
 * stream-presence hints, strip an optional "@setDataFrame" wrapper, and
 * queue the payload as FLV data for the demuxer.
 *
 * @return 0 on success, negative AVERROR on malformed AMF data or
 *         allocation failure
 */
static int handle_notify(URLContext *s, RTMPPacket *pkt)
{
    RTMPContext *rt = s->priv_data;
    uint8_t commandbuffer[64];
    char statusmsg[128];
    int stringlen, ret, skip = 0;
    GetByteContext gbc;

    bytestream2_init(&gbc, pkt->data, pkt->size);
    if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
                           &stringlen))
        return AVERROR_INVALIDDATA;

    if (!strcmp(commandbuffer, "onMetaData")) {
        // metadata properties should be stored in a mixed array
        if (bytestream2_get_byte(&gbc) == AMF_DATA_TYPE_MIXEDARRAY) {
            // We have found a metaData Array so flv can determine the streams
            // from this.
            rt->received_metadata = 1;
            // skip 32-bit max array index
            bytestream2_skip(&gbc, 4);
            // Walk the property list; 3 bytes is the minimum left for the
            // end-of-object marker consumed below.
            while (bytestream2_get_bytes_left(&gbc) > 3) {
                if (ff_amf_get_string(&gbc, statusmsg, sizeof(statusmsg),
                                      &stringlen))
                    return AVERROR_INVALIDDATA;
                // We do not care about the content of the property (yet).
                stringlen = ff_amf_tag_size(gbc.buffer, gbc.buffer_end);
                if (stringlen < 0)
                    return AVERROR_INVALIDDATA;
                bytestream2_skip(&gbc, stringlen);

                // The presence of the following properties indicates that the
                // respective streams are present.
                if (!strcmp(statusmsg, "videocodecid")) {
                    rt->has_video = 1;
                }
                if (!strcmp(statusmsg, "audiocodecid")) {
                    rt->has_audio = 1;
                }
            }
            if (bytestream2_get_be24(&gbc) != AMF_END_OF_OBJECT)
                return AVERROR_INVALIDDATA;
        }
    }

    // Skip the @setDataFrame string and validate it is a notification
    if (!strcmp(commandbuffer, "@setDataFrame")) {
        // skip points just past the wrapper string, so append_flv_data()
        // forwards only the wrapped notification itself.
        skip = gbc.buffer - pkt->data;
        ret = ff_amf_read_string(&gbc, statusmsg,
                                 sizeof(statusmsg), &stringlen);
        if (ret < 0)
            return AVERROR_INVALIDDATA;
    }

    return append_flv_data(rt, pkt, skip);
}
2317 | |
2318 | /** |
2319 | * Parse received packet and possibly perform some action depending on |
2320 | * the packet contents. |
2321 | * @return 0 for no errors, negative values for serious errors which prevent |
2322 | * further communications, positive values for uncritical errors |
2323 | */ |
2324 | static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt) |
2325 | { |
2326 | int ret; |
2327 | |
2328 | #ifdef DEBUG |
2329 | ff_rtmp_packet_dump(s, pkt); |
2330 | #endif |
2331 | |
2332 | switch (pkt->type) { |
2333 | case RTMP_PT_BYTES_READ: |
2334 | av_log(s, AV_LOG_TRACE, "received bytes read report\n"); |
2335 | break; |
2336 | case RTMP_PT_CHUNK_SIZE: |
2337 | if ((ret = handle_chunk_size(s, pkt)) < 0) |
2338 | return ret; |
2339 | break; |
2340 | case RTMP_PT_PING: |
2341 | if ((ret = handle_ping(s, pkt)) < 0) |
2342 | return ret; |
2343 | break; |
2344 | case RTMP_PT_CLIENT_BW: |
2345 | if ((ret = handle_client_bw(s, pkt)) < 0) |
2346 | return ret; |
2347 | break; |
2348 | case RTMP_PT_SERVER_BW: |
2349 | if ((ret = handle_server_bw(s, pkt)) < 0) |
2350 | return ret; |
2351 | break; |
2352 | case RTMP_PT_INVOKE: |
2353 | if ((ret = handle_invoke(s, pkt)) < 0) |
2354 | return ret; |
2355 | break; |
2356 | case RTMP_PT_VIDEO: |
2357 | case RTMP_PT_AUDIO: |
2358 | case RTMP_PT_METADATA: |
2359 | case RTMP_PT_NOTIFY: |
2360 | /* Audio, Video and Metadata packets are parsed in get_packet() */ |
2361 | break; |
2362 | default: |
2363 | av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type); |
2364 | break; |
2365 | } |
2366 | return 0; |
2367 | } |
2368 | |
/**
 * Copy the FLV tags embedded in an RTMP_PT_METADATA packet into the FLV
 * buffer, rewriting each tag's timestamp so the sequence advances from
 * the packet timestamp by the deltas between the embedded timestamps.
 *
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
{
    int ret, old_flv_size, type;
    const uint8_t *next;
    uint8_t *p;
    uint32_t size;
    uint32_t ts, cts, pts = 0;

    old_flv_size = update_offset(rt, pkt->size);

    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
        rt->flv_size = rt->flv_off = 0;
        return ret;
    }

    next = pkt->data;                // read cursor over the incoming tags
    p = rt->flv_data + old_flv_size; // write cursor into the FLV buffer

    /* copy data while rewriting timestamps */
    ts = pkt->timestamp;

    while (next - pkt->data < pkt->size - RTMP_HEADER) {
        type = bytestream_get_byte(&next);
        size = bytestream_get_be24(&next);
        // 32-bit timestamp: 24 low bits followed by an extension byte
        cts = bytestream_get_be24(&next);
        cts |= bytestream_get_byte(&next) << 24;
        if (!pts)
            pts = cts;
        // Advance ts by the delta between consecutive embedded timestamps.
        ts += cts - pts;
        pts = cts;
        // Stop if the remaining input cannot hold stream id (3) + payload
        // + trailing tag size (4).
        if (size + 3 + 4 > pkt->data + pkt->size - next)
            break;
        bytestream_put_byte(&p, type);
        bytestream_put_be24(&p, size);
        bytestream_put_be24(&p, ts);
        bytestream_put_byte(&p, ts >> 24);
        // Copy stream id + payload + the original 4-byte trailer; the
        // correct tag size written below overwrites the copied trailer.
        memcpy(p, next, size + 3 + 4);
        p += size + 3;
        bytestream_put_be32(&p, size + RTMP_HEADER);
        next += size + 3 + 4;
    }
    if (p != rt->flv_data + rt->flv_size) {
        // Input ended mid-tag: shrink the accounted size to what was written.
        av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
               "RTMP_PT_METADATA packet\n");
        rt->flv_size = p - rt->flv_data;
    }

    return 0;
}
2418 | |
2419 | /** |
2420 | * Interact with the server by receiving and sending RTMP packets until |
2421 | * there is some significant data (media data or expected status notification). |
2422 | * |
2423 | * @param s reading context |
2424 | * @param for_header non-zero value tells function to work until it |
2425 | * gets notification from the server that playing has been started, |
2426 | * otherwise function will work until some media data is received (or |
2427 | * an error happens) |
2428 | * @return 0 for successful operation, negative value in case of error |
2429 | */ |
2430 | static int get_packet(URLContext *s, int for_header) |
2431 | { |
2432 | RTMPContext *rt = s->priv_data; |
2433 | int ret; |
2434 | |
2435 | if (rt->state == STATE_STOPPED) |
2436 | return AVERROR_EOF; |
2437 | |
2438 | for (;;) { |
2439 | RTMPPacket rpkt = { 0 }; |
2440 | if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt, |
2441 | rt->in_chunk_size, &rt->prev_pkt[0], |
2442 | &rt->nb_prev_pkt[0])) <= 0) { |
2443 | if (ret == 0) { |
2444 | return AVERROR(EAGAIN); |
2445 | } else { |
2446 | return AVERROR(EIO); |
2447 | } |
2448 | } |
2449 | |
2450 | // Track timestamp for later use |
2451 | rt->last_timestamp = rpkt.timestamp; |
2452 | |
2453 | rt->bytes_read += ret; |
2454 | if (rt->bytes_read - rt->last_bytes_read > rt->client_report_size) { |
2455 | av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n"); |
2456 | if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0) |
2457 | return ret; |
2458 | rt->last_bytes_read = rt->bytes_read; |
2459 | } |
2460 | |
2461 | ret = rtmp_parse_result(s, rt, &rpkt); |
2462 | |
2463 | // At this point we must check if we are in the seek state and continue |
2464 | // with the next packet. handle_invoke will get us out of this state |
2465 | // when the right message is encountered |
2466 | if (rt->state == STATE_SEEKING) { |
2467 | ff_rtmp_packet_destroy(&rpkt); |
2468 | // We continue, let the natural flow of things happen: |
2469 | // AVERROR(EAGAIN) or handle_invoke gets us out of here |
2470 | continue; |
2471 | } |
2472 | |
2473 | if (ret < 0) {//serious error in current packet |
2474 | ff_rtmp_packet_destroy(&rpkt); |
2475 | return ret; |
2476 | } |
2477 | if (rt->do_reconnect && for_header) { |
2478 | ff_rtmp_packet_destroy(&rpkt); |
2479 | return 0; |
2480 | } |
2481 | if (rt->state == STATE_STOPPED) { |
2482 | ff_rtmp_packet_destroy(&rpkt); |
2483 | return AVERROR_EOF; |
2484 | } |
2485 | if (for_header && (rt->state == STATE_PLAYING || |
2486 | rt->state == STATE_PUBLISHING || |
2487 | rt->state == STATE_SENDING || |
2488 | rt->state == STATE_RECEIVING)) { |
2489 | ff_rtmp_packet_destroy(&rpkt); |
2490 | return 0; |
2491 | } |
2492 | if (!rpkt.size || !rt->is_input) { |
2493 | ff_rtmp_packet_destroy(&rpkt); |
2494 | continue; |
2495 | } |
2496 | if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO) { |
2497 | ret = append_flv_data(rt, &rpkt, 0); |
2498 | ff_rtmp_packet_destroy(&rpkt); |
2499 | return ret; |
2500 | } else if (rpkt.type == RTMP_PT_NOTIFY) { |
2501 | ret = handle_notify(s, &rpkt); |
2502 | ff_rtmp_packet_destroy(&rpkt); |
2503 | return ret; |
2504 | } else if (rpkt.type == RTMP_PT_METADATA) { |
2505 | ret = handle_metadata(rt, &rpkt); |
2506 | ff_rtmp_packet_destroy(&rpkt); |
2507 | return 0; |
2508 | } |
2509 | ff_rtmp_packet_destroy(&rpkt); |
2510 | } |
2511 | } |
2512 | |
2513 | static int rtmp_close(URLContext *h) |
2514 | { |
2515 | RTMPContext *rt = h->priv_data; |
2516 | int ret = 0, i, j; |
2517 | |
2518 | if (!rt->is_input) { |
2519 | rt->flv_data = NULL; |
2520 | if (rt->out_pkt.size) |
2521 | ff_rtmp_packet_destroy(&rt->out_pkt); |
2522 | if (rt->state > STATE_FCPUBLISH) |
2523 | ret = gen_fcunpublish_stream(h, rt); |
2524 | } |
2525 | if (rt->state > STATE_HANDSHAKED) |
2526 | ret = gen_delete_stream(h, rt); |
2527 | for (i = 0; i < 2; i++) { |
2528 | for (j = 0; j < rt->nb_prev_pkt[i]; j++) |
2529 | ff_rtmp_packet_destroy(&rt->prev_pkt[i][j]); |
2530 | av_freep(&rt->prev_pkt[i]); |
2531 | } |
2532 | |
2533 | free_tracked_methods(rt); |
2534 | av_freep(&rt->flv_data); |
2535 | ffurl_close(rt->stream); |
2536 | return ret; |
2537 | } |
2538 | |
2539 | /** |
2540 | * Insert a fake onMetadata packet into the FLV stream to notify the FLV |
2541 | * demuxer about the duration of the stream. |
2542 | * |
2543 | * This should only be done if there was no real onMetadata packet sent by the |
2544 | * server at the start of the stream and if we were able to retrieve a valid |
2545 | * duration via a getStreamLength call. |
2546 | * |
2547 | * @return 0 for successful operation, negative value in case of error |
2548 | */ |
static int inject_fake_duration_metadata(RTMPContext *rt)
{
    // We need to insert the metadata packet directly after the FLV
    // header, i.e. we need to move all other already read data by the
    // size of our fake metadata packet.

    uint8_t* p;
    // Keep old flv_data pointer
    uint8_t* old_flv_data = rt->flv_data;
    // Allocate a new flv_data pointer with enough space for the additional
    // package: 55 bytes = 11-byte tag header + 40 bytes of data + 4-byte
    // trailing tag size
    if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) {
        // Restore the old buffer so the caller's state stays valid.
        rt->flv_data = old_flv_data;
        return AVERROR(ENOMEM);
    }

    // Copy FLV header (13 bytes: 9-byte file header + initial
    // PreviousTagSize field)
    memcpy(rt->flv_data, old_flv_data, 13);
    // Copy remaining packets
    memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13);
    // Increase the size by the injected packet
    rt->flv_size += 55;
    // Delete the old FLV data
    av_freep(&old_flv_data);

    // Write the fake onMetaData tag right after the header.
    p = rt->flv_data + 13;
    bytestream_put_byte(&p, FLV_TAG_TYPE_META);
    bytestream_put_be24(&p, 40); // size of data part (sum of all parts below)
    bytestream_put_be24(&p, 0);  // timestamp
    bytestream_put_be32(&p, 0);  // reserved

    // first event name as a string
    bytestream_put_byte(&p, AMF_DATA_TYPE_STRING);
    // "onMetaData" as AMF string
    bytestream_put_be16(&p, 10);
    bytestream_put_buffer(&p, "onMetaData", 10);

    // mixed array (hash) with size and string/type/data tuples
    bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY);
    bytestream_put_be32(&p, 1); // metadata_count

    // "duration" as AMF string
    bytestream_put_be16(&p, 8);
    bytestream_put_buffer(&p, "duration", 8);
    bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER);
    bytestream_put_be64(&p, av_double2int(rt->duration));

    // Finalise object
    bytestream_put_be16(&p, 0); // Empty string
    bytestream_put_byte(&p, AMF_END_OF_OBJECT);
    bytestream_put_be32(&p, 40 + RTMP_HEADER); // size of data part (sum of all parts above)

    return 0;
}
2602 | |
2603 | /** |
2604 | * Open RTMP connection and verify that the stream can be played. |
2605 | * |
2606 | * URL syntax: rtmp://server[:port][/app][/playpath] |
2607 | * where 'app' is first one or two directories in the path |
2608 | * (e.g. /ondemand/, /flash/live/, etc.) |
2609 | * and 'playpath' is a file name (the rest of the path, |
2610 | * may be prefixed with "mp4:") |
2611 | */ |
2612 | static int rtmp_open(URLContext *s, const char *uri, int flags, AVDictionary **opts) |
2613 | { |
2614 | RTMPContext *rt = s->priv_data; |
2615 | char proto[8], hostname[256], path[1024], auth[100], *fname; |
2616 | char *old_app, *qmark, *n, fname_buffer[1024]; |
2617 | uint8_t buf[2048]; |
2618 | int port; |
2619 | int ret; |
2620 | |
2621 | if (rt->listen_timeout > 0) |
2622 | rt->listen = 1; |
2623 | |
2624 | rt->is_input = !(flags & AVIO_FLAG_WRITE); |
2625 | |
2626 | av_url_split(proto, sizeof(proto), auth, sizeof(auth), |
2627 | hostname, sizeof(hostname), &port, |
2628 | path, sizeof(path), s->filename); |
2629 | |
2630 | n = strchr(path, ' '); |
2631 | if (n) { |
2632 | av_log(s, AV_LOG_WARNING, |
2633 | "Detected librtmp style URL parameters, these aren't supported " |
2634 | "by the libavformat internal RTMP handler currently enabled. " |
2635 | "See the documentation for the correct way to pass parameters.\n"); |
2636 | *n = '\0'; // Trim not supported part |
2637 | } |
2638 | |
2639 | if (auth[0]) { |
2640 | char *ptr = strchr(auth, ':'); |
2641 | if (ptr) { |
2642 | *ptr = '\0'; |
2643 | av_strlcpy(rt->username, auth, sizeof(rt->username)); |
2644 | av_strlcpy(rt->password, ptr + 1, sizeof(rt->password)); |
2645 | } |
2646 | } |
2647 | |
2648 | if (rt->listen && strcmp(proto, "rtmp")) { |
2649 | av_log(s, AV_LOG_ERROR, "rtmp_listen not available for %s\n", |
2650 | proto); |
2651 | return AVERROR(EINVAL); |
2652 | } |
2653 | if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) { |
2654 | if (!strcmp(proto, "rtmpts")) |
2655 | av_dict_set(opts, "ffrtmphttp_tls", "1", 1); |
2656 | |
2657 | /* open the http tunneling connection */ |
2658 | ff_url_join(buf, sizeof(buf), "ffrtmphttp", NULL, hostname, port, NULL); |
2659 | } else if (!strcmp(proto, "rtmps")) { |
2660 | /* open the tls connection */ |
2661 | if (port < 0) |
2662 | port = RTMPS_DEFAULT_PORT; |
2663 | ff_url_join(buf, sizeof(buf), "tls", NULL, hostname, port, NULL); |
2664 | } else if (!strcmp(proto, "rtmpe") || (!strcmp(proto, "rtmpte"))) { |
2665 | if (!strcmp(proto, "rtmpte")) |
2666 | av_dict_set(opts, "ffrtmpcrypt_tunneling", "1", 1); |
2667 | |
2668 | /* open the encrypted connection */ |
2669 | ff_url_join(buf, sizeof(buf), "ffrtmpcrypt", NULL, hostname, port, NULL); |
2670 | rt->encrypted = 1; |
2671 | } else { |
2672 | /* open the tcp connection */ |
2673 | if (port < 0) |
2674 | port = RTMP_DEFAULT_PORT; |
2675 | if (rt->listen) |
2676 | ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, |
2677 | "?listen&listen_timeout=%d", |
2678 | rt->listen_timeout * 1000); |
2679 | else |
2680 | ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL); |
2681 | } |
2682 | |
2683 | reconnect: |
2684 | if ((ret = ffurl_open_whitelist(&rt->stream, buf, AVIO_FLAG_READ_WRITE, |
2685 | &s->interrupt_callback, opts, |
2686 | s->protocol_whitelist, s->protocol_blacklist, s)) < 0) { |
2687 | av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf); |
2688 | goto fail; |
2689 | } |
2690 | |
2691 | if (rt->swfverify) { |
2692 | if ((ret = rtmp_calc_swfhash(s)) < 0) |
2693 | goto fail; |
2694 | } |
2695 | |
2696 | rt->state = STATE_START; |
2697 | if (!rt->listen && (ret = rtmp_handshake(s, rt)) < 0) |
2698 | goto fail; |
2699 | if (rt->listen && (ret = rtmp_server_handshake(s, rt)) < 0) |
2700 | goto fail; |
2701 | |
2702 | rt->out_chunk_size = 128; |
2703 | rt->in_chunk_size = 128; // Probably overwritten later |
2704 | rt->state = STATE_HANDSHAKED; |
2705 | |
2706 | // Keep the application name when it has been defined by the user. |
2707 | old_app = rt->app; |
2708 | |
2709 | rt->app = av_malloc(APP_MAX_LENGTH); |
2710 | if (!rt->app) { |
2711 | ret = AVERROR(ENOMEM); |
2712 | goto fail; |
2713 | } |
2714 | |
2715 | //extract "app" part from path |
2716 | qmark = strchr(path, '?'); |
2717 | if (qmark && strstr(qmark, "slist=")) { |
2718 | char* amp; |
2719 | // After slist we have the playpath, the full path is used as app |
2720 | av_strlcpy(rt->app, path + 1, APP_MAX_LENGTH); |
2721 | fname = strstr(path, "slist=") + 6; |
2722 | // Strip any further query parameters from fname |
2723 | amp = strchr(fname, '&'); |
2724 | if (amp) { |
2725 | av_strlcpy(fname_buffer, fname, FFMIN(amp - fname + 1, |
2726 | sizeof(fname_buffer))); |
2727 | fname = fname_buffer; |
2728 | } |
2729 | } else if (!strncmp(path, "/ondemand/", 10)) { |
2730 | fname = path + 10; |
2731 | memcpy(rt->app, "ondemand", 9); |
2732 | } else { |
2733 | char *next = *path ? path + 1 : path; |
2734 | char *p = strchr(next, '/'); |
2735 | if (!p) { |
2736 | if (old_app) { |
2737 | // If name of application has been defined by the user, assume that |
2738 | // playpath is provided in the URL |
2739 | fname = next; |
2740 | } else { |
2741 | fname = NULL; |
2742 | av_strlcpy(rt->app, next, APP_MAX_LENGTH); |
2743 | } |
2744 | } else { |
2745 | // make sure we do not mismatch a playpath for an application instance |
2746 | char *c = strchr(p + 1, ':'); |
2747 | fname = strchr(p + 1, '/'); |
2748 | if (!fname || (c && c < fname)) { |
2749 | fname = p + 1; |
2750 | av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH)); |
2751 | } else { |
2752 | fname++; |
2753 | av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH)); |
2754 | } |
2755 | } |
2756 | } |
2757 | |
2758 | if (old_app) { |
2759 | // The name of application has been defined by the user, override it. |
2760 | if (strlen(old_app) >= APP_MAX_LENGTH) { |
2761 | ret = AVERROR(EINVAL); |
2762 | goto fail; |
2763 | } |
2764 | av_free(rt->app); |
2765 | rt->app = old_app; |
2766 | } |
2767 | |
2768 | if (!rt->playpath) { |
2769 | rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH); |
2770 | if (!rt->playpath) { |
2771 | ret = AVERROR(ENOMEM); |
2772 | goto fail; |
2773 | } |
2774 | |
2775 | if (fname) { |
2776 | int len = strlen(fname); |
2777 | if (!strchr(fname, ':') && len >= 4 && |
2778 | (!strcmp(fname + len - 4, ".f4v") || |
2779 | !strcmp(fname + len - 4, ".mp4"))) { |
2780 | memcpy(rt->playpath, "mp4:", 5); |
2781 | } else { |
2782 | if (len >= 4 && !strcmp(fname + len - 4, ".flv")) |
2783 | fname[len - 4] = '\0'; |
2784 | rt->playpath[0] = 0; |
2785 | } |
2786 | av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH); |
2787 | } else { |
2788 | rt->playpath[0] = '\0'; |
2789 | } |
2790 | } |
2791 | |
2792 | if (!rt->tcurl) { |
2793 | rt->tcurl = av_malloc(TCURL_MAX_LENGTH); |
2794 | if (!rt->tcurl) { |
2795 | ret = AVERROR(ENOMEM); |
2796 | goto fail; |
2797 | } |
2798 | ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname, |
2799 | port, "/%s", rt->app); |
2800 | } |
2801 | |
2802 | if (!rt->flashver) { |
2803 | rt->flashver = av_malloc(FLASHVER_MAX_LENGTH); |
2804 | if (!rt->flashver) { |
2805 | ret = AVERROR(ENOMEM); |
2806 | goto fail; |
2807 | } |
2808 | if (rt->is_input) { |
2809 | snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d", |
2810 | RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2, |
2811 | RTMP_CLIENT_VER3, RTMP_CLIENT_VER4); |
2812 | } else { |
2813 | snprintf(rt->flashver, FLASHVER_MAX_LENGTH, |
2814 | "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT); |
2815 | } |
2816 | } |
2817 | |
2818 | rt->client_report_size = 1048576; |
2819 | rt->bytes_read = 0; |
2820 | rt->has_audio = 0; |
2821 | rt->has_video = 0; |
2822 | rt->received_metadata = 0; |
2823 | rt->last_bytes_read = 0; |
2824 | rt->server_bw = 2500000; |
2825 | rt->duration = 0; |
2826 | |
2827 | av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n", |
2828 | proto, path, rt->app, rt->playpath); |
2829 | if (!rt->listen) { |
2830 | if ((ret = gen_connect(s, rt)) < 0) |
2831 | goto fail; |
2832 | } else { |
2833 | if ((ret = read_connect(s, s->priv_data)) < 0) |
2834 | goto fail; |
2835 | } |
2836 | |
2837 | do { |
2838 | ret = get_packet(s, 1); |
2839 | } while (ret == AVERROR(EAGAIN)); |
2840 | if (ret < 0) |
2841 | goto fail; |
2842 | |
2843 | if (rt->do_reconnect) { |
2844 | int i; |
2845 | ffurl_close(rt->stream); |
2846 | rt->stream = NULL; |
2847 | rt->do_reconnect = 0; |
2848 | rt->nb_invokes = 0; |
2849 | for (i = 0; i < 2; i++) |
2850 | memset(rt->prev_pkt[i], 0, |
2851 | sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]); |
2852 | free_tracked_methods(rt); |
2853 | goto reconnect; |
2854 | } |
2855 | |
2856 | if (rt->is_input) { |
2857 | // generate FLV header for demuxer |
2858 | rt->flv_size = 13; |
2859 | if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) |
2860 | goto fail; |
2861 | rt->flv_off = 0; |
2862 | memcpy(rt->flv_data, "FLV\1\0\0\0\0\011\0\0\0\0", rt->flv_size); |
2863 | |
2864 | // Read packets until we reach the first A/V packet or read metadata. |
2865 | // If there was a metadata package in front of the A/V packets, we can |
2866 | // build the FLV header from this. If we do not receive any metadata, |
2867 | // the FLV decoder will allocate the needed streams when their first |
2868 | // audio or video packet arrives. |
2869 | while (!rt->has_audio && !rt->has_video && !rt->received_metadata) { |
2870 | if ((ret = get_packet(s, 0)) < 0) |
2871 | goto fail; |
2872 | } |
2873 | |
2874 | // Either after we have read the metadata or (if there is none) the |
2875 | // first packet of an A/V stream, we have a better knowledge about the |
2876 | // streams, so set the FLV header accordingly. |
2877 | if (rt->has_audio) { |
2878 | rt->flv_data[4] |= FLV_HEADER_FLAG_HASAUDIO; |
2879 | } |
2880 | if (rt->has_video) { |
2881 | rt->flv_data[4] |= FLV_HEADER_FLAG_HASVIDEO; |
2882 | } |
2883 | |
2884 | // If we received the first packet of an A/V stream and no metadata but |
2885 | // the server returned a valid duration, create a fake metadata packet |
2886 | // to inform the FLV decoder about the duration. |
2887 | if (!rt->received_metadata && rt->duration > 0) { |
2888 | if ((ret = inject_fake_duration_metadata(rt)) < 0) |
2889 | goto fail; |
2890 | } |
2891 | } else { |
2892 | rt->flv_size = 0; |
2893 | rt->flv_data = NULL; |
2894 | rt->flv_off = 0; |
2895 | rt->skip_bytes = 13; |
2896 | } |
2897 | |
2898 | s->max_packet_size = rt->stream->max_packet_size; |
2899 | s->is_streamed = 1; |
2900 | return 0; |
2901 | |
2902 | fail: |
2903 | av_dict_free(opts); |
2904 | rtmp_close(s); |
2905 | return ret; |
2906 | } |
2907 | |
2908 | static int rtmp_read(URLContext *s, uint8_t *buf, int size) |
2909 | { |
2910 | RTMPContext *rt = s->priv_data; |
2911 | int orig_size = size; |
2912 | int ret; |
2913 | |
2914 | while (size > 0) { |
2915 | int data_left = rt->flv_size - rt->flv_off; |
2916 | |
2917 | if (data_left >= size) { |
2918 | memcpy(buf, rt->flv_data + rt->flv_off, size); |
2919 | rt->flv_off += size; |
2920 | return orig_size; |
2921 | } |
2922 | if (data_left > 0) { |
2923 | memcpy(buf, rt->flv_data + rt->flv_off, data_left); |
2924 | buf += data_left; |
2925 | size -= data_left; |
2926 | rt->flv_off = rt->flv_size; |
2927 | return data_left; |
2928 | } |
2929 | if ((ret = get_packet(s, 0)) < 0) |
2930 | return ret; |
2931 | } |
2932 | return orig_size; |
2933 | } |
2934 | |
2935 | static int64_t rtmp_seek(URLContext *s, int stream_index, int64_t timestamp, |
2936 | int flags) |
2937 | { |
2938 | RTMPContext *rt = s->priv_data; |
2939 | int ret; |
2940 | av_log(s, AV_LOG_DEBUG, |
2941 | "Seek on stream index %d at timestamp %"PRId64" with flags %08x\n", |
2942 | stream_index, timestamp, flags); |
2943 | if ((ret = gen_seek(s, rt, timestamp)) < 0) { |
2944 | av_log(s, AV_LOG_ERROR, |
2945 | "Unable to send seek command on stream index %d at timestamp " |
2946 | "%"PRId64" with flags %08x\n", |
2947 | stream_index, timestamp, flags); |
2948 | return ret; |
2949 | } |
2950 | rt->flv_off = rt->flv_size; |
2951 | rt->state = STATE_SEEKING; |
2952 | return timestamp; |
2953 | } |
2954 | |
2955 | static int rtmp_pause(URLContext *s, int pause) |
2956 | { |
2957 | RTMPContext *rt = s->priv_data; |
2958 | int ret; |
2959 | av_log(s, AV_LOG_DEBUG, "Pause at timestamp %d\n", |
2960 | rt->last_timestamp); |
2961 | if ((ret = gen_pause(s, rt, pause, rt->last_timestamp)) < 0) { |
2962 | av_log(s, AV_LOG_ERROR, "Unable to send pause command at timestamp %d\n", |
2963 | rt->last_timestamp); |
2964 | return ret; |
2965 | } |
2966 | return 0; |
2967 | } |
2968 | |
/**
 * Send muxed FLV data over the RTMP connection.
 *
 * The input is a raw FLV byte stream (as produced by the FLV muxer) that
 * may be split at arbitrary positions across calls, so all parsing state
 * (skip_bytes, flv_header_bytes, flv_off) lives in the RTMPContext. Each
 * FLV tag is repackaged as one RTMP packet and sent. Once per
 * flush_interval packets, one incoming server packet is polled for and
 * handled so control messages do not pile up while publishing.
 *
 * @param s    URL context
 * @param buf  FLV data to send
 * @param size number of bytes in buf
 * @return size on success, a negative AVERROR code on failure
 */
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int size_temp = size;
    int pktsize, pkttype, copy;
    uint32_t ts;
    const uint8_t *buf_temp = buf;
    uint8_t c;
    int ret;

    do {
        // Discard input bytes that belong to no RTMP packet: the 13-byte
        // FLV file header on the first call (skip_bytes is set to 13 in
        // rtmp_open) and the 4-byte PreviousTagSize field after every tag
        // (skip_bytes is set to 4 below).
        if (rt->skip_bytes) {
            int skip = FFMIN(rt->skip_bytes, size_temp);
            buf_temp += skip;
            size_temp -= skip;
            rt->skip_bytes -= skip;
            continue;
        }

        if (rt->flv_header_bytes < RTMP_HEADER) {
            const uint8_t *header = rt->flv_header;
            int channel = RTMP_AUDIO_CHANNEL;

            // Accumulate the 11-byte FLV tag header; it may arrive split
            // across several rtmp_write() calls.
            copy = FFMIN(RTMP_HEADER - rt->flv_header_bytes, size_temp);
            bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
            rt->flv_header_bytes += copy;
            size_temp -= copy;
            if (rt->flv_header_bytes < RTMP_HEADER)
                break;

            // Parse the FLV tag header: packet type, payload size, and
            // the timestamp stored as 24 low bits plus an extension byte.
            pkttype = bytestream_get_byte(&header);
            pktsize = bytestream_get_be24(&header);
            ts = bytestream_get_be24(&header);
            ts |= bytestream_get_byte(&header) << 24;
            bytestream_get_be24(&header); // skip the 3-byte stream id
            rt->flv_size = pktsize;

            if (pkttype == RTMP_PT_VIDEO)
                channel = RTMP_VIDEO_CHANNEL;

            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
                                                     &rt->nb_prev_pkt[1],
                                                     channel)) < 0)
                    return ret;
                // Force sending a full 12 bytes header by clearing the
                // channel id, to make it not match a potential earlier
                // packet in the same channel.
                rt->prev_pkt[1][channel].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            if ((ret = ff_rtmp_packet_create(&rt->out_pkt, channel,
                                             pkttype, ts, pktsize)) < 0)
                return ret;

            rt->out_pkt.extra = rt->stream_id;
            rt->flv_data = rt->out_pkt.data;
        }

        // Copy as much of the tag payload as is currently available into
        // the packet being assembled.
        copy = FFMIN(rt->flv_size - rt->flv_off, size_temp);
        bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, copy);
        rt->flv_off += copy;
        size_temp -= copy;

        if (rt->flv_off == rt->flv_size) {
            // Payload complete; the next 4 input bytes are the FLV
            // PreviousTagSize field and must be dropped.
            rt->skip_bytes = 4;

            if (rt->out_pkt.type == RTMP_PT_NOTIFY) {
                // For onMetaData and |RtmpSampleAccess packets, prepend
                // @setDataFrame before sending. Other RTMP_PT_NOTIFY
                // packets (e.g. onTextData and onCuePoint) are sent
                // unmodified.
                uint8_t commandbuffer[64];
                int stringlen = 0;
                GetByteContext gbc;

                bytestream2_init(&gbc, rt->flv_data, rt->flv_size);
                if (!ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
                                        &stringlen)) {
                    if (!strcmp(commandbuffer, "onMetaData") ||
                        !strcmp(commandbuffer, "|RtmpSampleAccess")) {
                        uint8_t *ptr;
                        // 16 bytes = AMF0 string marker (1) + length (2)
                        // + strlen("@setDataFrame") (13).
                        if ((ret = av_reallocp(&rt->out_pkt.data, rt->out_pkt.size + 16)) < 0) {
                            rt->flv_size = rt->flv_off = rt->flv_header_bytes = 0;
                            return ret;
                        }
                        memmove(rt->out_pkt.data + 16, rt->out_pkt.data, rt->out_pkt.size);
                        rt->out_pkt.size += 16;
                        ptr = rt->out_pkt.data;
                        ff_amf_write_string(&ptr, "@setDataFrame");
                    }
                }
            }

            if ((ret = rtmp_send_packet(rt, &rt->out_pkt, 0)) < 0)
                return ret;
            // Reset the parser state for the next FLV tag.
            rt->flv_size = 0;
            rt->flv_off = 0;
            rt->flv_header_bytes = 0;
            rt->flv_nb_packets++;
        }
    } while (buf_temp - buf < size);

    if (rt->flv_nb_packets < rt->flush_interval)
        return size;
    rt->flv_nb_packets = 0;

    // Poll for one packet sent by the server so incoming control
    // messages are still processed while we only publish.

    /* set stream into nonblocking mode */
    rt->stream->flags |= AVIO_FLAG_NONBLOCK;

    /* try to read one byte from the stream */
    ret = ffurl_read(rt->stream, &c, 1);

    /* switch the stream back into blocking mode */
    rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;

    if (ret == AVERROR(EAGAIN)) {
        /* no incoming data to handle */
        return size;
    } else if (ret < 0) {
        return ret;
    } else if (ret == 1) {
        RTMPPacket rpkt = { 0 };

        // We already consumed the first header byte (c); hand it to the
        // internal reader so it can reconstruct the full packet.
        if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
                                                rt->in_chunk_size,
                                                &rt->prev_pkt[0],
                                                &rt->nb_prev_pkt[0], c)) <= 0)
            return ret;

        if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
            return ret;

        ff_rtmp_packet_destroy(&rpkt);
    }

    return size;
}
3109 | |
// Shorthands for the AVOption table below.
#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM

/**
 * Private options shared by every rtmp* protocol flavor; each option maps
 * onto a field of RTMPContext via OFFSET(). "rtmp_live" and "rtmp_listen"
 * have named constant values declared in the same unit (unit name given
 * as the trailing string).
 */
static const AVOption rtmp_options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {.i64 = 3000}, 0, INT_MAX, DEC|ENC},
    {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {.i64 = 10}, 0, INT_MAX, ENC},
    {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {.i64 = -2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
    {"any", "both", 0, AV_OPT_TYPE_CONST, {.i64 = -2}, 0, 0, DEC, "rtmp_live"},
    {"live", "live stream", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, DEC, "rtmp_live"},
    {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, DEC, "rtmp_live"},
    {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    // SWF verification options (see rtmp_calc_swfhash usage in rtmp_open).
    {"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
    {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    // Server mode: accept an incoming connection instead of dialing out.
    {"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    {"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
    { NULL },
};
3137 | |
/**
 * Declare the AVClass and URLProtocol for one RTMP flavor.
 *
 * All flavors share the same callbacks, option table (rtmp_options) and
 * RTMPContext; they differ only in the protocol name (#flavor), which
 * selects the transport/crypto variant handled elsewhere in this file.
 */
#define RTMP_PROTOCOL(flavor)                    \
static const AVClass flavor##_class = {          \
    .class_name = #flavor,                       \
    .item_name  = av_default_item_name,          \
    .option     = rtmp_options,                  \
    .version    = LIBAVUTIL_VERSION_INT,         \
};                                               \
                                                 \
const URLProtocol ff_##flavor##_protocol = {     \
    .name           = #flavor,                   \
    .url_open2      = rtmp_open,                 \
    .url_read       = rtmp_read,                 \
    .url_read_seek  = rtmp_seek,                 \
    .url_read_pause = rtmp_pause,                \
    .url_write      = rtmp_write,                \
    .url_close      = rtmp_close,                \
    .priv_data_size = sizeof(RTMPContext),       \
    .flags          = URL_PROTOCOL_FLAG_NETWORK, \
    .priv_data_class= &flavor##_class,           \
};

// Instantiate the plain, encrypted (rtmpe), TLS (rtmps) and HTTP-tunneled
// (rtmpt/rtmpte/rtmpts) protocol handlers.
RTMP_PROTOCOL(rtmp)
RTMP_PROTOCOL(rtmpe)
RTMP_PROTOCOL(rtmps)
RTMP_PROTOCOL(rtmpt)
RTMP_PROTOCOL(rtmpte)
RTMP_PROTOCOL(rtmpts)
3166 |