The FFmpeg code referenced here is the 3.2 release; the input is an FLV file.
/* If not enough info to get the stream parameters, we decode the
first frames to get it. (used in mpeg case for example) */
ret = avformat_find_stream_info(ic, opts);
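For reference, a minimal caller is sketched below (a hypothetical standalone program, not taken from this article; the file name and error handling are placeholders). It also times the call, since the point of this analysis is how long the probing can take for FLV input. The full 3.2 implementation of avformat_find_stream_info() follows.

#include <inttypes.h>
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/time.h>

int main(void)
{
    const char *url = "input.flv";   /* placeholder input path */
    AVFormatContext *ic = NULL;
    int ret;

    av_register_all();               /* still required in FFmpeg 3.2 */

    if ((ret = avformat_open_input(&ic, url, NULL, NULL)) < 0)
        return 1;

    int64_t t0 = av_gettime_relative();
    ret = avformat_find_stream_info(ic, NULL);
    int64_t t1 = av_gettime_relative();

    printf("avformat_find_stream_info: ret=%d, took %"PRId64" us, nb_streams=%u\n",
           ret, t1 - t0, ic->nb_streams);

    avformat_close_input(&ic);
    return ret < 0;
}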
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) { int i, count = 0, ret = 0, j; int64_t read_size; AVStream *st; AVCodecContext *avctx; AVPacket pkt1, *pkt; int64_t old_offset = avio_tell(ic->pb); // new streams might appear, no options for those int orig_nb_streams = ic->nb_streams; int flush_codecs; int64_t max_analyze_duration = ic->max_analyze_duration; int64_t max_stream_analyze_duration; int64_t max_subtitle_analyze_duration; int64_t probesize = ic->probesize; int eof_reached = 0; flush_codecs = probesize > 0; av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN); max_stream_analyze_duration = max_analyze_duration; max_subtitle_analyze_duration = max_analyze_duration; if (!max_analyze_duration) { max_stream_analyze_duration = max_analyze_duration = 5*AV_TIME_BASE; max_subtitle_analyze_duration = 30*AV_TIME_BASE; if (!strcmp(ic->iformat->name, "flv")) max_stream_analyze_duration = 90*AV_TIME_BASE; if (!strcmp(ic->iformat->name, "mpeg") || !strcmp(ic->iformat->name, "mpegts")) max_stream_analyze_duration = 7*AV_TIME_BASE; } if (ic->pb) av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d nb_streams:%d\n", avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, ic->nb_streams); for (i = 0; i < ic->nb_streams; i++) { const AVCodec *codec; AVDictionary *thread_opt = NULL; st = ic->streams[i]; avctx = st->internal->avctx; if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) { /* if (!st->time_base.num) st->time_base = */ if (!avctx->time_base.num) avctx->time_base = st->time_base; } /* check if the caller has overridden the codec id */ #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS if (st->codec->codec_id != st->internal->orig_codec_id) { st->codecpar->codec_id = st->codec->codec_id; st->codecpar->codec_type = st->codec->codec_type; st->internal->orig_codec_id = st->codec->codec_id; } FF_ENABLE_DEPRECATION_WARNINGS #endif // only for the split stuff if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE) && st->request_probe <= 0) { st->parser = av_parser_init(st->codecpar->codec_id); if (st->parser) { if (st->need_parsing == AVSTREAM_PARSE_HEADERS) { st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) { st->parser->flags |= PARSER_FLAG_USE_CODEC_TS; } } else if (st->need_parsing) { av_log(ic, AV_LOG_VERBOSE, "parser not found for codec " "%s, packets or times may be invalid.\n", avcodec_get_name(st->codecpar->codec_id)); } } if (st->codecpar->codec_id != st->internal->orig_codec_id) st->internal->orig_codec_id = st->codecpar->codec_id; ret = avcodec_parameters_to_context(avctx, st->codecpar); if (ret < 0) goto find_stream_info_err; if (st->request_probe <= 0) st->internal->avctx_inited = 1; codec = find_probe_decoder(ic, st, st->codecpar->codec_id); /* Force thread count to 1 since the H.264 decoder will not extract * SPS and PPS to extradata during multi-threaded decoding. */ av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0); if (ic->codec_whitelist) av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0); /* Ensure that subtitle_header is properly set. */ if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE && codec && !avctx->codec) { if (avcodec_open2(avctx, codec, options ? 
&options[i] : &thread_opt) < 0) av_log(ic, AV_LOG_WARNING, "Failed to open codec in av_find_stream_info\n"); } // Try to just open decoders, in case this is enough to get parameters. if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) { if (codec && !avctx->codec) if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0) av_log(ic, AV_LOG_WARNING, "Failed to open codec in av_find_stream_info\n"); } if (!options) av_dict_free(&thread_opt); } for (i = 0; i < ic->nb_streams; i++) { #if FF_API_R_FRAME_RATE ic->streams[i]->info->last_dts = AV_NOPTS_VALUE; #endif ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE; ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE; } read_size = 0; for (;;) { int analyzed_all_streams; if (ff_check_interrupt(&ic->interrupt_callback)) { ret = AVERROR_EXIT; av_log(ic, AV_LOG_DEBUG, "interrupted\n"); break; } /* check if one codec still needs to be handled */ for (i = 0; i < ic->nb_streams; i++) { int fps_analyze_framecount = 20; st = ic->streams[i]; if (!has_codec_parameters(st, NULL)) break; /* If the timebase is coarse (like the usual millisecond precision * of mkv), we need to analyze more frames to reliably arrive at * the correct fps. */ if (av_q2d(st->time_base) > 0.0005) fps_analyze_framecount *= 2; if (!tb_unreliable(st->internal->avctx)) fps_analyze_framecount = 0; if (ic->fps_probe_size >= 0) fps_analyze_framecount = ic->fps_probe_size; if (st->disposition & AV_DISPOSITION_ATTACHED_PIC) fps_analyze_framecount = 0; /* variable fps and no guess at the real fps */ if (!(st->r_frame_rate.num && st->avg_frame_rate.num) && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { int count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ? st->info->codec_info_duration_fields/2 : st->info->duration_count; if (count < fps_analyze_framecount) break; } if (st->parser && st->parser->parser->split && !st->internal->avctx->extradata) break; if (st->first_dts == AV_NOPTS_VALUE && !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) && st->codec_info_nb_frames < ((st->disposition & AV_DISPOSITION_ATTACHED_PIC) ? 1 : ic->max_ts_probe) && (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)) break; } analyzed_all_streams = 0; if (i == ic->nb_streams) { analyzed_all_streams = 1; /* NOTE: If the format has no header, then we need to read some * packets to get most of the streams, so we cannot stop here. */ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { /* If we found the info for all the codecs, we can stop. */ ret = count; av_log(ic, AV_LOG_DEBUG, "All info found\n"); flush_codecs = 0; break; } } /* We did not get all the codec info, but we read too much data. */ if (read_size >= probesize) { ret = count; av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %"PRId64" bytes reached\n", probesize); for (i = 0; i < ic->nb_streams; i++) if (!ic->streams[i]->r_frame_rate.num && ic->streams[i]->info->duration_count <= 1 && ic->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && strcmp(ic->iformat->name, "image2")) av_log(ic, AV_LOG_WARNING, "Stream #%d: not enough frames to estimate rate; " "consider increasing probesize\n", i); break; } /* NOTE: A new stream can be added there if no header in file * (AVFMTCTX_NOHEADER). 
*/ ret = read_frame_internal(ic, &pkt1); if (ret == AVERROR(EAGAIN)) continue; if (ret < 0) { /* EOF or error*/ eof_reached = 1; break; } pkt = &pkt1; if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) { ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt, &ic->internal->packet_buffer_end, 0); if (ret < 0) goto find_stream_info_err; } st = ic->streams[pkt->stream_index]; if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) read_size += pkt->size; avctx = st->internal->avctx; if (!st->internal->avctx_inited) { ret = avcodec_parameters_to_context(avctx, st->codecpar); if (ret < 0) goto find_stream_info_err; st->internal->avctx_inited = 1; } if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) { /* check for non-increasing dts */ if (st->info->fps_last_dts != AV_NOPTS_VALUE && st->info->fps_last_dts >= pkt->dts) { av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: packet %d with DTS " "%"PRId64", packet %d with DTS %"PRId64"\n", st->index, st->info->fps_last_dts_idx, st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts); st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE; } /* Check for a discontinuity in dts. If the difference in dts * is more than 1000 times the average packet duration in the * sequence, we treat it as a discontinuity. */ if (st->info->fps_last_dts != AV_NOPTS_VALUE && st->info->fps_last_dts_idx > st->info->fps_first_dts_idx && (pkt->dts - st->info->fps_last_dts) / 1000 > (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) { av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: packet %d with DTS " "%"PRId64", packet %d with DTS %"PRId64"\n", st->index, st->info->fps_last_dts_idx, st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts); st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE; } /* update stored dts values */ if (st->info->fps_first_dts == AV_NOPTS_VALUE) { st->info->fps_first_dts = pkt->dts; st->info->fps_first_dts_idx = st->codec_info_nb_frames; } st->info->fps_last_dts = pkt->dts; st->info->fps_last_dts_idx = st->codec_info_nb_frames; } if (st->codec_info_nb_frames>1) { int64_t t = 0; int64_t limit; if (st->time_base.den > 0) t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q); if (st->avg_frame_rate.num > 0) t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q)); if ( t == 0 && st->codec_info_nb_frames>30 && st->info->fps_first_dts != AV_NOPTS_VALUE && st->info->fps_last_dts != AV_NOPTS_VALUE) t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q)); if (analyzed_all_streams) limit = max_analyze_duration; else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) limit = max_subtitle_analyze_duration; else limit = max_stream_analyze_duration; if (t >= limit) { av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds st:%d\n", limit, t, pkt->stream_index); if (ic->flags & AVFMT_FLAG_NOBUFFER) av_packet_unref(pkt); break; } if (pkt->duration) { if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE && pkt->pts != AV_NOPTS_VALUE && pkt->pts >= st->start_time) { st->info->codec_info_duration = FFMIN(pkt->pts - st->start_time, st->info->codec_info_duration + pkt->duration); } else st->info->codec_info_duration += pkt->duration; st->info->codec_info_duration_fields += st->parser && st->need_parsing && avctx->ticks_per_frame ==2 ? 
st->parser->repeat_pict + 1 : 2; } } #if FF_API_R_FRAME_RATE if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ff_rfps_add_frame(ic, st, pkt->dts); #endif if (st->parser && st->parser->parser->split && !avctx->extradata) { int i = st->parser->parser->split(avctx, pkt->data, pkt->size); if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) { avctx->extradata_size = i; avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) return AVERROR(ENOMEM); memcpy(avctx->extradata, pkt->data, avctx->extradata_size); } } /* If still no information, we try to open the codec and to * decompress the frame. We try to avoid that in most cases as * it takes longer and uses more memory. For MPEG-4, we need to * decompress for QuickTime. * * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at * least one frame of codec data, this makes sure the codec initializes * the channel configuration and does not only trust the values from * the container. */ try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams) ? &options[i] : NULL); if (ic->flags & AVFMT_FLAG_NOBUFFER) av_packet_unref(pkt); st->codec_info_nb_frames++; count++; } if (eof_reached) { int stream_index; for (stream_index = 0; stream_index < ic->nb_streams; stream_index++) { st = ic->streams[stream_index]; avctx = st->internal->avctx; if (!has_codec_parameters(st, NULL)) { const AVCodec *codec = find_probe_decoder(ic, st, st->codecpar->codec_id); if (codec && !avctx->codec) { if (avcodec_open2(avctx, codec, (options && stream_index < orig_nb_streams) ? &options[stream_index] : NULL) < 0) av_log(ic, AV_LOG_WARNING, "Failed to open codec in av_find_stream_info\n"); } } // EOF already reached while reading the stream above. // So continue with reoordering DTS with whatever delay we have. if (ic->internal->packet_buffer && !has_decode_delay_been_guessed(st)) { update_dts_from_pts(ic, stream_index, ic->internal->packet_buffer); } } } if (flush_codecs) { AVPacket empty_pkt = { 0 }; int err = 0; av_init_packet(&empty_pkt); for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; /* flush the decoders */ if (st->info->found_decoder == 1) { do { err = try_decode_frame(ic, st, &empty_pkt, (options && i < orig_nb_streams) ? 
&options[i] : NULL); } while (err > 0 && !has_codec_parameters(st, NULL)); if (err < 0) { av_log(ic, AV_LOG_INFO, "decoding for stream %d failed\n", st->index); } } } } // close codecs which were opened in try_decode_frame() for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; avcodec_close(st->internal->avctx); } ff_rfps_calculate(ic); for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; avctx = st->internal->avctx; if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { if (avctx->codec_id == AV_CODEC_ID_RAWVIDEO && !avctx->codec_tag && !avctx->bits_per_coded_sample) { uint32_t tag= avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt); if (avpriv_find_pix_fmt(avpriv_get_raw_pix_fmt_tags(), tag) == avctx->pix_fmt) avctx->codec_tag= tag; } /* estimate average framerate if not set by demuxer */ if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) { int best_fps = 0; double best_error = 0.01; if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2|| st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den || st->info->codec_info_duration < 0) continue; av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, st->info->codec_info_duration_fields * (int64_t) st->time_base.den, st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000); /* Round guessed framerate to a "standard" framerate if it's * within 1% of the original estimate. */ for (j = 0; j < MAX_STD_TIMEBASES; j++) { AVRational std_fps = { get_std_framerate(j), 12 * 1001 }; double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1); if (error < best_error) { best_error = error; best_fps = std_fps.num; } } if (best_fps) av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, best_fps, 12 * 1001, INT_MAX); } if (!st->r_frame_rate.num) { if ( avctx->time_base.den * (int64_t) st->time_base.num <= avctx->time_base.num * avctx->ticks_per_frame * (int64_t) st->time_base.den) { st->r_frame_rate.num = avctx->time_base.den; st->r_frame_rate.den = avctx->time_base.num * avctx->ticks_per_frame; } else { st->r_frame_rate.num = st->time_base.den; st->r_frame_rate.den = st->time_base.num; } } if (st->display_aspect_ratio.num && st->display_aspect_ratio.den) { AVRational hw_ratio = { avctx->height, avctx->width }; st->sample_aspect_ratio = av_mul_q(st->display_aspect_ratio, hw_ratio); } } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { if (!avctx->bits_per_coded_sample) avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec_id); // set stream disposition based on audio service type switch (avctx->audio_service_type) { case AV_AUDIO_SERVICE_TYPE_EFFECTS: st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break; case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break; case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break; case AV_AUDIO_SERVICE_TYPE_COMMENTARY: st->disposition = AV_DISPOSITION_COMMENT; break; case AV_AUDIO_SERVICE_TYPE_KARAOKE: st->disposition = AV_DISPOSITION_KARAOKE; break; } } } if (probesize) estimate_timings(ic, old_offset); av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN); if (ret >= 0 && ic->nb_streams) /* We could not have all the codec parameters before EOF. 
*/ ret = -1; for (i = 0; i < ic->nb_streams; i++) { const char *errmsg; st = ic->streams[i]; /* if no packet was ever seen, update context now for has_codec_parameters */ if (!st->internal->avctx_inited) { if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && st->codecpar->format == AV_SAMPLE_FMT_NONE) st->codecpar->format = st->internal->avctx->sample_fmt; ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar); if (ret < 0) goto find_stream_info_err; } if (!has_codec_parameters(st, &errmsg)) { char buf[256]; avcodec_string(buf, sizeof(buf), st->internal->avctx, 0); av_log(ic, AV_LOG_WARNING, "Could not find codec parameters for stream %d (%s): %s\n" "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n", i, buf, errmsg); } else { ret = 0; } } compute_chapters_end(ic); /* update the stream parameters from the internal codec contexts */ for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->internal->avctx_inited) { int orig_w = st->codecpar->width; int orig_h = st->codecpar->height; ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx); if (ret < 0) goto find_stream_info_err; // The decoder might reduce the video size by the lowres factor. if (av_codec_get_lowres(st->internal->avctx) && orig_w) { st->codecpar->width = orig_w; st->codecpar->height = orig_h; } } #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS ret = avcodec_parameters_to_context(st->codec, st->codecpar); if (ret < 0) goto find_stream_info_err; // The old API (AVStream.codec) "requires" the resolution to be adjusted // by the lowres factor. if (av_codec_get_lowres(st->internal->avctx) && st->internal->avctx->width) { av_codec_set_lowres(st->codec, av_codec_get_lowres(st->internal->avctx)); st->codec->width = st->internal->avctx->width; st->codec->height = st->internal->avctx->height; } if (st->codec->codec_tag != MKTAG('t','m','c','d')) { st->codec->time_base = st->internal->avctx->time_base; st->codec->ticks_per_frame = st->internal->avctx->ticks_per_frame; } st->codec->framerate = st->avg_frame_rate; if (st->internal->avctx->subtitle_header) { st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size); if (!st->codec->subtitle_header) goto find_stream_info_err; st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size; memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header, st->codec->subtitle_header_size); } // Fields unavailable in AVCodecParameters st->codec->coded_width = st->internal->avctx->coded_width; st->codec->coded_height = st->internal->avctx->coded_height; st->codec->properties = st->internal->avctx->properties; FF_ENABLE_DEPRECATION_WARNINGS #endif st->internal->avctx_inited = 0; } find_stream_info_err: for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->info) av_freep(&st->info->duration_error); av_freep(&ic->streams[i]->info); } if (ic->pb) av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n", avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count); return ret; }
/* NOTE: A new stream can be added there if no header in file
 * (AVFMTCTX_NOHEADER). */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt) { int ret = 0, i, got_packet = 0; AVDictionary *metadata = NULL; av_init_packet(pkt); while (!got_packet && !s->internal->parse_queue) { AVStream *st; AVPacket cur_pkt; /* read next packet */ ret = ff_read_packet(s, &cur_pkt); if (ret < 0) { if (ret == AVERROR(EAGAIN)) return ret; /* flush the parsers */ for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->parser && st->need_parsing) parse_packet(s, NULL, st->index); } /* all remaining packets are now in parse_queue => * really terminate parsing */ break; } ret = 0; st = s->streams[cur_pkt.stream_index]; /* update context if required */ if (st->internal->need_context_update) { if (avcodec_is_open(st->internal->avctx)) { av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n"); avcodec_close(st->internal->avctx); st->info->found_decoder = 0; } ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar); if (ret < 0) return ret; #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS /* update deprecated public codec context */ ret = avcodec_parameters_to_context(st->codec, st->codecpar); if (ret < 0) return ret; FF_ENABLE_DEPRECATION_WARNINGS #endif st->internal->need_context_update = 0; } if (cur_pkt.pts != AV_NOPTS_VALUE && cur_pkt.dts != AV_NOPTS_VALUE && cur_pkt.pts < cur_pkt.dts) { av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n", cur_pkt.stream_index, av_ts2str(cur_pkt.pts), av_ts2str(cur_pkt.dts), cur_pkt.size); } if (s->debug & FF_FDEBUG_TS) av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n", cur_pkt.stream_index, av_ts2str(cur_pkt.pts), av_ts2str(cur_pkt.dts), cur_pkt.size, cur_pkt.duration, cur_pkt.flags); if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) { st->parser = av_parser_init(st->codecpar->codec_id); if (!st->parser) { av_log(s, AV_LOG_VERBOSE, "parser not found for codec " "%s, packets or times may be invalid.\n", avcodec_get_name(st->codecpar->codec_id)); /* no parser available: just output the raw packets */ st->need_parsing = AVSTREAM_PARSE_NONE; } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS) st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) st->parser->flags |= PARSER_FLAG_ONCE; else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) st->parser->flags |= PARSER_FLAG_USE_CODEC_TS; } if (!st->need_parsing || !st->parser) { /* no parsing needed: we just output the packet as is */ *pkt = cur_pkt; compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE); if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) { ff_reduce_index(s, st->index); av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME); } got_packet = 1; } else if (st->discard < AVDISCARD_ALL) { if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0) return ret; st->codecpar->sample_rate = st->internal->avctx->sample_rate; st->codecpar->bit_rate = st->internal->avctx->bit_rate; st->codecpar->channels = st->internal->avctx->channels; st->codecpar->channel_layout = st->internal->avctx->channel_layout; st->codecpar->codec_id = st->internal->avctx->codec_id; } else { /* free packet */ av_packet_unref(&cur_pkt); } if (pkt->flags & AV_PKT_FLAG_KEY) st->skip_to_keyframe = 0; if (st->skip_to_keyframe) { av_packet_unref(&cur_pkt); if (got_packet) { *pkt = cur_pkt; } got_packet = 
0; } } if (!got_packet && s->internal->parse_queue) ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt); if (ret >= 0) { AVStream *st = s->streams[pkt->stream_index]; int discard_padding = 0; if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) { int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0); int64_t sample = ts_to_samples(st, pts); int duration = ts_to_samples(st, pkt->duration); int64_t end_sample = sample + duration; if (duration > 0 && end_sample >= st->first_discard_sample && sample < st->last_discard_sample) discard_padding = FFMIN(end_sample - st->first_discard_sample, duration); } if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE)) st->skip_samples = st->start_skip_samples; if (st->skip_samples || discard_padding) { uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10); if (p) { AV_WL32(p, st->skip_samples); AV_WL32(p + 4, discard_padding); av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding); } st->skip_samples = 0; } if (st->inject_global_side_data) { for (i = 0; i < st->nb_side_data; i++) { AVPacketSideData *src_sd = &st->side_data[i]; uint8_t *dst_data; if (av_packet_get_side_data(pkt, src_sd->type, NULL)) continue; dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size); if (!dst_data) { av_log(s, AV_LOG_WARNING, "Could not inject global side data\n"); continue; } memcpy(dst_data, src_sd->data, src_sd->size); } st->inject_global_side_data = 0; } if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA)) av_packet_merge_side_data(pkt); } av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata); if (metadata) { s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED; av_dict_copy(&s->metadata, metadata, 0); av_dict_free(&metadata); av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN); } #if FF_API_LAVF_AVCTX update_stream_avctx(s); #endif if (s->debug & FF_FDEBUG_TS) av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, " "size=%d, duration=%"PRId64", flags=%d\n", pkt->stream_index, av_ts2str(pkt->pts), av_ts2str(pkt->dts), pkt->size, pkt->duration, pkt->flags); return ret; }
Reading the packet data:
int ff_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, i, err; AVStream *st; for (;;) { AVPacketList *pktl = s->internal->raw_packet_buffer; if (pktl) { *pkt = pktl->pkt; st = s->streams[pkt->stream_index]; if (s->internal->raw_packet_buffer_remaining_size <= 0) if ((err = probe_codec(s, st, NULL)) < 0) return err; if (st->request_probe <= 0) { s->internal->raw_packet_buffer = pktl->next; s->internal->raw_packet_buffer_remaining_size += pkt->size; av_free(pktl); return 0; } } pkt->data = NULL; pkt->size = 0; av_init_packet(pkt); ret = s->iformat->read_packet(s, pkt); if (ret < 0) { /* Some demuxers return FFERROR_REDO when they consume data and discard it (ignored streams, junk, extradata). We must re-call the demuxer to get the real packet. */ if (ret == FFERROR_REDO) continue; if (!pktl || ret == AVERROR(EAGAIN)) return ret; for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->probe_packets || st->request_probe > 0) if ((err = probe_codec(s, st, NULL)) < 0) return err; av_assert0(st->request_probe <= 0); } continue; } if (!pkt->buf) { AVPacket tmp = { 0 }; ret = av_packet_ref(&tmp, pkt); if (ret < 0) return ret; *pkt = tmp; } if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) && (pkt->flags & AV_PKT_FLAG_CORRUPT)) { av_log(s, AV_LOG_WARNING, "Dropped corrupted packet (stream = %d)\n", pkt->stream_index); av_packet_unref(pkt); continue; } if (pkt->stream_index >= (unsigned)s->nb_streams) { av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index); continue; } st = s->streams[pkt->stream_index]; if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) { // correct first time stamps to negative values if (!is_relative(st->first_dts)) st->first_dts = wrap_timestamp(st, st->first_dts); if (!is_relative(st->start_time)) st->start_time = wrap_timestamp(st, st->start_time); if (!is_relative(st->cur_dts)) st->cur_dts = wrap_timestamp(st, st->cur_dts); } pkt->dts = wrap_timestamp(st, pkt->dts); pkt->pts = wrap_timestamp(st, pkt->pts); force_codec_ids(s, st); /* TODO: audio: time filter; video: frame reordering (pts != dts) */ if (s->use_wallclock_as_timestamps) pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base); if (!pktl && st->request_probe <= 0) return ret; err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt, &s->internal->raw_packet_buffer_end, 0); if (err) return err; s->internal->raw_packet_buffer_remaining_size -= pkt->size; if ((err = probe_codec(s, st, pkt)) < 0) return err; } }
Here, s->iformat->read_packet(s, pkt) invokes the demuxer callback flv_read_packet(AVFormatContext *s, AVPacket *pkt).
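The mapping from s->iformat->read_packet to flv_read_packet comes from the FLV demuxer's AVInputFormat definition in libavformat/flvdec.c, which (abridged from memory, with some fields omitted) looks roughly like this:

AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,   /* the callback invoked by ff_read_packet() */
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
};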
Back in avformat_find_stream_info():
/* check if one codec still needs to be handled */
for (i = 0; i < ic->nb_streams; i++) {
    int fps_analyze_framecount = 20;

    st = ic->streams[i];
    if (!has_codec_parameters(st, NULL))
        break;
    /* If the timebase is coarse (like the usual millisecond precision
     * of mkv), we need to analyze more frames to reliably arrive at
     * the correct fps. */
    if (av_q2d(st->time_base) > 0.0005)
        fps_analyze_framecount *= 2;
    if (!tb_unreliable(st->internal->avctx))
        fps_analyze_framecount = 0;
    if (ic->fps_probe_size >= 0)
        fps_analyze_framecount = ic->fps_probe_size;
    if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
        fps_analyze_framecount = 0;
    /* variable fps and no guess at the real fps */
    if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&
        st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ?
                        st->info->codec_info_duration_fields/2 :
                        st->info->duration_count;
        if (count < fps_analyze_framecount)
            break;
    }
    if (st->parser && st->parser->parser->split &&
        !st->internal->avctx->extradata)
        break;
    if (st->first_dts == AV_NOPTS_VALUE &&
        !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
        st->codec_info_nb_frames < ((st->disposition & AV_DISPOSITION_ATTACHED_PIC) ? 1 : ic->max_ts_probe) &&
        (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
         st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
        break;
}
For FLV, the time_base numerator is 1 and the denominator is 1000, so av_q2d(st->time_base) is 0.001, which is greater than 0.0005, and fps_analyze_framecount is doubled from its initial value of 20 to 40.
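To make the arithmetic concrete, here is a small standalone sketch (illustrative only, not library code) of the check that doubles the frame count:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational flv_time_base = { 1, 1000 };  /* FLV timestamps are in milliseconds */
    int fps_analyze_framecount = 20;

    if (av_q2d(flv_time_base) > 0.0005)      /* 0.001 > 0.0005, so this is true */
        fps_analyze_framecount *= 2;         /* 20 -> 40 */

    printf("fps_analyze_framecount = %d\n", fps_analyze_framecount);
    return 0;
}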
Since both st->r_frame_rate.num and st->avg_frame_rate.num are 0, and ic->iformat->flags is 0 (AVFMT_NOTIMESTAMPS is not set), the loop compares st->info->duration_count against fps_analyze_framecount. st->info->duration_count is incremented by 1 for every video tag that is parsed, which means forty video tags must be parsed before the for loop in avformat_find_stream_info() is left. Too long :)
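A common workaround, sketched below under the assumption that fast startup matters more than fully probed parameters, is to cap the probing before calling avformat_find_stream_info(), either through open options (as here) or by setting probesize / max_analyze_duration on the context directly:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Hypothetical helper: open `url` with reduced probing so that
 * avformat_find_stream_info() returns sooner for FLV input. */
static int open_with_small_probe(AVFormatContext **ic, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize",       "65536",   0);  /* bytes read while probing */
    av_dict_set(&opts, "analyzeduration", "1000000", 0);  /* 1 s, in AV_TIME_BASE units */

    ret = avformat_open_input(ic, url, NULL, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    /* Because analyzeduration is non-zero, the 90-second FLV default
     * (max_stream_analyze_duration = 90*AV_TIME_BASE) is not applied. */
    return avformat_find_stream_info(*ic, NULL);
}

The trade-off is that with less data analyzed, some parameters (for example the estimated frame rate) may be missing or less accurate.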