13 changed files with 297 additions and 44 deletions

@@ -1,6 +1,6 @@
--- a/libavformat/mpegts.c
+++ b/libavformat/mpegts.c
-@@ -1014,10 +1014,12 @@ static int new_pes_packet(PESContext *pes, AVPacket *pkt)
+@@ -1015,10 +1015,12 @@ static int new_pes_packet(PESContext *pes, AVPacket *pkt)
     pes->buffer = NULL;
     reset_pes_packet_state(pes);

@@ -1,6 +1,6 @@
--- a/libavformat/hls.c
+++ b/libavformat/hls.c
-@@ -2080,8 +2080,10 @@
+@@ -2140,8 +2140,10 @@
     HLSContext *c = s->priv_data;
     int ret, i, minplaylist = -1;

@@ -1,6 +1,6 @@
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
-@@ -3558,8 +3558,10 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
+@@ -3615,8 +3615,10 @@

            if (ctts_data_old && ctts_index_old < ctts_count_old) {
                curr_ctts = ctts_data_old[ctts_index_old].duration;

@@ -0,0 +1,54 @@
--- a/libavformat/rtsp.c
+++ b/libavformat/rtsp.c
@@ -2338,7 +2338,9 @@
     RTSPStream *rtsp_st;
     int size, i, err;
     char *content;
+    const char *p, *sp="", *sources="", *sp2, *sources2;
     char url[1024];
+    char sources_buf[1024];
 
     if (!ff_network_init())
         return AVERROR(EIO);
@@ -2364,6 +2366,16 @@
     av_freep(&content);
     if (err) goto fail;
 
+    /* Search for sources= tag in original URL for rtp protocol only */
+    if (strncmp(s->url, "rtp://", 6) == 0) {
+        p = strchr(s->url, '?');
+        if (p && av_find_info_tag(sources_buf, sizeof(sources_buf), "sources", p)) {
+            /* av_log(s, AV_LOG_VERBOSE, "sdp_read_header found sources %s\n", sources_buf);  */
+            sp = sources_buf;
+            sources = "&sources=";
+        }
+    }
+
     /* open each RTP stream */
     for (i = 0; i < rt->nb_rtsp_streams; i++) {
         char namebuf[50];
@@ -2381,12 +2393,22 @@
                 av_dict_free(&opts);
                 goto fail;
             }
+
+            /* Prepare to add sources to the url to be opened.
+               Otherwise the join to the source specific multicast will be missing */
+            sources2 = sources;
+            sp2 = sp;
+            /* ignore sources from original URL, when sources are already set in rtsp_st */
+            if (rtsp_st->nb_include_source_addrs > 0)
+                sources2 = sp2 = "";
+
             ff_url_join(url, sizeof(url), "rtp", NULL,
                         namebuf, rtsp_st->sdp_port,
-                        "?localport=%d&ttl=%d&connect=%d&write_to_source=%d",
+                        "?localport=%d&ttl=%d&connect=%d&write_to_source=%d%s%s",
                         rtsp_st->sdp_port, rtsp_st->sdp_ttl,
                         rt->rtsp_flags & RTSP_FLAG_FILTER_SRC ? 1 : 0,
-                        rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 : 0);
+                        rt->rtsp_flags & RTSP_FLAG_RTCP_TO_SOURCE ? 1 : 0,
+                        sources2, sp2);
 
             append_source_addrs(url, sizeof(url), "sources",
                                 rtsp_st->nb_include_source_addrs,
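
Usage note (illustrative; addresses are hypothetical): with this change, opening an input such as rtp://233.252.0.1:5000?sources=192.0.2.10 makes sdp_read_header() append &sources=192.0.2.10 to each per-stream rtp URL built by ff_url_join(), so the UDP layer performs the source-specific multicast join; streams whose SDP already lists source addresses keep those instead.
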
@@ -0,0 +1,108 @@
--- a/libavcodec/dxva2.c
+++ b/libavcodec/dxva2.c
@@ -771,16 +771,18 @@
 #if CONFIG_D3D11VA
     if (avctx->pix_fmt == AV_PIX_FMT_D3D11)
         return (intptr_t)frame->data[1];
-    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
+    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD && surface) {
         D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC viewDesc;
         ID3D11VideoDecoderOutputView_GetDesc((ID3D11VideoDecoderOutputView*) surface, &viewDesc);
         return viewDesc.Texture2D.ArraySlice;
     }
 #endif
 #if CONFIG_DXVA2
-    for (i = 0; i < DXVA_CONTEXT_COUNT(avctx, ctx); i++) {
-        if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD && ctx->dxva2.surface[i] == surface)
-            return i;
+    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
+        for (i = 0; i < DXVA_CONTEXT_COUNT(avctx, ctx); i++) {
+            if (ctx->dxva2.surface[i] == surface)
+                return i;
+        }
     }
 #endif
 
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -504,6 +504,14 @@
 
     if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
         return -1;
+
+    // Wait for an I-frame before starting to decode. Workaround for ATI UVD and UVD+ GPUs
+    if (!h->got_first_iframe) {
+        if (!(ctx_pic->pp.wBitFields & (1 << 15)))
+            return -1;
+        h->got_first_iframe = 1;
+    }
+
     ret = ff_dxva2_common_end_frame(avctx, h->cur_pic_ptr->f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     &ctx_pic->qm, sizeof(ctx_pic->qm),
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -928,6 +928,7 @@
 
     h->first_field           = 0;
     h->prev_interlaced_frame = 1;
+    h->got_first_iframe = 0;
 
     init_scan_tables(h);
     ret = ff_h264_alloc_tables(h);
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -467,6 +467,7 @@
 
     h->next_outputed_poc = INT_MIN;
     h->prev_interlaced_frame = 1;
+    h->got_first_iframe = 0;
     idr(h);
 
     h->poc.prev_frame_num = -1;
--- a/libavcodec/h264dec.h
+++ b/libavcodec/h264dec.h
@@ -539,6 +539,8 @@
      * slices) anymore */
     int setup_finished;
 
+    int got_first_iframe;
+
     int cur_chroma_format_idc;
     int cur_bit_depth_luma;
     int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -314,6 +314,11 @@
     H264SliceContext *sl = &h->slice_ctx[0];
     int ret;
 
+    if (pic->nb_slices == 0) {
+        ret = AVERROR_INVALIDDATA;
+        goto finish;
+    }
+
     ret = ff_vaapi_decode_issue(avctx, pic);
     if (ret < 0)
         goto finish;
--- a/libavformat/bintext.c
+++ b/libavformat/bintext.c
@@ -149,7 +149,7 @@
             return AVPROBE_SCORE_EXTENSION + 1;
 
         predict_width(&par, p->buf_size, got_width);
-        if (par.width < 8)
+        if (par.width <= 0)
             return 0;
         calculate_height(&par, p->buf_size);
         if (par.height <= 0)
@@ -195,8 +195,6 @@
             next_tag_read(s, &bin->fsize);
         if (!bin->width) {
             predict_width(st->codecpar, bin->fsize, got_width);
-            if (st->codecpar->width < 8)
-                return AVERROR_INVALIDDATA;
             calculate_height(st->codecpar, bin->fsize);
         }
         avio_seek(pb, 0, SEEK_SET);

@@ -0,0 +1,14 @@
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -1460,6 +1460,11 @@
         h->avctx->has_b_frames = sps->num_reorder_frames;
     }
 
+    if (sps && sps->bitstream_restriction_flag &&
+        h->avctx->has_b_frames < sps->num_reorder_frames) {
+        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
+    }
+
     last_pic_droppable   = h->droppable;
     last_pic_structure   = h->picture_structure;
     h->droppable         = (nal->ref_idc == 0);

@@ -0,0 +1,78 @@
From patchwork Mon Jan 14 19:02:20 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: [FFmpeg-devel] amfenc: Add support for pict_type field
From: Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
X-Patchwork-Id: 11748
Message-Id: <20190114190220.16236-1-info@xaymar.com>
To: ffmpeg-devel@ffmpeg.org
Cc: Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
Date: Mon, 14 Jan 2019 20:02:20 +0100

Adds support for the pict_type field in AVFrame to amf_h264 and amf_h265 simultaneously. This field is needed in cases where the application wishes to override the frame type with another one, such as forcefully inserting a key frame for chapter markers or similar.

Additionally this abuses AV_PICTURE_TYPE_S for marking Skip frames, a special type of frame in AVC, SVC and HEVC which is a flag for the decoder to repeat the last frame.

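As an illustrative sketch (not part of the patch): once this is applied, a caller can request a key frame for a specific frame through the generic libavcodec API. The helper name below and the assumption of an already-opened h264_amf/hevc_amf encoder context are hypothetical.

#include <libavcodec/avcodec.h>

/* Sketch: force a key frame for the next submitted frame. With the patch
 * applied, ff_amf_send_frame() reads pict_type/key_frame from the AVFrame
 * and sets the matching FORCE_PICTURE_TYPE property before SubmitInput(). */
static int send_forced_keyframe(AVCodecContext *avctx, AVFrame *frame)
{
    frame->pict_type = AV_PICTURE_TYPE_I; /* request an I frame */
    frame->key_frame = 1;                 /* key_frame upgrades it to an IDR */
    return avcodec_send_frame(avctx, frame);
}
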
Signed-off-by: Michael Fabian 'Xaymar' Dirks <info@xaymar.com>
---
 libavcodec/amfenc.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 384d8efc92..eb4b65e4f2 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -693,6 +693,52 @@ int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
             break;
         }
 
+        // Override Picture Type for Frame
+        if (avctx->codec->id == AV_CODEC_ID_H264) {
+            switch (frame->pict_type) {
+            case AV_PICTURE_TYPE_I:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_I);
+                break;
+            case AV_PICTURE_TYPE_P:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_P);
+                break;
+            case AV_PICTURE_TYPE_B:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_B);
+                break;
+            case AV_PICTURE_TYPE_S:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_SKIP);
+                break;
+            default:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_NONE);
+                break;
+            }
+            // Keyframe overrides previous assignment.
+            if (frame->key_frame) {
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_PICTURE_TYPE_IDR);
+            }
+        } else if (avctx->codec->id == AV_CODEC_ID_HEVC) {
+            switch (frame->pict_type) {
+            case AV_PICTURE_TYPE_I:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_I);
+                break;
+            case AV_PICTURE_TYPE_P:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_P);
+                break;
+            case AV_PICTURE_TYPE_B:
+                av_log(ctx, AV_LOG_WARNING, "Ignoring B-Frame, unsupported by AMD AMF H.265 Encoder.\n");
+                break;
+            case AV_PICTURE_TYPE_S:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_SKIP);
+                break;
+            default:
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_NONE);
+                break;
+            }
+            // Keyframe overrides previous assignment.
+            if (frame->key_frame) {
+                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE, AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_IDR);
+            }
+        }
 
         // submit surface
         res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);