mythavutil.cpp
//
// mythavutil.cpp
// MythTV
//
// Created by Jean-Yves Avenard on 28/06/2014.
// Copyright (c) 2014 Bubblestuff Pty Ltd. All rights reserved.
//

#include "mythframe.h"
#include "mythavutil.h"
#include "mythcorecontext.h"
#include "mythconfig.h"
extern "C" {
#include "libswscale/swscale.h"
#include "libavfilter/avfilter.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/imgutils.h"
#include "libavformat/avformat.h"
}
#include <QMutexLocker>

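/// Convert a VideoFrameType into its FFmpeg AVPixelFormat equivalent.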
AVPixelFormat FrameTypeToPixelFormat(VideoFrameType type)
{
    switch (type)
    {
        case FMT_YV12:       return AV_PIX_FMT_YUV420P;
        case FMT_YUV420P9:   return AV_PIX_FMT_YUV420P9;
        case FMT_YUV420P10:  return AV_PIX_FMT_YUV420P10;
        case FMT_YUV420P12:  return AV_PIX_FMT_YUV420P12;
        case FMT_YUV420P14:  return AV_PIX_FMT_YUV420P14;
        case FMT_YUV420P16:  return AV_PIX_FMT_YUV420P16;
        case FMT_NV12:       return AV_PIX_FMT_NV12;
        case FMT_P010:       return AV_PIX_FMT_P010;
        case FMT_P016:       return AV_PIX_FMT_P016;
        case FMT_YUV422P:    return AV_PIX_FMT_YUV422P;
        case FMT_YUV422P9:   return AV_PIX_FMT_YUV422P9;
        case FMT_YUV422P10:  return AV_PIX_FMT_YUV422P10;
        case FMT_YUV422P12:  return AV_PIX_FMT_YUV422P12;
        case FMT_YUV422P14:  return AV_PIX_FMT_YUV422P14;
        case FMT_YUV422P16:  return AV_PIX_FMT_YUV422P16;
        case FMT_YUV444P:    return AV_PIX_FMT_YUV444P;
        case FMT_YUV444P9:   return AV_PIX_FMT_YUV444P9;
        case FMT_YUV444P10:  return AV_PIX_FMT_YUV444P10;
        case FMT_YUV444P12:  return AV_PIX_FMT_YUV444P12;
        case FMT_YUV444P14:  return AV_PIX_FMT_YUV444P14;
        case FMT_YUV444P16:  return AV_PIX_FMT_YUV444P16;
        case FMT_RGB24:      return AV_PIX_FMT_RGB24;
        case FMT_BGRA:       return AV_PIX_FMT_BGRA; // NOLINT(bugprone-branch-clone)
        case FMT_RGB32:      return AV_PIX_FMT_RGB32;
        case FMT_ARGB32:     return AV_PIX_FMT_ARGB;
        case FMT_RGBA32:     return AV_PIX_FMT_RGBA;
        case FMT_YUY2:       return AV_PIX_FMT_UYVY422;
        case FMT_VDPAU:      return AV_PIX_FMT_VDPAU;
        case FMT_VTB:        return AV_PIX_FMT_VIDEOTOOLBOX;
        case FMT_VAAPI:      return AV_PIX_FMT_VAAPI;
        case FMT_MEDIACODEC: return AV_PIX_FMT_MEDIACODEC;
        case FMT_NVDEC:      return AV_PIX_FMT_CUDA;
        case FMT_DXVA2:      return AV_PIX_FMT_DXVA2_VLD;
        case FMT_MMAL:       return AV_PIX_FMT_MMAL;
        case FMT_DRMPRIME:   return AV_PIX_FMT_DRM_PRIME;
        case FMT_NONE: break;
    }
    return AV_PIX_FMT_NONE;
}

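/// Convert an FFmpeg AVPixelFormat into its VideoFrameType equivalent.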
VideoFrameType PixelFormatToFrameType(AVPixelFormat fmt)
{
    switch (fmt)
    {
        case AV_PIX_FMT_YUVJ420P:
        case AV_PIX_FMT_YUV420P:   return FMT_YV12;
        case AV_PIX_FMT_YUV420P9:  return FMT_YUV420P9;
        case AV_PIX_FMT_YUV420P10: return FMT_YUV420P10;
        case AV_PIX_FMT_YUV420P12: return FMT_YUV420P12;
        case AV_PIX_FMT_YUV420P14: return FMT_YUV420P14;
        case AV_PIX_FMT_YUV420P16: return FMT_YUV420P16;
        case AV_PIX_FMT_NV12:      return FMT_NV12;
        case AV_PIX_FMT_P010:      return FMT_P010;
        case AV_PIX_FMT_P016:      return FMT_P016;
        case AV_PIX_FMT_YUVJ422P:
        case AV_PIX_FMT_YUV422P:   return FMT_YUV422P;
        case AV_PIX_FMT_YUV422P9:  return FMT_YUV422P9;
        case AV_PIX_FMT_YUV422P10: return FMT_YUV422P10;
        case AV_PIX_FMT_YUV422P12: return FMT_YUV422P12;
        case AV_PIX_FMT_YUV422P14: return FMT_YUV422P14;
        case AV_PIX_FMT_YUV422P16: return FMT_YUV422P16;
        case AV_PIX_FMT_YUVJ444P:
        case AV_PIX_FMT_YUV444P:   return FMT_YUV444P;
        case AV_PIX_FMT_YUV444P9:  return FMT_YUV444P9;
        case AV_PIX_FMT_YUV444P10: return FMT_YUV444P10;
        case AV_PIX_FMT_YUV444P12: return FMT_YUV444P12;
        case AV_PIX_FMT_YUV444P14: return FMT_YUV444P14;
        case AV_PIX_FMT_YUV444P16: return FMT_YUV444P16;
        case AV_PIX_FMT_UYVY422:   return FMT_YUY2;
        case AV_PIX_FMT_RGB24:     return FMT_RGB24;
        case AV_PIX_FMT_ARGB:      return FMT_ARGB32;
        case AV_PIX_FMT_RGBA:      return FMT_RGBA32;
        case AV_PIX_FMT_BGRA:      return FMT_BGRA;
        case AV_PIX_FMT_CUDA:      return FMT_NVDEC;
        case AV_PIX_FMT_MMAL:      return FMT_MMAL;
        case AV_PIX_FMT_VDPAU:     return FMT_VDPAU;
        case AV_PIX_FMT_VIDEOTOOLBOX: return FMT_VTB;
        case AV_PIX_FMT_VAAPI:     return FMT_VAAPI;
        case AV_PIX_FMT_DXVA2_VLD: return FMT_DXVA2;
        case AV_PIX_FMT_MEDIACODEC: return FMT_MEDIACODEC;
        case AV_PIX_FMT_DRM_PRIME: return FMT_DRMPRIME;
        default: break;
    }
    return FMT_NONE;
}

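/*! \brief Return a user friendly description of the given deinterlacer.
*/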
QString DeinterlacerName(MythDeintType Deint, bool DoubleRate, VideoFrameType Format)
{
    MythDeintType deint = GetDeinterlacer(Deint);
    QString result = DoubleRate ? "2x " : "";
    if (Deint & DEINT_CPU)
    {
        result += "CPU ";
        switch (deint)
        {
            case DEINT_HIGH:   return result + "bwdif";
            case DEINT_MEDIUM: return result + "yadif";
            case DEINT_BASIC:  return result + "onefield";
            default: break;
        }
    }
    else if (Deint & DEINT_SHADER)
    {
        result += "GLSL ";
        switch (deint)
        {
            case DEINT_HIGH:   return result + "Kernel";
            case DEINT_MEDIUM: return result + "Linearblend";
            case DEINT_BASIC:  return result + "Onefield";
            default: break;
        }
    }
    else if (Deint & DEINT_DRIVER)
    {
        switch (Format)
        {
            case FMT_MEDIACODEC: return "MediaCodec";
            case FMT_DRMPRIME:   return result + "EGL Onefield";
            case FMT_VDPAU:
                result += "VDPAU ";
                switch (deint)
                {
                    case DEINT_HIGH:   return result + "Advanced";
                    case DEINT_MEDIUM: return result + "Temporal";
                    case DEINT_BASIC:  return result + "Basic";
                    default: break;
                }
                break;
            case FMT_NVDEC:
                result += "NVDec ";
                switch (deint)
                {
                    case DEINT_HIGH:
                    case DEINT_MEDIUM: return result + "Adaptive";
                    case DEINT_BASIC:  return result + "Basic";
                    default: break;
                }
                break;
            case FMT_VAAPI:
                result += "VAAPI ";
                switch (deint)
                {
                    case DEINT_HIGH:   return result + "Compensated";
                    case DEINT_MEDIUM: return result + "Adaptive";
                    case DEINT_BASIC:  return result + "Basic";
                    default: break;
                }
                break;
            default: break;
        }
    }
    return "None";
}

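/// Return a short description of a deinterlacer preference (quality level plus engine flags).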
QString DeinterlacerPref(MythDeintType Deint)
{
    if (DEINT_NONE == Deint)
        return QString("None");
    QString result;
    if (Deint & DEINT_BASIC)       result = "Basic";
    else if (Deint & DEINT_MEDIUM) result = "Medium";
    else if (Deint & DEINT_HIGH)   result = "High";
    if (Deint & DEINT_CPU)    result += "|CPU";
    if (Deint & DEINT_SHADER) result += "|GLSL";
    if (Deint & DEINT_DRIVER) result += "|DRIVER";
    return result;
}

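/*! \brief Initialise AVFrame pic with content from VideoFrame frame.
*/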
int AVPictureFill(AVFrame *pic, const VideoFrame *frame, AVPixelFormat fmt)
{
    if (fmt == AV_PIX_FMT_NONE)
    {
        fmt = FrameTypeToPixelFormat(frame->codec);
    }

    av_image_fill_arrays(pic->data, pic->linesize, frame->buf,
                         fmt, frame->width, frame->height, IMAGE_ALIGN);
    pic->data[1] = frame->buf + frame->offsets[1];
    pic->data[2] = frame->buf + frame->offsets[2];
    pic->linesize[0] = frame->pitches[0];
    pic->linesize[1] = frame->pitches[1];
    pic->linesize[2] = frame->pitches[2];
    return static_cast<int>(GetBufferSize(frame->codec, frame->width, frame->height));
}

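/// Private implementation for MythAVCopy: caches the swscale context and the MythUSWCCopy helper used for YV12/NV12 copies.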
class MythAVCopyPrivate
{
public:
    explicit MythAVCopyPrivate(bool uswc)
      : m_swsctx(nullptr), m_copyctx(new MythUSWCCopy(4096, !uswc)),
        m_width(0), m_height(0), m_size(0), m_format(AV_PIX_FMT_NONE)
    {
    }

    ~MythAVCopyPrivate()
    {
        if (m_swsctx)
        {
            sws_freeContext(m_swsctx);
        }
        delete m_copyctx;
    }

    MythAVCopyPrivate(const MythAVCopyPrivate &) = delete;            // not copyable
    MythAVCopyPrivate &operator=(const MythAVCopyPrivate &) = delete; // not copyable

    int SizeData(int _width, int _height, AVPixelFormat _fmt)
    {
        if (_width == m_width && _height == m_height && _fmt == m_format)
        {
            return m_size;
        }
        m_size   = av_image_get_buffer_size(_fmt, _width, _height, IMAGE_ALIGN);
        m_width  = _width;
        m_height = _height;
        m_format = _fmt;
        return m_size;
    }

    SwsContext    *m_swsctx;
    MythUSWCCopy  *m_copyctx;
    int            m_width;
    int            m_height;
    int            m_size;
    AVPixelFormat  m_format;
};

MythAVCopy::MythAVCopy(bool uswc) : d(new MythAVCopyPrivate(uswc))
{
}

MythAVCopy::~MythAVCopy()
{
    delete d;
}

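/// Describe the YV12 or NV12 image stored contiguously in pic->data[0] as a VideoFrame with the given pitch and dimensions.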
void MythAVCopy::FillFrame(VideoFrame *frame, const AVFrame *pic, int pitch,
                           int width, int height, AVPixelFormat pix_fmt)
{
    int size = av_image_get_buffer_size(pix_fmt, width, height, IMAGE_ALIGN);

    if (pix_fmt == AV_PIX_FMT_YUV420P)
    {
        int chroma_pitch  = pitch >> 1;
        int chroma_height = height >> 1;
        int offsets[3] =
            { 0,
              pitch * height,
              pitch * height + chroma_pitch * chroma_height };
        int pitches[3] = { pitch, chroma_pitch, chroma_pitch };

        init(frame, FMT_YV12, pic->data[0], width, height, size, pitches, offsets);
    }
    else if (pix_fmt == AV_PIX_FMT_NV12)
    {
        int offsets[3] = { 0, pitch * height, 0 };
        int pitches[3] = { pitch, pitch, 0 };

        init(frame, FMT_NV12, pic->data[0], width, height, size, pitches, offsets);
    }
}

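/// Copy src into dst, using the USWC copy fast path for YV12/NV12 to YV12 and falling back to swscale for other conversions.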
int MythAVCopy::Copy(AVFrame *dst, AVPixelFormat dst_pix_fmt,
                     const AVFrame *src, AVPixelFormat pix_fmt,
                     int width, int height)
{
    if ((pix_fmt == AV_PIX_FMT_YUV420P || pix_fmt == AV_PIX_FMT_NV12) &&
        (dst_pix_fmt == AV_PIX_FMT_YUV420P))
    {
        VideoFrame framein, frameout;

        FillFrame(&framein, src, width, width, height, pix_fmt);
        FillFrame(&frameout, dst, width, width, height, dst_pix_fmt);

        d->m_copyctx->copy(&frameout, &framein);
        return frameout.size;
    }

    int new_width = width;
#if ARCH_ARM
    // The ARM build of FFmpeg has a bug: if sws_scale is called with the
    // source and destination sizes the same and the formats shown below,
    // it causes a bus error and the application core dumps. To avoid this,
    // reduce the destination width by one so the failing optimised code
    // path is bypassed.
    if (pix_fmt == AV_PIX_FMT_YUV420P
        && dst_pix_fmt == AV_PIX_FMT_BGRA)
        new_width = width - 1;
#endif
    d->m_swsctx = sws_getCachedContext(d->m_swsctx, width, height, pix_fmt,
                                       new_width, height, dst_pix_fmt,
                                       SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
    if (d->m_swsctx == nullptr)
    {
        return -1;
    }

    sws_scale(d->m_swsctx, src->data, src->linesize,
              0, height, dst->data, dst->linesize);

    return d->SizeData(width, height, dst_pix_fmt);
}

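/// Copy one VideoFrame into another, converting between frame formats when required.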
int MythAVCopy::Copy(VideoFrame *dst, const VideoFrame *src)
{
    if ((src->codec == FMT_YV12 || src->codec == FMT_NV12) &&
        (dst->codec == FMT_YV12))
    {
        d->m_copyctx->copy(dst, src);
        return dst->size;
    }

    AVFrame srcpic, dstpic;

    AVPictureFill(&srcpic, src);
    AVPictureFill(&dstpic, dst);

    return Copy(&dstpic, FrameTypeToPixelFormat(dst->codec),
                &srcpic, FrameTypeToPixelFormat(src->codec),
                src->width, src->height);
}

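/// Copy frame into pic, backed either by the supplied buffer or by a newly allocated one.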
int MythAVCopy::Copy(AVFrame *pic, const VideoFrame *frame,
                     unsigned char *buffer, AVPixelFormat fmt)
{
    VideoFrameType type = PixelFormatToFrameType(fmt);
    unsigned char *sbuf = buffer ? buffer : CreateBuffer(type, frame->width, frame->height);

    if (!sbuf)
    {
        return 0;
    }

    AVFrame pic_in;
    AVPixelFormat fmt_in = FrameTypeToPixelFormat(frame->codec);

    AVPictureFill(&pic_in, frame, fmt_in);
    av_image_fill_arrays(pic->data, pic->linesize, sbuf, fmt, frame->width, frame->height, IMAGE_ALIGN);
    return Copy(pic, fmt, &pic_in, fmt_in, frame->width, frame->height);
}

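/// Copy the contents of the AVFrame pic into the VideoFrame frame, converting the pixel format if necessary.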
int MythAVCopy::Copy(VideoFrame *frame, const AVFrame *pic, AVPixelFormat fmt)
{
    if (fmt == AV_PIX_FMT_NV12 || fmt == AV_PIX_FMT_YUV420P)
    {
        VideoFrame framein;
        FillFrame(&framein, pic, frame->width, frame->width, frame->height, fmt);
        return Copy(frame, &framein);
    }

    AVFrame frame_out;
    AVPixelFormat fmt_out = FrameTypeToPixelFormat(frame->codec);

    AVPictureFill(&frame_out, frame, fmt_out);
    return Copy(&frame_out, fmt_out, pic, fmt, frame->width, frame->height);
}

MythPictureDeinterlacer::MythPictureDeinterlacer(AVPixelFormat pixfmt,
                                                 int width, int height, float ar)
  : m_filter_graph(nullptr)
  , m_buffersink_ctx(nullptr)
  , m_buffersrc_ctx(nullptr)
  , m_pixfmt(pixfmt)
  , m_width(width)
  , m_height(height)
  , m_ar(ar)
  , m_errored(false)
{
    if (Flush() < 0)
    {
        m_errored = true;
    }
}

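/// Push src through the deinterlacing filter graph and copy the filtered output into dst.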
int MythPictureDeinterlacer::Deinterlace(AVFrame *dst, const AVFrame *src)
{
    if (m_errored)
    {
        return -1;
    }
    if (src)
    {
        memcpy(m_filter_frame->data, src->data, sizeof(src->data));
        memcpy(m_filter_frame->linesize, src->linesize, sizeof(src->linesize));
        m_filter_frame->width = m_width;
        m_filter_frame->height = m_height;
        m_filter_frame->format = m_pixfmt;
    }
    int res = av_buffersrc_add_frame(m_buffersrc_ctx, m_filter_frame);
    if (res < 0)
    {
        return res;
    }
    res = av_buffersink_get_frame(m_buffersink_ctx, m_filter_frame);
    if (res < 0)
    {
        return res;
    }

    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)((AVFrame*)m_filter_frame)->data,
                  (const int*)((AVFrame*)m_filter_frame)->linesize,
                  m_pixfmt, m_width, m_height);

    av_frame_unref(m_filter_frame);

    return 0;
}

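/// Deinterlace a single frame; if the filter needs more input it is drained and the graph is released so it can be recreated on the next call.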
int MythPictureDeinterlacer::DeinterlaceSingle(AVFrame *dst, const AVFrame *src)
{
    if (m_errored)
    {
        return -1;
    }
    if (!m_filter_graph && Flush() < 0)
    {
        return -1;
    }
    int res = Deinterlace(dst, src);
    if (res == AVERROR(EAGAIN))
    {
        res = Deinterlace(dst, nullptr);
        // We have drained the filter, we need to recreate it on the next run.
        avfilter_graph_free(&m_filter_graph);
    }
    return res;
}

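/// (Re)create the yadif deinterlacing filter graph. Returns 0 on success, -1 on failure.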
int MythPictureDeinterlacer::Flush()
{
    if (m_filter_graph)
    {
        avfilter_graph_free(&m_filter_graph);
    }

    m_filter_graph = avfilter_graph_alloc();
    if (!m_filter_graph)
    {
        return -1;
    }

    AVFilterInOut *inputs = nullptr, *outputs = nullptr;
    AVRational ar = av_d2q(m_ar, 100000);
    QString args = QString("buffer=video_size=%1x%2:pix_fmt=%3:time_base=1/1:pixel_aspect=%4/%5[in];"
                           "[in]yadif[out];[out] buffersink")
                       .arg(m_width).arg(m_height).arg(m_pixfmt).arg(ar.num).arg(ar.den);
    int res = avfilter_graph_parse2(m_filter_graph, args.toLatin1().data(), &inputs, &outputs);
    while (true)
    {
        if (res < 0 || inputs || outputs)
        {
            break;
        }
        res = avfilter_graph_config(m_filter_graph, nullptr);
        if (res < 0)
        {
            break;
        }
        if (!(m_buffersrc_ctx = avfilter_graph_get_filter(m_filter_graph, "Parsed_buffer_0")))
        {
            break;
        }
        if (!(m_buffersink_ctx = avfilter_graph_get_filter(m_filter_graph, "Parsed_buffersink_2")))
        {
            break;
        }
        return 0;
    }
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return -1;
}

MythPictureDeinterlacer::~MythPictureDeinterlacer()
{
    if (m_filter_graph)
    {
        avfilter_graph_free(&m_filter_graph);
    }
}


/// This global variable contains the MythCodecMap instance for the app.
MythCodecMap *gCodecMap = new MythCodecMap();

MythCodecMap::MythCodecMap() : mapLock(QMutex::Recursive)
{
}

MythCodecMap::~MythCodecMap()
{
    freeAllCodecContexts();
}

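/// Return the cached AVCodecContext for stream, creating, initialising and registering one if it does not yet exist.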
AVCodecContext *MythCodecMap::getCodecContext(const AVStream *stream,
                                              const AVCodec *pCodec, bool nullCodec)
{
    QMutexLocker lock(&mapLock);
    AVCodecContext *avctx = streamMap.value(stream, nullptr);
    if (!avctx)
    {
        if (stream == nullptr || stream->codecpar == nullptr)
            return nullptr;
        if (nullCodec)
            pCodec = nullptr;
        else
        {
            if (!pCodec)
                pCodec = avcodec_find_decoder(stream->codecpar->codec_id);
            if (!pCodec)
            {
                LOG(VB_GENERAL, LOG_WARNING,
                    QString("avcodec_find_decoder fail for %1").arg(stream->codecpar->codec_id));
                return nullptr;
            }
        }
        avctx = avcodec_alloc_context3(pCodec);
        if (avcodec_parameters_to_context(avctx, stream->codecpar) < 0)
            avcodec_free_context(&avctx);
        if (avctx)
        {
            avctx->pkt_timebase = stream->time_base;
            streamMap.insert(stream, avctx);
        }
    }
    return avctx;
}

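/// Return the cached AVCodecContext for stream, or nullptr if none has been created.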
AVCodecContext *MythCodecMap::hasCodecContext(const AVStream *stream)
{
    return streamMap.value(stream, nullptr);
}

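/// Remove the stream's AVCodecContext from the map, flushing and freeing it.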
void MythCodecMap::freeCodecContext(const AVStream *stream)
{
    QMutexLocker lock(&mapLock);
    AVCodecContext *avctx = streamMap.take(stream);
    if (avctx)
    {
        if (avctx->internal)
            avcodec_flush_buffers(avctx);
        avcodec_free_context(&avctx);
    }
}

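/// Flush and free every AVCodecContext held in the map.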
void MythCodecMap::freeAllCodecContexts()
{
    QMutexLocker lock(&mapLock);
    QMap<const AVStream*, AVCodecContext*>::iterator i = streamMap.begin();
    while (i != streamMap.end())
    {
        const AVStream *stream = i.key();
        ++i;
        freeCodecContext(stream);
    }
}