Flutter Engine
SkVideoDecoder.cpp
/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

static SkYUVColorSpace get_yuvspace(AVColorSpace space) {
    // this is pretty incomplete -- TODO: look to convert more AVColorSpaces
    switch (space) {
        case AVCOL_SPC_RGB:       return kIdentity_SkYUVColorSpace;
        case AVCOL_SPC_BT709:     return kRec709_SkYUVColorSpace;
        case AVCOL_SPC_SMPTE170M:
        case AVCOL_SPC_SMPTE240M:
        case AVCOL_SPC_BT470BG:   return kRec601_SkYUVColorSpace;
        default: break;
    }
    return kRec709_SkYUVColorSpace;     // fall back to Rec. 709 for spaces we don't map yet
}

struct av_transfer_characteristics {
    // if x < beta:  delta * x
    // else:         alpha * (x^gamma) - (alpha - 1)
    float alpha, beta, gamma, delta;
};

// Tables extracted from vf_colorspace.c

const av_transfer_characteristics gTransfer[AVCOL_TRC_NB] = {
    [AVCOL_TRC_BT709]        = { 1.099,  0.018,  0.45, 4.5 },
    [AVCOL_TRC_GAMMA22]      = { 1.0,    0.0,    1.0 / 2.2, 0.0 },
    [AVCOL_TRC_GAMMA28]      = { 1.0,    0.0,    1.0 / 2.8, 0.0 },
    [AVCOL_TRC_SMPTE170M]    = { 1.099,  0.018,  0.45, 4.5 },
    [AVCOL_TRC_SMPTE240M]    = { 1.1115, 0.0228, 0.45, 4.0 },
    [AVCOL_TRC_IEC61966_2_1] = { 1.055,  0.0031308, 1.0 / 2.4, 12.92 },
    [AVCOL_TRC_IEC61966_2_4] = { 1.099,  0.018,  0.45, 4.5 },
    [AVCOL_TRC_BT2020_10]    = { 1.099,  0.018,  0.45, 4.5 },
    [AVCOL_TRC_BT2020_12]    = { 1.0993, 0.0181, 0.45, 4.5 },
};

static skcms_TransferFunction compute_transfer(AVColorTransferCharacteristic t) {
    const av_transfer_characteristics* av = &gTransfer[AVCOL_TRC_BT709];
    if ((unsigned)t < AVCOL_TRC_NB) {
        av = &gTransfer[t];
    }
    if (av->alpha == 0) {
        av = &gTransfer[AVCOL_TRC_BT709];
    }

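    // skcms_TransferFunction is the 7-parameter ICC form {g, a, b, c, d, e, f}:
    //     encoded = (a*x + b)^g + e    for x >= d
    //     encoded = c*x + f            otherwise
    // Setting g = gamma, a = alpha^(1/gamma), b = 0, c = delta, d = beta,
    // e = 1 - alpha, f = 0 reproduces the classic OETF
    //     alpha * x^gamma - (alpha - 1)    for x >= beta, else delta * x
    // i.e. the linear -> encoded direction, which is then inverted below.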
    skcms_TransferFunction linear_to_encoded = {
        av->gamma, std::pow(av->alpha, 1/av->gamma), 0, av->delta, av->beta, 1 - av->alpha, 0,
    };
    skcms_TransferFunction encoded_to_linear;
    bool success = skcms_TransferFunction_invert(&linear_to_encoded, &encoded_to_linear);
    SkASSERT(success);

    return encoded_to_linear;
}

enum Whitepoint {
    WP_D65,
    WP_C,
    WP_DCI,
    WP_E,
    WP_NB,
};

const SkPoint gWP[WP_NB] = {
    [WP_D65] = { 0.3127f, 0.3290f },
    [WP_C]   = { 0.3100f, 0.3160f },
    [WP_DCI] = { 0.3140f, 0.3510f },
    [WP_E]   = { 1/3.0f, 1/3.0f },
};

#define ExpandWP(index) gWP[index].fX, gWP[index].fY

const SkColorSpacePrimaries gPrimaries[AVCOL_PRI_NB] = {
    [AVCOL_PRI_BT709]     = { 0.640f, 0.330f, 0.300f, 0.600f, 0.150f, 0.060f, ExpandWP(WP_D65) },
    [AVCOL_PRI_BT470M]    = { 0.670f, 0.330f, 0.210f, 0.710f, 0.140f, 0.080f, ExpandWP(WP_C) },
    [AVCOL_PRI_BT470BG]   = { 0.640f, 0.330f, 0.290f, 0.600f, 0.150f, 0.060f, ExpandWP(WP_D65) },
    [AVCOL_PRI_SMPTE170M] = { 0.630f, 0.340f, 0.310f, 0.595f, 0.155f, 0.070f, ExpandWP(WP_D65) },
    [AVCOL_PRI_SMPTE240M] = { 0.630f, 0.340f, 0.310f, 0.595f, 0.155f, 0.070f, ExpandWP(WP_D65) },
    [AVCOL_PRI_SMPTE428]  = { 0.735f, 0.265f, 0.274f, 0.718f, 0.167f, 0.009f, ExpandWP(WP_E) },
    [AVCOL_PRI_SMPTE431]  = { 0.680f, 0.320f, 0.265f, 0.690f, 0.150f, 0.060f, ExpandWP(WP_DCI) },
    [AVCOL_PRI_SMPTE432]  = { 0.680f, 0.320f, 0.265f, 0.690f, 0.150f, 0.060f, ExpandWP(WP_D65) },
    [AVCOL_PRI_FILM]      = { 0.681f, 0.319f, 0.243f, 0.692f, 0.145f, 0.049f, ExpandWP(WP_C) },
    [AVCOL_PRI_BT2020]    = { 0.708f, 0.292f, 0.170f, 0.797f, 0.131f, 0.046f, ExpandWP(WP_D65) },
    [AVCOL_PRI_JEDEC_P22] = { 0.630f, 0.340f, 0.295f, 0.605f, 0.155f, 0.077f, ExpandWP(WP_D65) },
};

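// Builds an SkColorSpace from the stream's color primaries and transfer characteristic.
// Note the fast path maps the very common BT.709 primaries + BT.709 transfer pair straight
// to sRGB rather than deriving the exact curve via compute_transfer().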
sk_sp<SkColorSpace> make_colorspace(AVColorPrimaries primaries,
                                    AVColorTransferCharacteristic transfer) {
    if (primaries == AVCOL_PRI_BT709 && transfer == AVCOL_TRC_BT709) {
        return SkColorSpace::MakeSRGB();
    }

    const SkColorSpacePrimaries* p = &gPrimaries[0];
    if ((unsigned)primaries < (unsigned)AVCOL_PRI_NB) {
        p = &gPrimaries[primaries];
    }

    skcms_Matrix3x3 matrix;
    p->toXYZD50(&matrix);
    return SkColorSpace::MakeRGB(compute_transfer(transfer), matrix);
}

// returns true on error (and may dump the particular error message)
static bool check_err(int err, const int silentList[] = nullptr) {
    if (err >= 0) {
        return false;
    }

    if (silentList) {
        for (; *silentList; ++silentList) {
            if (*silentList == err) {
                return true;    // we still report the error, but we don't printf
            }
        }
    }

    char errbuf[128];
    const char *errbuf_ptr = errbuf;

    if (av_strerror(err, errbuf, sizeof(errbuf)) < 0) {
        errbuf_ptr = strerror(AVUNERROR(err));
    }
    SkDebugf("%s\n", errbuf_ptr);
    return true;
}

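// AVIOContext read callback: pulls at most dstSize bytes from the wrapped SkStream and
// reports AVERROR_EOF once the stream is exhausted.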
static int skstream_read_packet(void* ctx, uint8_t* dstBuffer, int dstSize) {
    SkStream* stream = (SkStream*)ctx;
    int result = (int)stream->read(dstBuffer, dstSize);
    if (result == 0) {
        result = AVERROR_EOF;
    }
    return result;
}

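// AVIOContext seek callback: maps FFmpeg's SEEK_SET/SEEK_CUR/SEEK_END requests onto an
// absolute position in the wrapped SkStream; any other whence value (e.g. AVSEEK_SIZE)
// is reported as unsupported by returning -1.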
static int64_t skstream_seek_packet(void* ctx, int64_t pos, int whence) {
    SkStream* stream = (SkStream*)ctx;
    switch (whence) {
        case SEEK_SET:
            break;
        case SEEK_CUR:
            pos = (int64_t)stream->getPosition() + pos;
            break;
        case SEEK_END:
            pos = (int64_t)stream->getLength() + pos;
            break;
        default:
            return -1;
    }
    return stream->seek(SkToSizeT(pos)) ? pos : -1;
}

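// Wraps the three planes of a decoded YUV 4:2:0 frame (full-resolution Y, half-resolution
// U and V) as A8 pixmaps and uploads them as a single GPU-backed SkImage. Returns nullptr
// on failure (e.g. when no GrRecordingContext is available), in which case convertFrame()
// falls back to a software conversion.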
static sk_sp<SkImage> make_yuv_420(GrRecordingContext* rContext,
                                   int w, int h,
                                   uint8_t* const data[],
                                   int const strides[],
                                   SkYUVColorSpace yuvSpace,
                                   sk_sp<SkColorSpace> cs) {
    SkYUVAInfo yuvaInfo({w, h},
                        SkYUVAInfo::PlaneConfig::kY_U_V,
                        SkYUVAInfo::Subsampling::k420,
                        yuvSpace);
    SkPixmap pixmaps[3];
    pixmaps[0].reset(SkImageInfo::MakeA8(w, h), data[0], strides[0]);
    w = (w + 1)/2;
    h = (h + 1)/2;
    pixmaps[1].reset(SkImageInfo::MakeA8(w, h), data[1], strides[1]);
    pixmaps[2].reset(SkImageInfo::MakeA8(w, h), data[2], strides[2]);
    auto yuvaPixmaps = SkYUVAPixmaps::FromExternalPixmaps(yuvaInfo, pixmaps);

    return SkImages::TextureFromYUVAPixmaps(
            rContext, yuvaPixmaps, skgpu::Mipmapped::kNo, false, std::move(cs));
}

// Init with illegal values, so our first compare will fail, forcing us to compute
// the skcolorspace.
SkVideoDecoder::ConvertedColorSpace::ConvertedColorSpace()
    : fPrimaries(AVCOL_PRI_NB), fTransfer(AVCOL_TRC_NB)
{}

void SkVideoDecoder::ConvertedColorSpace::update(AVColorPrimaries primaries,
                                                 AVColorTransferCharacteristic transfer) {
    if (fPrimaries != primaries || fTransfer != transfer) {
        fPrimaries = primaries;
        fTransfer = transfer;
        fCS = make_colorspace(primaries, transfer);
    }
}

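// Converts the frame's presentation timestamp from the stream's time_base (a rational
// number of seconds per tick) into seconds. For example, pts = 3003 in a 1/30000
// time_base is 3003 / 30000 = 0.1001 seconds.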
double SkVideoDecoder::computeTimeStamp(const AVFrame* frame) const {
    AVRational base = fFormatCtx->streams[fStreamIndex]->time_base;
    return 1.0 * frame->pts * base.num / base.den;
}

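// Converts a decoded AVFrame into an SkImage. YUV 4:2:0 frames are handed to make_yuv_420
// for a direct planar upload; any other pixel format is converted with swscale into an
// N32 raster bitmap below.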
sk_sp<SkImage> SkVideoDecoder::convertFrame(const AVFrame* frame) {
    auto yuv_space = get_yuvspace(frame->colorspace);

    // we have a 1-entry cache for converting colorspaces
    fCSCache.update(frame->color_primaries, frame->color_trc);

    // Are these always true? If so, we don't need to check our "cache" on each frame...
    SkASSERT(fDecoderCtx->colorspace == frame->colorspace);
    SkASSERT(fDecoderCtx->color_primaries == frame->color_primaries);
    SkASSERT(fDecoderCtx->color_trc == frame->color_trc);

    // Is this always true? If so, we might take advantage of it, knowing up-front if we support
    // the format for the whole stream, in which case we might have to ask ffmpeg to convert it
    // to something more reasonable (for us)...
    SkASSERT(fDecoderCtx->pix_fmt == frame->format);

    switch (frame->format) {
        case AV_PIX_FMT_YUV420P:
            if (auto image = make_yuv_420(fRecordingContext, frame->width, frame->height,
                                          frame->data, frame->linesize, yuv_space,
                                          fCSCache.fCS)) {
                return image;
            }
            break;
        default:
            break;
    }

    // General N32 fallback.
    const auto info = SkImageInfo::MakeN32(frame->width, frame->height,
                                           kOpaque_SkAlphaType);

    SkBitmap bm;
    bm.allocPixels(info, info.minRowBytes());

    constexpr auto fmt = SK_PMCOLOR_BYTE_ORDER(R,G,B,A) ? AV_PIX_FMT_RGBA : AV_PIX_FMT_BGRA;

    // TODO: should we cache these?
    auto* ctx = sws_getContext(frame->width, frame->height, (AVPixelFormat)frame->format,
                               info.width(), info.height(), fmt,
                               SWS_BILINEAR, nullptr, nullptr, nullptr);

    uint8_t* dst[] = { (uint8_t*)bm.pixmap().writable_addr() };
    int dst_stride[] = { SkToInt(bm.pixmap().rowBytes()) };

    sws_scale(ctx, frame->data, frame->linesize, 0, frame->height, dst, dst_stride);

    sws_freeContext(ctx);

    bm.setImmutable();

    return SkImages::RasterFromBitmap(bm);
}

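// Returns the next decoded frame as an SkImage (and its timestamp in seconds), or nullptr
// when the stream is exhausted. In kProcessing_Mode we keep feeding demuxed packets to the
// codec until it produces a frame; once av_read_frame reports end-of-stream we switch to
// kDraining_Mode, send a null packet to flush the codec, and drain any remaining frames.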
sk_sp<SkImage> SkVideoDecoder::nextImage(double* timeStamp) {
    double defaultTimeStampStorage = 0;
    if (!timeStamp) {
        timeStamp = &defaultTimeStampStorage;
    }

    if (fFormatCtx == nullptr) {
        return nullptr;
    }

    if (fMode == kProcessing_Mode) {
        // We sit in a loop, waiting for the codec to have received enough data (packets)
        // to have at least one frame available.
        // Treat non-zero return as EOF (or error, which we will decide is also EOF)
        while (!av_read_frame(fFormatCtx, &fPacket)) {
            if (fPacket.stream_index != fStreamIndex) {
                // got a packet for a stream other than our (video) stream, so continue
                continue;
            }

            int ret = avcodec_send_packet(fDecoderCtx, &fPacket);
            if (ret == AVERROR(EAGAIN)) {
                // may signal that we have plenty already, encouraging us to call receive_frame
                // so we don't treat this as an error.
                ret = 0;
            }
            (void)check_err(ret);   // we try to continue if there was an error

            int silentList[] = {
                -35,    // Resource temporarily unavailable (need more packets)
                0,
            };
            if (check_err(avcodec_receive_frame(fDecoderCtx, fFrame), silentList)) {
                // this may be just "needs more input", so we try to continue
            } else {
                *timeStamp = this->computeTimeStamp(fFrame);
                return this->convertFrame(fFrame);
            }
        }

        fMode = kDraining_Mode;
        (void)avcodec_send_packet(fDecoderCtx, nullptr);    // signal to start draining
    }
    if (fMode == kDraining_Mode) {
        if (avcodec_receive_frame(fDecoderCtx, fFrame) >= 0) {
            *timeStamp = this->computeTimeStamp(fFrame);
            return this->convertFrame(fFrame);
        }
        // else we decide we're done
        fMode = kDone_Mode;
    }
    return nullptr;
}

SkVideoDecoder::SkVideoDecoder(GrRecordingContext* rContext) : fRecordingContext(rContext) {}

SkVideoDecoder::~SkVideoDecoder() {
    this->reset();
}

void SkVideoDecoder::reset() {
    if (fFrame) {
        av_frame_free(&fFrame);
        fFrame = nullptr;
    }
    if (fDecoderCtx) {
        avcodec_free_context(&fDecoderCtx);
        fDecoderCtx = nullptr;
    }
    if (fFormatCtx) {
        avformat_close_input(&fFormatCtx);
        fFormatCtx = nullptr;
    }
    if (fStreamCtx) {
        av_freep(&fStreamCtx->buffer);
        avio_context_free(&fStreamCtx);
        fStreamCtx = nullptr;
    }

    fStream.reset(nullptr);
    fStreamIndex = -1;
    fMode = kDone_Mode;
}

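// Takes ownership of the stream and sets up the demuxer/decoder chain: a custom AVIOContext
// reads from the SkStream, avformat probes the container, and the best video stream's codec
// parameters are copied into a freshly opened decoder context. Once avio_alloc_context
// succeeds, fStreamCtx owns the I/O buffer and reset() releases it via av_freep.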
bool SkVideoDecoder::loadStream(std::unique_ptr<SkStream> stream) {
    this->reset();
    if (!stream) {
        return false;
    }

    int bufferSize = 4 * 1024;
    uint8_t* buffer = (uint8_t*)av_malloc(bufferSize);
    if (!buffer) {
        return false;
    }

    fStream = std::move(stream);
    fStreamCtx = avio_alloc_context(buffer, bufferSize, 0, fStream.get(),
                                    skstream_read_packet, nullptr, skstream_seek_packet);
    if (!fStreamCtx) {
        av_freep(&buffer);
        this->reset();
        return false;
    }

    fFormatCtx = avformat_alloc_context();
    if (!fFormatCtx) {
        this->reset();
        return false;
    }
    fFormatCtx->pb = fStreamCtx;

    int err = avformat_open_input(&fFormatCtx, nullptr, nullptr, nullptr);
    if (err < 0) {
        SkDebugf("avformat_open_input failed %d\n", err);
        return false;
    }

    const AVCodec* codec;
    fStreamIndex = av_find_best_stream(fFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
    if (fStreamIndex < 0) {
        SkDebugf("av_find_best_stream failed %d\n", fStreamIndex);
        this->reset();
        return false;
    }

    SkASSERT(codec);
    fDecoderCtx = avcodec_alloc_context3(codec);

    AVStream* strm = fFormatCtx->streams[fStreamIndex];
    if ((err = avcodec_parameters_to_context(fDecoderCtx, strm->codecpar)) < 0) {
        SkDebugf("avcodec_parameters_to_context failed %d\n", err);
        this->reset();
        return false;
    }

    if ((err = avcodec_open2(fDecoderCtx, codec, nullptr)) < 0) {
        SkDebugf("avcodec_open2 failed %d\n", err);
        this->reset();
        return false;
    }

    fFrame = av_frame_alloc();
    SkASSERT(fFrame);

    av_init_packet(&fPacket);   // is there a "free" call?

    fMode = kProcessing_Mode;

    return true;
}

SkISize SkVideoDecoder::dimensions() const {
    if (!fFormatCtx) {
        return {0, 0};
    }

    AVStream* strm = fFormatCtx->streams[fStreamIndex];
    return {strm->codecpar->width, strm->codecpar->height};
}

double SkVideoDecoder::duration() const {
    if (!fFormatCtx) {
        return 0;
    }

    AVStream* strm = fFormatCtx->streams[fStreamIndex];
    AVRational base = strm->time_base;
    return 1.0 * strm->duration * base.num / base.den;
}

bool SkVideoDecoder::rewind() {
    auto stream = std::move(fStream);
    this->reset();
    if (stream) {
        stream->rewind();
    }
    return this->loadStream(std::move(stream));
}
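
For orientation, here is a minimal sketch of how this decoder is typically driven: load a
stream, query its dimensions and duration, then pull frames until nextImage() returns null.
The helper name, file path, and the choice of a null GrRecordingContext are illustrative
assumptions, not part of this source; pass a real context to get texture-backed frames.

// Hypothetical driver loop (not part of SkVideoDecoder.cpp); assumes the SkVideoDecoder,
// SkStream and SkImage headers are on the include path.
static void decode_all_frames(const char path[]) {
    SkVideoDecoder decoder(/*rContext=*/nullptr);   // nullptr -> raster-backed images
    if (!decoder.loadStream(SkStream::MakeFromFile(path))) {
        SkDebugf("could not load %s\n", path);
        return;
    }
    SkDebugf("%d x %d, %g seconds\n",
             decoder.dimensions().width(), decoder.dimensions().height(),
             decoder.duration());

    double timeStamp;
    while (sk_sp<SkImage> frame = decoder.nextImage(&timeStamp)) {
        // ... draw or inspect `frame`, which should be presented at `timeStamp` seconds ...
    }
    decoder.rewind();   // optional: rewind the underlying stream and decode again
}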