Part of the overall #118493 task: video input/output support for 10- and 12-bit/component formats. (Note: this is still LDR video, just at higher precision; there is no HDR handling yet.)

Movie reading/playback: when a movie file's pixel format has more than 8 bits per component, decode it into a floating-point ImBuf result. Previously all movies were decoded into 8-bit/channel ImBufs, so 10- and 12-bit movie pixel colors were getting quantized.

Movie output: when ffmpeg video with a suitable codec is selected, there is a color depth setting under the Encoding block. Currently that is:
- a 10-bit option for H.264, H.265, AV1 (VP9 could do 10-bit in theory too, but the ffmpeg built into Blender does not have that compiled in),
- a 12-bit option for H.265, AV1.

When "Lossless" is picked, then similar to how regular 8-bit video switches from YUV 4:2:0 to 4:4:4, this also switches to the 4:4:4 10- or 12-bit variant.

Pull Request: https://projects.blender.org/blender/blender/pulls/129298
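To make the output-side mapping concrete, here is a minimal sketch, assuming a hypothetical `choose_pixel_format()` helper and the standard FFmpeg `AV_PIX_FMT_*` constants. It only illustrates the behavior described above (lossless switches 4:2:0 to 4:4:4 at every depth) and is not the code from the pull request:

extern "C" {
#include <libavutil/pixfmt.h>
}

/* Hypothetical sketch: map the requested per-component bit depth and the
 * "Lossless" toggle to an FFmpeg pixel format. */
static AVPixelFormat choose_pixel_format(int bits_per_component, bool lossless)
{
  if (bits_per_component >= 12) {
    /* 12-bit is only offered for H.265 and AV1. */
    return lossless ? AV_PIX_FMT_YUV444P12LE : AV_PIX_FMT_YUV420P12LE;
  }
  if (bits_per_component >= 10) {
    /* 10-bit is offered for H.264, H.265 and AV1. */
    return lossless ? AV_PIX_FMT_YUV444P10LE : AV_PIX_FMT_YUV420P10LE;
  }
  /* Regular 8-bit path. */
  return lossless ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUV420P;
}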
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

/** \file
 * \ingroup bke
 */

#ifdef WITH_FFMPEG

enum {
  FFMPEG_MPEG1 = 0,
  FFMPEG_MPEG2 = 1,
  FFMPEG_MPEG4 = 2,
  FFMPEG_AVI = 3,
  FFMPEG_MOV = 4,
  FFMPEG_DV = 5,
  FFMPEG_H264 = 6,
  FFMPEG_XVID = 7,
  FFMPEG_FLV = 8,
  FFMPEG_MKV = 9,
  FFMPEG_OGG = 10,
  FFMPEG_INVALID = 11,
  FFMPEG_WEBM = 12,
  FFMPEG_AV1 = 13,
};

enum {
  FFMPEG_PRESET_NONE = 0,
  FFMPEG_PRESET_H264 = 1,
  FFMPEG_PRESET_THEORA = 2,
  FFMPEG_PRESET_XVID = 3,
  FFMPEG_PRESET_AV1 = 4,
};

struct AVFrame;
struct ImageFormatData;
struct ImBuf;
struct RenderData;
struct ReportList;
struct Scene;
struct SwsContext;

bool BKE_ffmpeg_start(void *context_v,
                      const Scene *scene,
                      RenderData *rd,
                      int rectx,
                      int recty,
                      ReportList *reports,
                      bool preview,
                      const char *suffix);
void BKE_ffmpeg_end(void *context_v);
bool BKE_ffmpeg_append(void *context_v,
                       RenderData *rd,
                       int start_frame,
                       int frame,
                       const ImBuf *image,
                       const char *suffix,
                       ReportList *reports);
void BKE_ffmpeg_filepath_get(char filepath[/*FILE_MAX*/ 1024],
                             const RenderData *rd,
                             bool preview,
                             const char *suffix);

void BKE_ffmpeg_preset_set(RenderData *rd, int preset);
void BKE_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf);
bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd);
bool BKE_ffmpeg_codec_supports_crf(int av_codec_id);
/**
 * Which pixel bit depths are supported by a given video codec.
 * Returns bitmask of `R_IMF_CHAN_DEPTH_` flags.
 */
int BKE_ffmpeg_valid_bit_depths(int av_codec_id);
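/* Illustrative usage sketch, not part of this header: check whether a codec
 * supports 10-bit output before offering that depth in the UI. Assumes the
 * `R_IMF_CHAN_DEPTH_10` flag from DNA_scene_types.h; the helper name is
 * hypothetical. */
static bool example_codec_supports_10bit(int av_codec_id)
{
  const int depths = BKE_ffmpeg_valid_bit_depths(av_codec_id);
  return (depths & R_IMF_CHAN_DEPTH_10) != 0;
}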

void *BKE_ffmpeg_context_create();
void BKE_ffmpeg_context_free(void *context_v);
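/* Illustrative lifecycle sketch, not part of this header: the writer API above
 * is used roughly as create -> start -> append one ImBuf per frame -> end ->
 * free. The frame range, the reuse of a single `frame_ibuf`, the empty suffix
 * and reading `frame_ibuf->x`/`y` (needs IMB_imbuf_types.h) are assumptions. */
static bool example_write_movie(const Scene *scene,
                                RenderData *rd,
                                const ImBuf *frame_ibuf,
                                ReportList *reports)
{
  void *ctx = BKE_ffmpeg_context_create();
  bool ok = BKE_ffmpeg_start(
      ctx, scene, rd, frame_ibuf->x, frame_ibuf->y, reports, false, "");
  for (int frame = 1; ok && frame <= 100; frame++) {
    ok = BKE_ffmpeg_append(ctx, rd, 1, frame, frame_ibuf, "", reports);
  }
  BKE_ffmpeg_end(ctx);
  BKE_ffmpeg_context_free(ctx);
  return ok;
}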

void BKE_ffmpeg_exit();

/**
 * Gets a `libswscale` context for the given size and format parameters.
 * After you're done using the context, call #BKE_ffmpeg_sws_release_context
 * to release it. Internally the contexts come from a context pool/cache.
 */
SwsContext *BKE_ffmpeg_sws_get_context(int src_width,
                                       int src_height,
                                       int av_src_format,
                                       int dst_width,
                                       int dst_height,
                                       int av_dst_format,
                                       int sws_flags);
void BKE_ffmpeg_sws_release_context(SwsContext *ctx);

void BKE_ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src);
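/* Illustrative usage sketch, not part of this header: convert one frame
 * between pixel formats using the pooled contexts above. Assumes
 * <libavutil/frame.h> for the AVFrame fields and <libswscale/swscale.h> for
 * `SWS_BICUBIC`; the helper name is hypothetical. */
static void example_convert_frame(AVFrame *dst, const AVFrame *src)
{
  SwsContext *sws = BKE_ffmpeg_sws_get_context(
      src->width, src->height, src->format, dst->width, dst->height, dst->format, SWS_BICUBIC);
  BKE_ffmpeg_sws_scale_frame(sws, dst, src);
  BKE_ffmpeg_sws_release_context(sws);
}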

#endif