Refactor: move ffmpeg/video related code into one place

Previously, code related to reading/writing movie files via ffmpeg was
scattered around: some under blenkernel, some directly in generic
imbuf headers, some under intern/ffmpeg. Some of the files had names
that were not exactly clear. Some parts not directly related to
movies included ffmpeg headers directly (rna_scene.cc).

What is in this PR:

Movie and ffmpeg related code is now under imbuf/movie:
- IMB_anim.hh: movie reading, proxy querying, various utility functions.
- IMB_movie_enums.hh: simple enum definitions.
- IMB_movie_write.hh: movie writing functions.
- intern: actual implementation and private headers.
    - ffmpeg_compat.h: various ffmpeg version difference handling
      utilities,
    - ffmpeg_swscale.hh/cc: scaling and format conversion utilities
      for ffmpeg libswscale,
    - ffmpeg_util.hh/cc: misc utilities related to ffmpeg,
    - movie_proxy_indexer.hh/cc: proxies and timecode indexing for movies,
    - movie_read.hh/cc: decoding of movies into images,
    - movie_write.cc: encoding of images into movies.
- tests: basic ffmpeg library unit tests that previously
  lived under intern/ffmpeg.

Interface changes (at C++ level, no Python API changes):
- Mostly just movie-related functions that were previously BKE_ are now IMB_.
- I did one large-ish change though, and that is to remove bMovieHandle
  struct that had pointers to several functions. Now that is
  IMB_movie_write_begin, IMB_movie_write_append, IMB_movie_write_end
  functions using a single opaque struct handle. As a result, usages
  of that in pipeline.cc and render_opengl.cc have changed.

Pull Request: https://projects.blender.org/blender/blender/pulls/132074
This commit is contained in:
Aras Pranckevicius
2024-12-19 12:34:30 +01:00
committed by Aras Pranckevicius
parent 307f50f3f0
commit 974efe7d23
78 changed files with 1509 additions and 1470 deletions

View File

@@ -64,10 +64,6 @@ if(WITH_QUADRIFLOW)
add_subdirectory(quadriflow)
endif()
if(WITH_CODEC_FFMPEG)
add_subdirectory(ffmpeg)
endif()
if(UNIX AND NOT APPLE)
add_subdirectory(libc_compat)
endif()

View File

@@ -1,27 +0,0 @@
# SPDX-FileCopyrightText: 2020-2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
if(WITH_GTESTS)
set(TEST_SRC
tests/ffmpeg_codecs.cc
tests/ffmpeg_cpu_flags.cc
)
set(TEST_INC
.
)
set(TEST_INC_SYS
${FFMPEG_INCLUDE_DIRS}
${PNG_INCLUDE_DIRS}
${ZLIB_INCLUDE_DIRS}
)
set(TEST_LIB
${PNG_LIBRARIES}
${FFMPEG_LIBRARIES}
${ZLIB_LIBRARIES}
)
if(WITH_IMAGE_OPENJPEG)
set(TEST_LIB ${TEST_LIB} ${OPENJPEG_LIBRARIES})
endif()
blender_add_test_suite_lib(ffmpeg_libs "${TEST_SRC}" "${TEST_INC}" "${TEST_INC_SYS}" "${TEST_LIB}")
endif()

View File

@@ -446,7 +446,7 @@ class RENDER_PT_encoding_video(RenderOutputButtonsPanel, Panel):
return
# Color depth. List of codecs needs to be in sync with
# `BKE_ffmpeg_valid_bit_depths` in source code.
# `IMB_ffmpeg_valid_bit_depths` in source code.
use_bpp = needs_codec and ffmpeg.codec in {'H264', 'H265', 'AV1'}
if use_bpp:
image_settings = context.scene.render.image_settings

View File

@@ -1,99 +0,0 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bke
*/
#ifdef WITH_FFMPEG
enum {
FFMPEG_MPEG1 = 0,
FFMPEG_MPEG2 = 1,
FFMPEG_MPEG4 = 2,
FFMPEG_AVI = 3,
FFMPEG_MOV = 4,
FFMPEG_DV = 5,
FFMPEG_H264 = 6,
FFMPEG_XVID = 7,
FFMPEG_FLV = 8,
FFMPEG_MKV = 9,
FFMPEG_OGG = 10,
FFMPEG_INVALID = 11,
FFMPEG_WEBM = 12,
FFMPEG_AV1 = 13,
};
enum {
FFMPEG_PRESET_NONE = 0,
FFMPEG_PRESET_H264 = 1,
FFMPEG_PRESET_THEORA = 2,
FFMPEG_PRESET_XVID = 3,
FFMPEG_PRESET_AV1 = 4,
};
struct AVFrame;
struct ImageFormatData;
struct ImBuf;
struct RenderData;
struct ReportList;
struct Scene;
struct SwsContext;
bool BKE_ffmpeg_start(void *context_v,
const Scene *scene,
RenderData *rd,
int rectx,
int recty,
ReportList *reports,
bool preview,
const char *suffix);
void BKE_ffmpeg_end(void *context_v);
bool BKE_ffmpeg_append(void *context_v,
RenderData *rd,
int start_frame,
int frame,
const ImBuf *image,
const char *suffix,
ReportList *reports);
void BKE_ffmpeg_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix);
void BKE_ffmpeg_preset_set(RenderData *rd, int preset);
void BKE_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf);
bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd);
bool BKE_ffmpeg_codec_supports_crf(int av_codec_id);
/**
* Which pixel bit depths are supported by a given video codec.
* Returns bitmask of `R_IMF_CHAN_DEPTH_` flags.
*/
int BKE_ffmpeg_valid_bit_depths(int av_codec_id);
void *BKE_ffmpeg_context_create();
void BKE_ffmpeg_context_free(void *context_v);
void BKE_ffmpeg_exit();
/**
* Gets a `libswscale` context for given size and format parameters.
* After you're done using the context, call #BKE_ffmpeg_sws_release_context
* to release it. Internally the contexts are coming from the context
* pool/cache.
*/
SwsContext *BKE_ffmpeg_sws_get_context(int src_width,
int src_height,
int av_src_format,
int dst_width,
int dst_height,
int av_dst_format,
int sws_flags);
void BKE_ffmpeg_sws_release_context(SwsContext *ctx);
void BKE_ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src);
#endif

View File

@@ -1,54 +0,0 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bke
*/
/* generic blender movie support, could move to own module */
struct ImBuf;
struct RenderData;
struct ReportList;
struct Scene;
struct bMovieHandle {
bool (*start_movie)(void *context_v,
const Scene *scene,
RenderData *rd,
int rectx,
int recty,
ReportList *reports,
bool preview,
const char *suffix);
bool (*append_movie)(void *context_v,
RenderData *rd,
int start_frame,
int frame,
const ImBuf *image,
const char *suffix,
ReportList *reports);
void (*end_movie)(void *context_v);
/* Optional function. */
void (*get_movie_path)(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix);
void *(*context_create)();
void (*context_free)(void *context_v);
};
bMovieHandle *BKE_movie_handle_get(char imtype);
/**
* \note Similar to #BKE_image_path_from_imformat()
*/
void BKE_movie_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix);

View File

@@ -322,7 +322,6 @@ set(SRC
intern/wm_runtime.cc
intern/workspace.cc
intern/world.cc
intern/writemovie.cc
BKE_action.hh
BKE_addon.h
@@ -535,7 +534,6 @@ set(SRC
BKE_wm_runtime.hh
BKE_workspace.hh
BKE_world.h
BKE_writemovie.hh
nla_private.h
particle_private.h
@@ -574,6 +572,7 @@ set(LIB
bf_gpu
bf_ikplugin
bf_imbuf
bf::imbuf_movie
PRIVATE bf::intern::clog
bf_intern_ghost
PRIVATE bf::intern::guardedalloc
@@ -654,19 +653,6 @@ if(WITH_IMAGE_WEBP)
endif()
if(WITH_CODEC_FFMPEG)
list(APPEND SRC
intern/writeffmpeg.cc
BKE_writeffmpeg.hh
)
list(APPEND INC
../../../intern/ffmpeg
)
list(APPEND INC_SYS
${FFMPEG_INCLUDE_DIRS}
)
list(APPEND LIB
${FFMPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
endif()

View File

@@ -18,6 +18,7 @@
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_moviecache.hh"
@@ -36,7 +37,6 @@
#include "BKE_report.hh"
#include "BKE_screen.hh"
#include "BKE_studiolight.h"
#include "BKE_writeffmpeg.hh"
#include "DEG_depsgraph.hh"
@@ -79,9 +79,7 @@ void BKE_blender_free()
IMB_moviecache_destruct();
SEQ_fontmap_clear();
#ifdef WITH_FFMPEG
BKE_ffmpeg_exit();
#endif
IMB_ffmpeg_exit();
blender::bke::node_system_exit();
}

View File

@@ -29,6 +29,7 @@
#include "MEM_guardedalloc.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -15,16 +15,13 @@
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf_types.hh"
#include "BKE_colortools.hh"
#include "BKE_image_format.hh"
#ifdef WITH_FFMPEG
# include "BKE_writeffmpeg.hh"
#endif
/* Init/Copy/Free */
void BKE_image_format_init(ImageFormatData *imf, const bool render)
@@ -329,16 +326,14 @@ char BKE_imtype_valid_depths_with_video(char imtype, const ID *owner_id)
UNUSED_VARS(owner_id); /* Might be unused depending on build options. */
int depths = BKE_imtype_valid_depths(imtype);
#ifdef WITH_FFMPEG
/* Depending on video codec selected, valid color bit depths might vary. */
if (imtype == R_IMF_IMTYPE_FFMPEG) {
const bool is_render_out = (owner_id && GS(owner_id->name) == ID_SCE);
if (is_render_out) {
const Scene *scene = (const Scene *)owner_id;
depths |= BKE_ffmpeg_valid_bit_depths(scene->r.ffcodecdata.codec);
depths |= IMB_ffmpeg_valid_bit_depths(scene->r.ffcodecdata.codec);
}
}
#endif
return depths;
}

View File

@@ -55,6 +55,7 @@
#include "BKE_node_tree_update.hh"
#include "BKE_tracking.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -1,105 +0,0 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* Functions for writing movie files.
* \ingroup bke
*/
#include <cstring>
#include "MEM_guardedalloc.h"
#include "DNA_scene_types.h"
#include "BLI_utildefines.h"
#include "BKE_report.hh"
#ifdef WITH_FFMPEG
# include "BKE_writeffmpeg.hh"
#endif
#include "BKE_writemovie.hh"
static bool start_stub(void * /*context_v*/,
const Scene * /*scene*/,
RenderData * /*rd*/,
int /*rectx*/,
int /*recty*/,
ReportList * /*reports*/,
bool /*preview*/,
const char * /*suffix*/)
{
return false;
}
static void end_stub(void * /*context_v*/) {}
static bool append_stub(void * /*context_v*/,
RenderData * /*rd*/,
int /*start_frame*/,
int /*frame*/,
const ImBuf * /*image*/,
const char * /*suffix*/,
ReportList * /*reports*/)
{
return false;
}
static void *context_create_stub()
{
return nullptr;
}
static void context_free_stub(void * /*context_v*/) {}
bMovieHandle *BKE_movie_handle_get(const char imtype)
{
static bMovieHandle mh = {nullptr};
/* Stub callbacks in case ffmpeg is not compiled in. */
mh.start_movie = start_stub;
mh.append_movie = append_stub;
mh.end_movie = end_stub;
mh.get_movie_path = nullptr;
mh.context_create = context_create_stub;
mh.context_free = context_free_stub;
#ifdef WITH_FFMPEG
if (ELEM(imtype,
R_IMF_IMTYPE_AVIRAW,
R_IMF_IMTYPE_AVIJPEG,
R_IMF_IMTYPE_FFMPEG,
R_IMF_IMTYPE_H264,
R_IMF_IMTYPE_XVID,
R_IMF_IMTYPE_THEORA,
R_IMF_IMTYPE_AV1))
{
mh.start_movie = BKE_ffmpeg_start;
mh.append_movie = BKE_ffmpeg_append;
mh.end_movie = BKE_ffmpeg_end;
mh.get_movie_path = BKE_ffmpeg_filepath_get;
mh.context_create = BKE_ffmpeg_context_create;
mh.context_free = BKE_ffmpeg_context_free;
}
#else
(void)imtype;
#endif
return (mh.append_movie != append_stub) ? &mh : nullptr;
}
void BKE_movie_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix)
{
bMovieHandle *mh = BKE_movie_handle_get(rd->im_format.imtype);
if (mh && mh->get_movie_path) {
mh->get_movie_path(filepath, rd, preview, suffix);
}
else {
filepath[0] = '\0';
}
}

View File

@@ -11,6 +11,7 @@ set(INC
../bmesh
../gpu
../imbuf
../imbuf/movie
../makesrna
../nodes
../render

View File

@@ -67,11 +67,8 @@
#include "SEQ_modifier.hh"
#include "SEQ_utils.hh"
#ifdef WITH_FFMPEG
# include "BKE_writeffmpeg.hh"
#endif
#include "IMB_imbuf.hh" /* for proxy / time-code versioning stuff. */
#include "IMB_imbuf_enums.h"
#include "IMB_movie_enums.hh"
#include "NOD_common.h"
#include "NOD_composite.hh"
@@ -2754,12 +2751,10 @@ void blo_do_versions_260(FileData *fd, Library * /*lib*/, Main *bmain)
scene->toolsettings->snap_node_mode = 8; /* SCE_SNAP_TO_GRID */
}
#ifdef WITH_FFMPEG
/* Update for removed "sound-only" option in FFMPEG export settings. */
if (scene->r.ffcodecdata.type >= FFMPEG_INVALID) {
scene->r.ffcodecdata.type = FFMPEG_AVI;
}
#endif
}
}

View File

@@ -76,7 +76,7 @@
#include "BKE_screen.hh"
#include "BKE_tracking.h"
#include "IMB_imbuf_enums.h"
#include "IMB_movie_enums.hh"
#include "SEQ_iterator.hh"
#include "SEQ_retiming.hh"

View File

@@ -14,6 +14,7 @@ set(INC
../../functions
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../nodes
../../python

View File

@@ -54,6 +54,7 @@
#include "GPU_immediate_util.hh"
#include "GPU_state.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_thumbs.hh"

View File

@@ -11,6 +11,7 @@ set(INC
../../draw
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../nodes
../../render

View File

@@ -19,6 +19,7 @@
#include "BLI_task.hh"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "BLI_vector.hh"
#include "DNA_action_types.h"
#include "DNA_anim_types.h"
@@ -40,7 +41,6 @@
#include "BKE_main.hh"
#include "BKE_report.hh"
#include "BKE_scene.hh"
#include "BKE_writemovie.hh"
#include "DEG_depsgraph.hh"
#include "DEG_depsgraph_query.hh"
@@ -57,6 +57,7 @@
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_movie_write.hh"
#include "RE_pipeline.h"
@@ -120,7 +121,6 @@ struct OGLRender {
GPUViewport *viewport;
ReportList *reports;
bMovieHandle *mh;
int cfrao, nfra;
int totvideos;
@@ -138,7 +138,7 @@ struct OGLRender {
/** Use to check if running modal or not (invoked or executed). */
wmTimer *timer;
void **movie_ctx_arr;
blender::Vector<ImbMovieWriter *> movie_writers;
TaskPool *task_pool;
bool pool_ok;
@@ -847,8 +847,6 @@ static bool screen_opengl_render_init(bContext *C, wmOperator *op)
oglrender->win = win;
oglrender->totvideos = 0;
oglrender->mh = nullptr;
oglrender->movie_ctx_arr = nullptr;
if (is_animation) {
if (is_render_keyed_only) {
@@ -881,7 +879,6 @@ static bool screen_opengl_render_init(bContext *C, wmOperator *op)
static void screen_opengl_render_end(bContext *C, OGLRender *oglrender)
{
Scene *scene = oglrender->scene;
int i;
if (oglrender->is_animation) {
/* Trickery part for movie output:
@@ -913,17 +910,13 @@ static void screen_opengl_render_end(bContext *C, OGLRender *oglrender)
MEM_SAFE_FREE(oglrender->render_frames);
if (oglrender->mh) {
if (!oglrender->movie_writers.is_empty()) {
if (BKE_imtype_is_movie(scene->r.im_format.imtype)) {
for (i = 0; i < oglrender->totvideos; i++) {
oglrender->mh->end_movie(oglrender->movie_ctx_arr[i]);
oglrender->mh->context_free(oglrender->movie_ctx_arr[i]);
for (ImbMovieWriter *writer : oglrender->movie_writers) {
IMB_movie_write_end(writer);
}
}
if (oglrender->movie_ctx_arr) {
MEM_freeN(oglrender->movie_ctx_arr);
}
oglrender->movie_writers.clear_and_shrink();
}
if (oglrender->timer) { /* exec will not have a timer */
@@ -982,34 +975,25 @@ static bool screen_opengl_render_anim_init(bContext *C, wmOperator *op)
BKE_scene_multiview_videos_dimensions_get(
&scene->r, oglrender->sizex, oglrender->sizey, &width, &height);
oglrender->mh = BKE_movie_handle_get(scene->r.im_format.imtype);
if (oglrender->mh == nullptr) {
BKE_report(oglrender->reports, RPT_ERROR, "Movie format unsupported");
screen_opengl_render_end(C, oglrender);
return false;
}
oglrender->movie_ctx_arr = static_cast<void **>(
MEM_mallocN(sizeof(void *) * oglrender->totvideos, "Movies"));
oglrender->movie_writers.reserve(oglrender->totvideos);
for (i = 0; i < oglrender->totvideos; i++) {
Scene *scene_eval = DEG_get_evaluated_scene(oglrender->depsgraph);
const char *suffix = BKE_scene_multiview_view_id_suffix_get(&scene->r, i);
oglrender->movie_ctx_arr[i] = oglrender->mh->context_create();
if (!oglrender->mh->start_movie(oglrender->movie_ctx_arr[i],
scene_eval,
&scene->r,
oglrender->sizex,
oglrender->sizey,
oglrender->reports,
PRVRANGEON != 0,
suffix))
{
ImbMovieWriter *writer = IMB_movie_write_begin(scene->r.im_format.imtype,
scene_eval,
&scene->r,
oglrender->sizex,
oglrender->sizey,
oglrender->reports,
PRVRANGEON != 0,
suffix);
if (writer == nullptr) {
BKE_report(oglrender->reports, RPT_ERROR, "Movie format unsupported");
screen_opengl_render_end(C, oglrender);
return false;
}
oglrender->movie_writers.append(writer);
}
}
@@ -1059,8 +1043,7 @@ static void write_result(TaskPool *__restrict pool, WriteTaskData *task_data)
rr,
scene,
&scene->r,
oglrender->mh,
oglrender->movie_ctx_arr,
oglrender->movie_writers.data(),
oglrender->totvideos,
PRVRANGEON != 0);
}

View File

@@ -9,6 +9,7 @@ set(INC
../../blentranslation
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../windowmanager

View File

@@ -41,6 +41,7 @@
#include "WM_api.hh"
#include "WM_types.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -44,6 +44,7 @@
#include "WM_api.hh"
#include "WM_types.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -11,6 +11,7 @@ set(INC
../../blentranslation
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../render
../../windowmanager

View File

@@ -64,6 +64,7 @@
#include "ED_datafiles.h"
#include "ED_fileselect.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_thumbs.hh"

View File

@@ -12,6 +12,7 @@ set(INC
../../draw
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../render
../../windowmanager

View File

@@ -27,6 +27,7 @@
#include "RE_pipeline.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -49,6 +49,7 @@
#include "DEG_depsgraph.hh"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -10,6 +10,7 @@ set(INC
../../draw
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../sequencer
../../windowmanager

View File

@@ -33,6 +33,7 @@
#include "ED_screen.hh"
#include "ED_transform.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -2,8 +2,11 @@
#
# SPDX-License-Identifier: GPL-2.0-or-later
add_subdirectory(movie)
set(INC
.
./movie
../blenkernel
../blenloader
../gpu
@@ -20,7 +23,6 @@ set(INC_SYS
set(SRC
intern/allocimbuf.cc
intern/anim_movie.cc
intern/colormanagement.cc
intern/colormanagement_inline.h
intern/divers.cc
@@ -35,7 +37,6 @@ set(SRC
intern/format_targa.cc
intern/format_tiff.cc
intern/imageprocess.cc
intern/indexer.cc
intern/interp.cc
intern/iris.cc
intern/jpeg.cc
@@ -65,15 +66,10 @@ set(SRC
IMB_openexr.hh
IMB_thumbs.hh
intern/IMB_allocimbuf.hh
intern/IMB_anim.hh
intern/IMB_colormanagement_intern.hh
intern/IMB_filetype.hh
intern/IMB_filter.hh
intern/IMB_indexer.hh
intern/imbuf.hh
# orphan include
../../../intern/ffmpeg/ffmpeg_compat.h
)
set(LIB
@@ -123,20 +119,6 @@ if(WITH_IMAGE_OPENJPEG)
add_definitions(-DWITH_OPENJPEG ${OPENJPEG_DEFINES})
endif()
if(WITH_CODEC_FFMPEG)
list(APPEND INC
../../../intern/ffmpeg
)
list(APPEND INC_SYS
${FFMPEG_INCLUDE_DIRS}
)
list(APPEND LIB
${FFMPEG_LIBRARIES}
${OPENJPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
endif()
if(WITH_IMAGE_CINEON)
list(APPEND SRC
intern/format_dpx.cc

View File

@@ -52,8 +52,6 @@ struct ImBuf;
struct rctf;
struct rcti;
struct ImBufAnim;
struct ColorManagedDisplay;
struct GSet;
@@ -293,94 +291,6 @@ enum eIMBInterpolationFilterMode {
IMB_FILTER_BOX,
};
/**
* Defaults to BL_proxy within the directory of the animation.
*/
void IMB_anim_set_index_dir(ImBufAnim *anim, const char *dir);
void IMB_anim_get_filename(ImBufAnim *anim, char *filename, int filename_maxncpy);
int IMB_anim_index_get_frame_index(ImBufAnim *anim, IMB_Timecode_Type tc, int position);
int IMB_anim_proxy_get_existing(ImBufAnim *anim);
struct IndexBuildContext;
/**
* Prepare context for proxies/time-codes builder
*/
IndexBuildContext *IMB_anim_index_rebuild_context(ImBufAnim *anim,
IMB_Timecode_Type tcs_in_use,
int proxy_sizes_in_use,
int quality,
const bool overwrite,
GSet *file_list,
bool build_only_on_bad_performance);
/**
* Will rebuild all used indices and proxies at once.
*/
void IMB_anim_index_rebuild(IndexBuildContext *context,
bool *stop,
bool *do_update,
float *progress);
/**
* Finish rebuilding proxies/time-codes and free temporary contexts used.
*/
void IMB_anim_index_rebuild_finish(IndexBuildContext *context, bool stop);
/**
* Return the length (in frames) of the given \a anim.
*/
int IMB_anim_get_duration(ImBufAnim *anim, IMB_Timecode_Type tc);
/**
* Return the encoded start offset (in seconds) of the given \a anim.
*/
double IMD_anim_get_offset(ImBufAnim *anim);
/**
* Return the fps contained in movie files (function rval is false,
* and frs_sec and frs_sec_base untouched if none available!)
*/
bool IMB_anim_get_fps(const ImBufAnim *anim,
bool no_av_base,
short *r_frs_sec,
float *r_frs_sec_base);
ImBufAnim *IMB_open_anim(const char *filepath,
int ib_flags,
int streamindex,
char colorspace[IM_MAX_SPACE]);
void IMB_suffix_anim(ImBufAnim *anim, const char *suffix);
void IMB_close_anim(ImBufAnim *anim);
void IMB_close_anim_proxies(ImBufAnim *anim);
bool IMB_anim_can_produce_frames(const ImBufAnim *anim);
int IMB_anim_get_image_width(ImBufAnim *anim);
int IMB_anim_get_image_height(ImBufAnim *anim);
bool IMB_get_gop_decode_time(ImBufAnim *anim);
/**
* Fetches a frame from a movie at given frame position.
*
* Movies that are <= 8 bits/color channel are returned as byte images;
* higher bit depth movies are returned as float images. Note that the
* color space is returned as-is, i.e. a float image might not be in
* linear space.
*/
ImBuf *IMB_anim_absolute(ImBufAnim *anim,
int position,
IMB_Timecode_Type tc /* = 1 = IMB_TC_RECORD_RUN */,
IMB_Proxy_Size preview_size /* = 0 = IMB_PROXY_NONE */);
/**
* fetches a define preview-frame, usually half way into the movie.
*/
ImBuf *IMB_anim_previewframe(ImBufAnim *anim);
void IMB_free_anim(ImBufAnim *anim);
#define FILTER_MASK_NULL 0
#define FILTER_MASK_MARGIN 1
#define FILTER_MASK_USED 2
@@ -450,12 +360,6 @@ bool IMB_ispic_type_matches(const char *filepath, int filetype);
int IMB_ispic_type_from_memory(const unsigned char *buf, size_t buf_size);
int IMB_ispic_type(const char *filepath);
/**
* Test if the file is a video file (known format, has a video stream and
* supported video codec).
*/
bool IMB_isanim(const char *filepath);
/**
* Test if color-space conversions of pixels in buffer need to take into account alpha.
*/
@@ -726,11 +630,6 @@ void IMB_transform(const ImBuf *src,
const float transform_matrix[4][4],
const rctf *src_crop);
/* FFMPEG */
void IMB_ffmpeg_init();
const char *IMB_ffmpeg_last_error();
GPUTexture *IMB_create_gpu_texture(const char *name,
ImBuf *ibuf,
bool use_high_bitdepth,

View File

@@ -45,33 +45,6 @@ enum eImbFileType {
#endif
};
/**
* Time-code files contain timestamps (PTS, DTS) and packet seek position.
* These values are obtained by decoding each frame in movie stream. Time-code types define how
* these map to frame index in Blender. This is used when seeking in movie stream. Note, that
* meaning of terms time-code and record run here has little connection to their actual meaning.
*/
typedef enum IMB_Timecode_Type {
/** Don't use time-code files at all. Use FFmpeg API to seek to PTS calculated on the fly. */
IMB_TC_NONE = 0,
/**
* TC entries (and therefore frames in movie stream) are mapped to frame index, such that
* timestamp in Blender matches timestamp in the movie stream. This assumes, that time starts at
* 0 in both cases.
*
* Simplified formula is `frame_index = movie_stream_timestamp * FPS`.
*/
IMB_TC_RECORD_RUN = 1,
/**
* Each TC entry (and therefore frame in movie stream) is mapped to new frame index in Blender.
*
* For example: FFmpeg may say, that a frame should be displayed for 0.5 seconds, but this option
* ignores that and only displays it in one particular frame index in Blender.
*/
IMB_TC_RECORD_RUN_NO_GAPS = 8,
IMB_TC_NUM_TYPES = 2,
} IMB_Timecode_Type;
typedef enum IMB_Proxy_Size {
IMB_PROXY_NONE = 0,
IMB_PROXY_25 = 1,

View File

@@ -27,6 +27,7 @@
#include "DNA_space_types.h" /* For FILE_MAX_LIBEXTRA */
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_metadata.hh"

View File

@@ -24,24 +24,6 @@
#include "IMB_imbuf_types.hh"
#include "imbuf.hh"
#include "IMB_anim.hh"
#ifdef WITH_FFMPEG
# include "BLI_string.h" /* BLI_vsnprintf */
# include "BKE_global.hh" /* G.debug */
extern "C" {
# include <libavcodec/avcodec.h>
# include <libavdevice/avdevice.h>
# include <libavformat/avformat.h>
# include <libavutil/log.h>
# include "ffmpeg_compat.h" /* Keep for compatibility. */
}
#endif
#define UTIL_DEBUG 0
const char *imb_ext_image[] = {
@@ -170,148 +152,3 @@ bool IMB_ispic(const char *filepath)
{
return (IMB_ispic_type(filepath) != IMB_FTYPE_NONE);
}
#ifdef WITH_FFMPEG
/* BLI_vsnprintf in ffmpeg_log_callback() causes invalid warning */
# ifdef __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmissing-format-attribute"
# endif
static char ffmpeg_last_error[1024];
static void ffmpeg_log_callback(void *ptr, int level, const char *format, va_list arg)
{
if (ELEM(level, AV_LOG_FATAL, AV_LOG_ERROR)) {
size_t n;
va_list args_cpy;
va_copy(args_cpy, arg);
n = VSNPRINTF(ffmpeg_last_error, format, args_cpy);
va_end(args_cpy);
/* strip trailing \n */
ffmpeg_last_error[n - 1] = '\0';
}
if (G.debug & G_DEBUG_FFMPEG) {
/* call default logger to print all message to console */
av_log_default_callback(ptr, level, format, arg);
}
}
# ifdef __GNUC__
# pragma GCC diagnostic pop
# endif
void IMB_ffmpeg_init()
{
avdevice_register_all();
ffmpeg_last_error[0] = '\0';
if (G.debug & G_DEBUG_FFMPEG) {
av_log_set_level(AV_LOG_DEBUG);
}
/* set separate callback which could store last error to report to UI */
av_log_set_callback(ffmpeg_log_callback);
}
const char *IMB_ffmpeg_last_error()
{
return ffmpeg_last_error;
}
static int isffmpeg(const char *filepath)
{
AVFormatContext *pFormatCtx = nullptr;
uint i;
int videoStream;
const AVCodec *pCodec;
if (BLI_path_extension_check_n(filepath,
".swf",
".jpg",
".jp2",
".j2c",
".png",
".dds",
".tga",
".bmp",
".tif",
".exr",
".cin",
".wav",
nullptr))
{
return 0;
}
if (avformat_open_input(&pFormatCtx, filepath, nullptr, nullptr) != 0) {
if (UTIL_DEBUG) {
fprintf(stderr, "isffmpeg: av_open_input_file failed\n");
}
return 0;
}
if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
if (UTIL_DEBUG) {
fprintf(stderr, "isffmpeg: avformat_find_stream_info failed\n");
}
avformat_close_input(&pFormatCtx);
return 0;
}
if (UTIL_DEBUG) {
av_dump_format(pFormatCtx, 0, filepath, 0);
}
/* Find the first video stream */
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codecpar &&
(pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
{
videoStream = i;
break;
}
}
if (videoStream == -1) {
avformat_close_input(&pFormatCtx);
return 0;
}
AVCodecParameters *codec_par = pFormatCtx->streams[videoStream]->codecpar;
/* Find the decoder for the video stream */
pCodec = avcodec_find_decoder(codec_par->codec_id);
if (pCodec == nullptr) {
avformat_close_input(&pFormatCtx);
return 0;
}
avformat_close_input(&pFormatCtx);
return 1;
}
#endif
bool IMB_isanim(const char *filepath)
{
BLI_assert(!BLI_path_is_rel(filepath));
if (UTIL_DEBUG) {
printf("%s: %s\n", __func__, filepath);
}
#ifdef WITH_FFMPEG
if (isffmpeg(filepath)) {
return true;
}
#endif
return false;
}

View File

@@ -0,0 +1,85 @@
# SPDX-FileCopyrightText: 2024 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
set(INC
PUBLIC .
..
../../blenkernel
)
set(INC_SYS
)
set(SRC
IMB_anim.hh
IMB_movie_enums.hh
IMB_movie_write.hh
intern/ffmpeg_compat.h
intern/ffmpeg_util.cc
intern/ffmpeg_util.hh
intern/movie_proxy_indexer.cc
intern/movie_proxy_indexer.hh
intern/movie_read.cc
intern/movie_read.hh
intern/movie_write.cc
)
set(LIB
PRIVATE bf_blenkernel
PRIVATE bf::blenlib
PRIVATE bf_imbuf
PRIVATE bf::intern::guardedalloc
)
if(WITH_CODEC_FFMPEG)
list(APPEND SRC
intern/ffmpeg_swscale.cc
intern/ffmpeg_swscale.hh
)
list(APPEND INC_SYS
${FFMPEG_INCLUDE_DIRS}
)
list(APPEND LIB
${FFMPEG_LIBRARIES}
${OPENJPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
endif()
if(WITH_AUDASPACE)
list(APPEND INC_SYS
${AUDASPACE_C_INCLUDE_DIRS}
)
if(WITH_SYSTEM_AUDASPACE)
list(APPEND LIB
${AUDASPACE_C_LIBRARIES}
)
endif()
add_definitions(-DWITH_AUDASPACE)
endif()
blender_add_lib(bf_imbuf_movie "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
add_library(bf::imbuf_movie ALIAS bf_imbuf_movie)
if(WITH_GTESTS AND WITH_CODEC_FFMPEG)
set(TEST_SRC
tests/ffmpeg_codecs.cc
tests/ffmpeg_cpu_flags.cc
)
set(TEST_INC
intern
)
set(TEST_INC_SYS
${FFMPEG_INCLUDE_DIRS}
)
set(TEST_LIB
${FFMPEG_LIBRARIES}
)
if(WITH_IMAGE_OPENJPEG)
set(TEST_LIB ${TEST_LIB} ${OPENJPEG_LIBRARIES})
endif()
blender_add_test_suite_lib(ffmpeg_libs "${TEST_SRC}" "${TEST_INC}" "${TEST_INC_SYS}" "${TEST_LIB}")
endif()

View File

@@ -0,0 +1,121 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup imbuf
*/
#pragma once
#include "IMB_imbuf_enums.h"
#include "IMB_movie_enums.hh"
struct ImageFormatData;
struct ImBuf;
struct ImBufAnim;
struct IndexBuildContext;
struct GSet;
struct RenderData;
/**
* Defaults to BL_proxy within the directory of the animation.
*/
void IMB_anim_set_index_dir(ImBufAnim *anim, const char *dir);
void IMB_anim_get_filename(ImBufAnim *anim, char *filename, int filename_maxncpy);
int IMB_anim_index_get_frame_index(ImBufAnim *anim, IMB_Timecode_Type tc, int position);
int IMB_anim_proxy_get_existing(ImBufAnim *anim);
/**
* Prepare context for proxies/time-codes builder
*/
IndexBuildContext *IMB_anim_index_rebuild_context(ImBufAnim *anim,
IMB_Timecode_Type tcs_in_use,
int proxy_sizes_in_use,
int quality,
const bool overwrite,
GSet *file_list,
bool build_only_on_bad_performance);
/**
* Will rebuild all used indices and proxies at once.
*/
void IMB_anim_index_rebuild(IndexBuildContext *context,
bool *stop,
bool *do_update,
float *progress);
/**
* Finish rebuilding proxies/time-codes and free temporary contexts used.
*/
void IMB_anim_index_rebuild_finish(IndexBuildContext *context, bool stop);
/**
* Return the length (in frames) of the given \a anim.
*/
int IMB_anim_get_duration(ImBufAnim *anim, IMB_Timecode_Type tc);
/**
 * Return the encoded start offset (in seconds) of the given \a anim.
 */
double IMB_anim_get_offset(ImBufAnim *anim);
/**
 * Return the FPS contained in movie files (function return value is false,
 * and `r_frs_sec` / `r_frs_sec_base` are left untouched, if none available!)
 */
bool IMB_anim_get_fps(const ImBufAnim *anim,
                      bool no_av_base,
                      short *r_frs_sec,
                      float *r_frs_sec_base);
/**
 * Open a movie file for reading.
 * NOTE(review): `colorspace` is non-const; presumably it both requests and
 * receives the color-space name -- confirm against the implementation.
 */
ImBufAnim *IMB_open_anim(const char *filepath, int ib_flags, int streamindex, char *colorspace);
/**
 * NOTE(review): presumably stores a suffix used when resolving proxy/index
 * file paths for this movie -- confirm against the implementation.
 */
void IMB_suffix_anim(ImBufAnim *anim, const char *suffix);
/** Close the open movie stream of \a anim (the struct itself stays valid). */
void IMB_close_anim(ImBufAnim *anim);
/** Close any open proxy / time-code index files of \a anim. */
void IMB_close_anim_proxies(ImBufAnim *anim);
/** True when \a anim can decode image frames. */
bool IMB_anim_can_produce_frames(const ImBufAnim *anim);
int IMB_anim_get_image_width(ImBufAnim *anim);
int IMB_anim_get_image_height(ImBufAnim *anim);
/* NOTE(review): name suggests this queries GOP decode timing info; semantics
 * not visible here -- confirm against the implementation. */
bool IMB_get_gop_decode_time(ImBufAnim *anim);
/**
 * Fetches a frame from a movie at given frame position.
 *
 * Movies that are <= 8 bits/color channel are returned as byte images;
 * higher bit depth movies are returned as float images. Note that the
 * color space is returned as-is, i.e. a float image might not be in
 * linear space.
 */
ImBuf *IMB_anim_absolute(ImBufAnim *anim,
                         int position,
                         IMB_Timecode_Type tc /* = 1 = IMB_TC_RECORD_RUN */,
                         IMB_Proxy_Size preview_size /* = 0 = IMB_PROXY_NONE */);
/**
 * Fetches a predefined preview frame, usually halfway into the movie.
 */
ImBuf *IMB_anim_previewframe(ImBufAnim *anim);
/** Free \a anim and all resources it owns. */
void IMB_free_anim(ImBufAnim *anim);
/**
 * Test if the file is a video file (known format, has a video stream and
 * supported video codec).
 */
bool IMB_isanim(const char *filepath);
/** Initialize FFmpeg (devices, log callback). Call once at startup. */
void IMB_ffmpeg_init();
/** Release global FFmpeg related resources. Call once at shutdown. */
void IMB_ffmpeg_exit();
/** True when the given codec (#AVCodecID value) can encode an alpha channel. */
bool IMB_ffmpeg_alpha_channel_is_supported(int av_codec_id);
/** True when the given codec (#AVCodecID value) supports constant-rate-factor encoding. */
bool IMB_ffmpeg_codec_supports_crf(int av_codec_id);
/** Make sure the FFmpeg codec settings in \a rd are valid for the image type in \a imf. */
void IMB_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf);
/**
 * Which pixel bit depths are supported by a given video codec.
 * Returns bitmask of `R_IMF_CHAN_DEPTH_` flags.
 */
int IMB_ffmpeg_valid_bit_depths(int av_codec_id);

View File

@@ -0,0 +1,89 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup imbuf
*/
/* Output container / file format for movie writing.
 * Values are explicit and not in declaration order: entries added later
 * (WEBM, AV1) received the next free numbers after FFMPEG_INVALID.
 * NOTE(review): these values are presumably stored in saved scene data --
 * do not renumber existing entries. */
enum {
  FFMPEG_MPEG1 = 0,
  FFMPEG_MPEG2 = 1,
  FFMPEG_MPEG4 = 2,
  FFMPEG_AVI = 3,
  FFMPEG_MOV = 4,
  FFMPEG_DV = 5,
  FFMPEG_H264 = 6,
  FFMPEG_XVID = 7,
  FFMPEG_FLV = 8,
  FFMPEG_MKV = 9,
  FFMPEG_OGG = 10,
  FFMPEG_INVALID = 11,
  FFMPEG_WEBM = 12,
  FFMPEG_AV1 = 13,
};
/* Predefined combinations of container, codec and rate-control settings
 * applied by `ffmpeg_preset_set()`. */
enum {
  FFMPEG_PRESET_NONE = 0,
  FFMPEG_PRESET_H264 = 1,
  FFMPEG_PRESET_THEORA = 2,
  FFMPEG_PRESET_XVID = 3,
  FFMPEG_PRESET_AV1 = 4,
};
/* Note: match ffmpeg AVCodecID enum values, so that the two can be cast to
 * each other directly. Do not change existing values. */
enum IMB_Ffmpeg_Codec_ID {
  FFMPEG_CODEC_ID_NONE = 0,
  /* Video codecs. */
  FFMPEG_CODEC_ID_MPEG1VIDEO = 1,
  FFMPEG_CODEC_ID_MPEG2VIDEO = 2,
  FFMPEG_CODEC_ID_MPEG4 = 12,
  FFMPEG_CODEC_ID_FLV1 = 21,
  FFMPEG_CODEC_ID_DVVIDEO = 24,
  FFMPEG_CODEC_ID_HUFFYUV = 25,
  FFMPEG_CODEC_ID_H264 = 27,
  FFMPEG_CODEC_ID_THEORA = 30,
  FFMPEG_CODEC_ID_FFV1 = 33,
  FFMPEG_CODEC_ID_QTRLE = 55,
  FFMPEG_CODEC_ID_PNG = 61,
  FFMPEG_CODEC_ID_DNXHD = 99,
  FFMPEG_CODEC_ID_VP9 = 167,
  FFMPEG_CODEC_ID_H265 = 173,
  FFMPEG_CODEC_ID_AV1 = 226,
  /* Audio codecs (FFmpeg's audio ID range starts at 0x10000 = 65536). */
  FFMPEG_CODEC_ID_PCM_S16LE = 65536,
  FFMPEG_CODEC_ID_MP2 = 86016,
  FFMPEG_CODEC_ID_MP3 = 86017,
  FFMPEG_CODEC_ID_AAC = 86018,
  FFMPEG_CODEC_ID_AC3 = 86019,
  FFMPEG_CODEC_ID_VORBIS = 86021,
  FFMPEG_CODEC_ID_FLAC = 86028,
  FFMPEG_CODEC_ID_OPUS = 86076,
};
/**
* Time-code files contain timestamps (PTS, DTS) and packet seek position.
* These values are obtained by decoding each frame in movie stream. Time-code types define how
* these map to frame index in Blender. This is used when seeking in movie stream. Note, that
* meaning of terms time-code and record run here has little connection to their actual meaning.
*/
enum IMB_Timecode_Type {
  /** Don't use time-code files at all. Use FFmpeg API to seek to PTS calculated on the fly. */
  IMB_TC_NONE = 0,
  /**
   * TC entries (and therefore frames in movie stream) are mapped to frame index, such that
   * timestamp in Blender matches timestamp in the movie stream. This assumes, that time starts at
   * 0 in both cases.
   *
   * Simplified formula is `frame_index = movie_stream_timestamp * FPS`.
   */
  IMB_TC_RECORD_RUN = 1,
  /**
   * Each TC entry (and therefore frame in movie stream) is mapped to new frame index in Blender.
   *
   * For example: FFmpeg may say, that a frame should be displayed for 0.5 seconds, but this option
   * ignores that and only displays it in one particular frame index in Blender.
   */
  IMB_TC_RECORD_RUN_NO_GAPS = 8,
  /**
   * Number of distinct time-code types above (NONE excluded).
   * NOTE(review): this is 2 even though the largest enum value is 8, so it is
   * presumably used with a value-to-index mapping rather than as an array
   * bound over raw values -- confirm against the indexer code.
   */
  IMB_TC_NUM_TYPES = 2,
};

View File

@@ -0,0 +1,41 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup imbuf
*/
struct ImBuf;
struct ImbMovieWriter;
struct RenderData;
struct ReportList;
struct Scene;
/**
 * Start writing a movie file of the given image type, returning an opaque
 * writer handle (null on failure). Finish with #IMB_movie_write_end.
 */
ImbMovieWriter *IMB_movie_write_begin(const char imtype,
                                      const Scene *scene,
                                      RenderData *rd,
                                      int rectx,
                                      int recty,
                                      ReportList *reports,
                                      bool preview,
                                      const char *suffix);
/**
 * Encode and append one image as the frame at index \a frame.
 * Returns false on failure (errors are added to \a reports).
 */
bool IMB_movie_write_append(ImbMovieWriter *writer,
                            RenderData *rd,
                            int start_frame,
                            int frame,
                            const ImBuf *image,
                            const char *suffix,
                            ReportList *reports);
/** Finish writing and release \a writer. */
void IMB_movie_write_end(ImbMovieWriter *writer);
/**
* \note Similar to #BKE_image_path_from_imformat()
*/
void IMB_movie_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix);

View File

@@ -207,209 +207,4 @@ FFMPEG_INLINE int ffmpeg_get_video_rotation(const AVStream *stream)
return 0;
}
/* -------------------------------------------------------------------- */
/** \name Deinterlace code block
*
* NOTE: The code in this block are from FFmpeg 2.6.4, which is licensed by LGPL.
* \{ */
#define MAX_NEG_CROP 1024
#define times4(x) x, x, x, x
#define times256(x) times4(times4(times4(times4(times4(x)))))
static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
times256(0x00), 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22,
0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46,
0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52,
0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E,
0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82,
0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E,
0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A,
0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2,
0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE,
0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA,
0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF, 0xE0, 0xE1, 0xE2,
0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE,
0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA,
0xFB, 0xFC, 0xFD, 0xFE, 0xFF, times256(0xFF)};
#undef times4
#undef times256
/* filter parameters: [-1 4 2 4 -1] // 8 */
FFMPEG_INLINE
void deinterlace_line(uint8_t *dst,
const uint8_t *lum_m4,
const uint8_t *lum_m3,
const uint8_t *lum_m2,
const uint8_t *lum_m1,
const uint8_t *lum,
int size)
{
const uint8_t *cm = ff_compat_crop_tab + MAX_NEG_CROP;
int sum;
for (; size > 0; size--) {
sum = -lum_m4[0];
sum += lum_m3[0] << 2;
sum += lum_m2[0] << 1;
sum += lum_m1[0] << 2;
sum += -lum[0];
dst[0] = cm[(sum + 4) >> 3];
lum_m4++;
lum_m3++;
lum_m2++;
lum_m1++;
lum++;
dst++;
}
}
FFMPEG_INLINE
void deinterlace_line_inplace(
uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum, int size)
{
const uint8_t *cm = ff_compat_crop_tab + MAX_NEG_CROP;
int sum;
for (; size > 0; size--) {
sum = -lum_m4[0];
sum += lum_m3[0] << 2;
sum += lum_m2[0] << 1;
lum_m4[0] = lum_m2[0];
sum += lum_m1[0] << 2;
sum += -lum[0];
lum_m2[0] = cm[(sum + 4) >> 3];
lum_m4++;
lum_m3++;
lum_m2++;
lum_m1++;
lum++;
}
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
* top field is copied as is, but the bottom field is deinterlaced
* against the top field. */
FFMPEG_INLINE
void deinterlace_bottom_field(
uint8_t *dst, int dst_wrap, const uint8_t *src1, int src_wrap, int width, int height)
{
const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
int y;
src_m2 = src1;
src_m1 = src1;
src_0 = &src_m1[src_wrap];
src_p1 = &src_0[src_wrap];
src_p2 = &src_p1[src_wrap];
for (y = 0; y < (height - 2); y += 2) {
memcpy(dst, src_m1, width);
dst += dst_wrap;
deinterlace_line(dst, src_m2, src_m1, src_0, src_p1, src_p2, width);
src_m2 = src_0;
src_m1 = src_p1;
src_0 = src_p2;
src_p1 += 2 * src_wrap;
src_p2 += 2 * src_wrap;
dst += dst_wrap;
}
memcpy(dst, src_m1, width);
dst += dst_wrap;
/* do last line */
deinterlace_line(dst, src_m2, src_m1, src_0, src_0, src_0, width);
}
FFMPEG_INLINE
int deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap, int width, int height)
{
uint8_t *src_m1, *src_0, *src_p1, *src_p2;
int y;
uint8_t *buf = (uint8_t *)av_malloc(width);
if (!buf) {
return AVERROR(ENOMEM);
}
src_m1 = src1;
memcpy(buf, src_m1, width);
src_0 = &src_m1[src_wrap];
src_p1 = &src_0[src_wrap];
src_p2 = &src_p1[src_wrap];
for (y = 0; y < (height - 2); y += 2) {
deinterlace_line_inplace(buf, src_m1, src_0, src_p1, src_p2, width);
src_m1 = src_p1;
src_0 = src_p2;
src_p1 += 2 * src_wrap;
src_p2 += 2 * src_wrap;
}
/* do last line */
deinterlace_line_inplace(buf, src_m1, src_0, src_0, src_0, width);
av_free(buf);
return 0;
}
FFMPEG_INLINE
int av_image_deinterlace(
AVFrame *dst, const AVFrame *src, enum AVPixelFormat pix_fmt, int width, int height)
{
int i, ret;
if (pix_fmt != AV_PIX_FMT_YUV420P && pix_fmt != AV_PIX_FMT_YUVJ420P &&
pix_fmt != AV_PIX_FMT_YUV422P && pix_fmt != AV_PIX_FMT_YUVJ422P &&
pix_fmt != AV_PIX_FMT_YUV444P && pix_fmt != AV_PIX_FMT_YUV411P &&
pix_fmt != AV_PIX_FMT_GRAY8)
{
return -1;
}
if ((width & 3) != 0 || (height & 3) != 0) {
return -1;
}
for (i = 0; i < 3; i++) {
if (i == 1) {
switch (pix_fmt) {
case AV_PIX_FMT_YUVJ420P:
case AV_PIX_FMT_YUV420P:
width >>= 1;
height >>= 1;
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUVJ422P:
width >>= 1;
break;
case AV_PIX_FMT_YUV411P:
width >>= 2;
break;
default:
break;
}
if (pix_fmt == AV_PIX_FMT_GRAY8) {
break;
}
}
if (src == dst) {
ret = deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i], width, height);
if (ret < 0) {
return ret;
}
}
else {
deinterlace_bottom_field(
dst->data[i], dst->linesize[i], src->data[i], src->linesize[i], width, height);
}
}
return 0;
}
/** \} Deinterlace code block */
#endif

View File

@@ -0,0 +1,224 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup imbuf
*/
#ifdef WITH_FFMPEG
# include "ffmpeg_swscale.hh"
# include <cstdint>
# include <mutex>
# include "BLI_threads.h"
# include "BLI_vector.hh"
extern "C" {
# include <libavutil/opt.h>
# include <libavutil/pixfmt.h>
# include <libswscale/swscale.h>
# include "ffmpeg_compat.h"
}
/* libswscale context creation and destruction is expensive.
* Maintain a cache of already created contexts. */
static constexpr int64_t swscale_cache_max_entries = 32;
struct SwscaleContext {
int src_width = 0, src_height = 0;
int dst_width = 0, dst_height = 0;
AVPixelFormat src_format = AV_PIX_FMT_NONE, dst_format = AV_PIX_FMT_NONE;
int flags = 0;
SwsContext *context = nullptr;
int64_t last_use_timestamp = 0;
bool is_used = false;
};
static std::mutex swscale_cache_lock;
static int64_t swscale_cache_timestamp = 0;
static blender::Vector<SwscaleContext> *swscale_cache = nullptr;
/* Create a new `libswscale` context for the given size/format conversion
 * parameters. Returns null on failure. */
static SwsContext *sws_create_context(int src_width,
                                      int src_height,
                                      int av_src_format,
                                      int dst_width,
                                      int dst_height,
                                      int av_dst_format,
                                      int sws_flags)
{
# if defined(FFMPEG_SWSCALE_THREADING)
  /* sws_getContext does not allow passing flags that ask for multi-threaded
   * scaling context, so do it the hard way: allocate a raw context and set
   * every parameter (including thread count) through the options API. */
  SwsContext *c = sws_alloc_context();
  if (c == nullptr) {
    return nullptr;
  }
  av_opt_set_int(c, "srcw", src_width, 0);
  av_opt_set_int(c, "srch", src_height, 0);
  av_opt_set_int(c, "src_format", av_src_format, 0);
  av_opt_set_int(c, "dstw", dst_width, 0);
  av_opt_set_int(c, "dsth", dst_height, 0);
  av_opt_set_int(c, "dst_format", av_dst_format, 0);
  av_opt_set_int(c, "sws_flags", sws_flags, 0);
  av_opt_set_int(c, "threads", BLI_system_thread_count(), 0);
  if (sws_init_context(c, nullptr, nullptr) < 0) {
    sws_freeContext(c);
    return nullptr;
  }
# else
  SwsContext *c = sws_getContext(src_width,
                                 src_height,
                                 AVPixelFormat(av_src_format),
                                 dst_width,
                                 dst_height,
                                 AVPixelFormat(av_dst_format),
                                 sws_flags,
                                 nullptr,
                                 nullptr,
                                 nullptr);
# endif
  return c;
}
/* Lazily create the global context cache on first use.
 * Caller must hold `swscale_cache_lock`. */
static void init_swscale_cache_if_needed()
{
  if (swscale_cache != nullptr) {
    return;
  }
  swscale_cache = new blender::Vector<SwscaleContext>();
  swscale_cache_timestamp = 0;
}
static bool remove_oldest_swscale_context()
{
int64_t oldest_index = -1;
int64_t oldest_time = 0;
for (int64_t index = 0; index < swscale_cache->size(); index++) {
SwscaleContext &ctx = (*swscale_cache)[index];
if (ctx.is_used) {
continue;
}
int64_t time = swscale_cache_timestamp - ctx.last_use_timestamp;
if (time > oldest_time) {
oldest_time = time;
oldest_index = index;
}
}
if (oldest_index >= 0) {
SwscaleContext &ctx = (*swscale_cache)[oldest_index];
sws_freeContext(ctx.context);
swscale_cache->remove_and_reorder(oldest_index);
return true;
}
return false;
}
/* Evict least-recently-used entries until the cache is within its budget.
 * Stops early when all remaining entries are actively used (nothing can be
 * evicted). Caller must hold `swscale_cache_lock`. */
static void maintain_swscale_cache_size()
{
  while (swscale_cache->size() > swscale_cache_max_entries && remove_oldest_swscale_context()) {
    /* Keep evicting. */
  }
}
/* Get a scaling context from the cache, creating a new one when no unused
 * matching context exists. Returns null when context creation fails. */
SwsContext *ffmpeg_sws_get_context(int src_width,
                                   int src_height,
                                   int av_src_format,
                                   int dst_width,
                                   int dst_height,
                                   int av_dst_format,
                                   int sws_flags)
{
  std::lock_guard lock(swscale_cache_lock);

  init_swscale_cache_if_needed();

  swscale_cache_timestamp++;

  /* Search for unused context that has suitable parameters. */
  SwsContext *ctx = nullptr;
  for (SwscaleContext &c : *swscale_cache) {
    if (!c.is_used && c.src_width == src_width && c.src_height == src_height &&
        c.src_format == av_src_format && c.dst_width == dst_width && c.dst_height == dst_height &&
        c.dst_format == av_dst_format && c.flags == sws_flags)
    {
      ctx = c.context;
      /* Mark as used. */
      c.is_used = true;
      c.last_use_timestamp = swscale_cache_timestamp;
      break;
    }
  }

  if (ctx == nullptr) {
    /* No free matching context in cache: create a new one. */
    ctx = sws_create_context(
        src_width, src_height, av_src_format, dst_width, dst_height, av_dst_format, sws_flags);
    /* Only cache successfully created contexts. Caching a failed (null)
     * context would permanently occupy a slot: the caller receives null and
     * has nothing to release, so the entry would stay marked "used" forever
     * and could never be evicted by #maintain_swscale_cache_size. */
    if (ctx != nullptr) {
      SwscaleContext c;
      c.src_width = src_width;
      c.src_height = src_height;
      c.dst_width = dst_width;
      c.dst_height = dst_height;
      c.src_format = AVPixelFormat(av_src_format);
      c.dst_format = AVPixelFormat(av_dst_format);
      c.flags = sws_flags;
      c.context = ctx;
      c.is_used = true;
      c.last_use_timestamp = swscale_cache_timestamp;
      swscale_cache->append(c);
      maintain_swscale_cache_size();
    }
  }
  return ctx;
}
/* Return a context obtained from #ffmpeg_sws_get_context back to the cache,
 * making it available for reuse. The context object itself stays alive in
 * the cache until evicted. */
void ffmpeg_sws_release_context(SwsContext *ctx)
{
  std::lock_guard lock(swscale_cache_lock);
  init_swscale_cache_if_needed();

  bool found = false;
  for (SwscaleContext &c : *swscale_cache) {
    if (c.context == ctx) {
      BLI_assert_msg(c.is_used, "Releasing ffmpeg swscale context that is not in use");
      c.is_used = false;
      found = true;
      break;
    }
  }
  BLI_assert_msg(found, "Releasing ffmpeg swscale context that is not in cache");
  UNUSED_VARS_NDEBUG(found);
  /* The cache may have grown past its budget while this context was in use. */
  maintain_swscale_cache_size();
}
void ffmpeg_sws_exit()
{
std::lock_guard lock(swscale_cache_lock);
if (swscale_cache != nullptr) {
for (SwscaleContext &c : *swscale_cache) {
sws_freeContext(c.context);
}
delete swscale_cache;
swscale_cache = nullptr;
}
}
/* Scale/convert `src` into `dst` using the given context. When built with
 * FFMPEG_SWSCALE_THREADING the multi-threaded frame API is used. */
void ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src)
{
# if defined(FFMPEG_SWSCALE_THREADING)
  sws_scale_frame(ctx, dst, src);
# else
  sws_scale(ctx, src->data, src->linesize, 0, src->height, dst->data, dst->linesize);
# endif
}
#endif /* WITH_FFMPEG */

View File

@@ -0,0 +1,35 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup imbuf
*/
#ifdef WITH_FFMPEG
struct AVFrame;
struct SwsContext;
/**
* Gets a `libswscale` context for given size and format parameters.
* After you're done using the context, call #ffmpeg_sws_release_context
* to release it. Internally the contexts are coming from the context
* pool/cache.
*/
SwsContext *ffmpeg_sws_get_context(int src_width,
int src_height,
int av_src_format,
int dst_width,
int dst_height,
int av_dst_format,
int sws_flags);
void ffmpeg_sws_release_context(SwsContext *ctx);
void ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src);
void ffmpeg_sws_exit();
#endif /* WITH_FFMPEG */

View File

@@ -0,0 +1,522 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup imbuf
*/
#include "BLI_path_utils.hh"
#include "BLI_utildefines.h"
#include "DNA_scene_types.h"
#include "IMB_anim.hh"
#include "ffmpeg_swscale.hh"
#include "ffmpeg_util.hh"
#ifdef WITH_FFMPEG
# include "BLI_string.h"
# include "BKE_global.hh"
extern "C" {
# include "ffmpeg_compat.h"
# include <libavcodec/avcodec.h>
# include <libavdevice/avdevice.h>
# include <libavformat/avformat.h>
# include <libavutil/log.h>
}
static char ffmpeg_last_error_buffer[1024];
/* BLI_vsnprintf in ffmpeg_log_callback() causes invalid warning */
# ifdef __GNUC__
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmissing-format-attribute"
# endif
/* FFmpeg log callback: records the last fatal/error message into
 * `ffmpeg_last_error_buffer` (see #ffmpeg_last_error) and, when FFmpeg
 * debugging is enabled, forwards everything to the default console logger. */
static void ffmpeg_log_callback(void *ptr, int level, const char *format, va_list arg)
{
  if (ELEM(level, AV_LOG_FATAL, AV_LOG_ERROR)) {
    size_t n;
    va_list args_cpy;
    va_copy(args_cpy, arg);
    n = VSNPRINTF(ffmpeg_last_error_buffer, format, args_cpy);
    va_end(args_cpy);

    /* Strip the trailing newline, if there is one. Guard against empty
     * output: `n` is unsigned, so `n - 1` would wrap around and index far
     * out of bounds; and only remove the last character when it actually is
     * a newline, to avoid truncating real message content. */
    if (n > 0 && ffmpeg_last_error_buffer[n - 1] == '\n') {
      ffmpeg_last_error_buffer[n - 1] = '\0';
    }
  }
  if (G.debug & G_DEBUG_FFMPEG) {
    /* Call default logger to print all messages to console. */
    av_log_default_callback(ptr, level, format, arg);
  }
}
# ifdef __GNUC__
# pragma GCC diagnostic pop
# endif
/* Return the last FFmpeg fatal/error-level log message recorded by
 * #ffmpeg_log_callback (empty string when none occurred since init). */
const char *ffmpeg_last_error()
{
  return ffmpeg_last_error_buffer;
}
static int isffmpeg(const char *filepath)
{
AVFormatContext *pFormatCtx = nullptr;
uint i;
int videoStream;
const AVCodec *pCodec;
if (BLI_path_extension_check_n(filepath,
".swf",
".jpg",
".jp2",
".j2c",
".png",
".dds",
".tga",
".bmp",
".tif",
".exr",
".cin",
".wav",
nullptr))
{
return 0;
}
if (avformat_open_input(&pFormatCtx, filepath, nullptr, nullptr) != 0) {
return 0;
}
if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
avformat_close_input(&pFormatCtx);
return 0;
}
/* Find the first video stream */
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i] && pFormatCtx->streams[i]->codecpar &&
(pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
{
videoStream = i;
break;
}
}
if (videoStream == -1) {
avformat_close_input(&pFormatCtx);
return 0;
}
AVCodecParameters *codec_par = pFormatCtx->streams[videoStream]->codecpar;
/* Find the decoder for the video stream */
pCodec = avcodec_find_decoder(codec_par->codec_id);
if (pCodec == nullptr) {
avformat_close_input(&pFormatCtx);
return 0;
}
avformat_close_input(&pFormatCtx);
return 1;
}
/* -------------------------------------------------------------------- */
/* AVFrame deinterlacing. Code for this was originally based on ffmpeg 2.6.4 (LGPL). */
# define MAX_NEG_CROP 1024
# define times4(x) x, x, x, x
# define times256(x) times4(times4(times4(times4(times4(x)))))
static const uint8_t ff_compat_crop_tab[256 + 2 * MAX_NEG_CROP] = {
times256(0x00), 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22,
0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E,
0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A,
0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46,
0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52,
0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E,
0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A,
0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76,
0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82,
0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E,
0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A,
0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2,
0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE,
0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA,
0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
0xD7, 0xD8, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF, 0xE0, 0xE1, 0xE2,
0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE,
0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA,
0xFB, 0xFC, 0xFD, 0xFE, 0xFF, times256(0xFF)};
# undef times4
# undef times256
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Produce one de-interlaced output line from five source lines: a weighted
 * average using the kernel above, with the result clamped to 0..255 via the
 * crop table. */
FFMPEG_INLINE void deinterlace_line(uint8_t *dst,
                                    const uint8_t *lum_m4,
                                    const uint8_t *lum_m3,
                                    const uint8_t *lum_m2,
                                    const uint8_t *lum_m1,
                                    const uint8_t *lum,
                                    int size)
{
  /* Offset into the table so negative sums clamp to 0 and values > 255 to 255. */
  const uint8_t *cm = ff_compat_crop_tab + MAX_NEG_CROP;
  int sum;

  for (; size > 0; size--) {
    sum = -lum_m4[0];
    sum += lum_m3[0] << 2;
    sum += lum_m2[0] << 1;
    sum += lum_m1[0] << 2;
    sum += -lum[0];
    /* `+ 4` rounds before the divide-by-8 shift. */
    dst[0] = cm[(sum + 4) >> 3];
    lum_m4++;
    lum_m3++;
    lum_m2++;
    lum_m1++;
    lum++;
    dst++;
  }
}
/* Same [-1 4 2 4 -1] / 8 filter as #deinterlace_line, but writing the result
 * back into `lum_m2` and shifting `lum_m2` into `lum_m4` so the caller can
 * advance through the image without a separate destination buffer. */
FFMPEG_INLINE void deinterlace_line_inplace(
    uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum, int size)
{
  /* Offset into the table so negative sums clamp to 0 and values > 255 to 255. */
  const uint8_t *cm = ff_compat_crop_tab + MAX_NEG_CROP;
  int sum;

  for (; size > 0; size--) {
    sum = -lum_m4[0];
    sum += lum_m3[0] << 2;
    sum += lum_m2[0] << 1;
    /* Preserve the original `lum_m2` line before overwriting it below. */
    lum_m4[0] = lum_m2[0];
    sum += lum_m1[0] << 2;
    sum += -lum[0];
    lum_m2[0] = cm[(sum + 4) >> 3];
    lum_m4++;
    lum_m3++;
    lum_m2++;
    lum_m1++;
    lum++;
  }
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
 * top field is copied as is, but the bottom field is deinterlaced
 * against the top field. Writes `height` lines of `width` bytes from
 * `src1` (row stride `src_wrap`) into `dst` (row stride `dst_wrap`). */
FFMPEG_INLINE void deinterlace_bottom_field(
    uint8_t *dst, int dst_wrap, const uint8_t *src1, int src_wrap, int width, int height)
{
  const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  int y;

  /* The first filter taps are clamped to the top of the image. */
  src_m2 = src1;
  src_m1 = src1;
  src_0 = &src_m1[src_wrap];
  src_p1 = &src_0[src_wrap];
  src_p2 = &src_p1[src_wrap];
  for (y = 0; y < (height - 2); y += 2) {
    /* Copy the top-field line unchanged, then filter the bottom-field line. */
    memcpy(dst, src_m1, width);
    dst += dst_wrap;
    deinterlace_line(dst, src_m2, src_m1, src_0, src_p1, src_p2, width);
    src_m2 = src_0;
    src_m1 = src_p1;
    src_0 = src_p2;
    src_p1 += 2 * src_wrap;
    src_p2 += 2 * src_wrap;
    dst += dst_wrap;
  }
  memcpy(dst, src_m1, width);
  dst += dst_wrap;
  /* do last line; the bottom taps are clamped to the last available line. */
  deinterlace_line(dst, src_m2, src_m1, src_0, src_0, src_0, width);
}
/* In-place variant of #deinterlace_bottom_field: filters `src1` directly,
 * using a single temporary line buffer. Returns 0 on success or a negative
 * AVERROR code when the temporary buffer cannot be allocated. */
FFMPEG_INLINE int deinterlace_bottom_field_inplace(uint8_t *src1,
                                                   int src_wrap,
                                                   int width,
                                                   int height)
{
  uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  int y;
  /* One scratch line holds the row that would otherwise be overwritten
   * before the filter reads it. */
  uint8_t *buf = (uint8_t *)av_malloc(width);
  if (!buf) {
    return AVERROR(ENOMEM);
  }
  src_m1 = src1;
  memcpy(buf, src_m1, width);
  src_0 = &src_m1[src_wrap];
  src_p1 = &src_0[src_wrap];
  src_p2 = &src_p1[src_wrap];
  for (y = 0; y < (height - 2); y += 2) {
    deinterlace_line_inplace(buf, src_m1, src_0, src_p1, src_p2, width);
    src_m1 = src_p1;
    src_0 = src_p2;
    src_p1 += 2 * src_wrap;
    src_p2 += 2 * src_wrap;
  }
  /* do last line; bottom taps clamped to the last available line. */
  deinterlace_line_inplace(buf, src_m1, src_0, src_0, src_0, width);
  av_free(buf);
  return 0;
}
/* De-interlace `src` into `dst` (they may be the same frame for in-place
 * operation). Only a fixed set of planar YUV formats and GRAY8 is supported,
 * and both dimensions must be multiples of 4; returns -1 for anything else.
 * Returns 0 on success, or a negative AVERROR code on allocation failure. */
int ffmpeg_deinterlace(
    AVFrame *dst, const AVFrame *src, enum AVPixelFormat pix_fmt, int width, int height)
{
  int i, ret;

  if (pix_fmt != AV_PIX_FMT_YUV420P && pix_fmt != AV_PIX_FMT_YUVJ420P &&
      pix_fmt != AV_PIX_FMT_YUV422P && pix_fmt != AV_PIX_FMT_YUVJ422P &&
      pix_fmt != AV_PIX_FMT_YUV444P && pix_fmt != AV_PIX_FMT_YUV411P &&
      pix_fmt != AV_PIX_FMT_GRAY8)
  {
    return -1;
  }
  if ((width & 3) != 0 || (height & 3) != 0) {
    return -1;
  }

  /* Process luma plane (i == 0), then the two chroma planes. */
  for (i = 0; i < 3; i++) {
    if (i == 1) {
      /* Shrink dimensions once for the chroma planes, according to the
       * format's chroma subsampling. */
      switch (pix_fmt) {
        case AV_PIX_FMT_YUVJ420P:
        case AV_PIX_FMT_YUV420P:
          width >>= 1;
          height >>= 1;
          break;
        case AV_PIX_FMT_YUV422P:
        case AV_PIX_FMT_YUVJ422P:
          width >>= 1;
          break;
        case AV_PIX_FMT_YUV411P:
          width >>= 2;
          break;
        default:
          break;
      }
      if (pix_fmt == AV_PIX_FMT_GRAY8) {
        /* Gray images have a single plane; nothing more to do. */
        break;
      }
    }
    if (src == dst) {
      ret = deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i], width, height);
      if (ret < 0) {
        return ret;
      }
    }
    else {
      deinterlace_bottom_field(
          dst->data[i], dst->linesize[i], src->data[i], src->linesize[i], width, height);
    }
  }
  return 0;
}
#endif /* WITH_FFMPEG */
/* Test whether the file is a movie that can be decoded. Only available when
 * built with FFmpeg; always false otherwise. */
bool IMB_isanim(const char *filepath)
{
  /* Movie files must be referenced by absolute path at this level. */
  BLI_assert(!BLI_path_is_rel(filepath));
#ifdef WITH_FFMPEG
  return isffmpeg(filepath) != 0;
#else
  return false;
#endif
}
/* One-time FFmpeg initialization: register devices, reset the last-error
 * buffer and install the error-capturing log callback. */
void IMB_ffmpeg_init()
{
#ifdef WITH_FFMPEG
  avdevice_register_all();

  ffmpeg_last_error_buffer[0] = '\0';

  if (G.debug & G_DEBUG_FFMPEG) {
    av_log_set_level(AV_LOG_DEBUG);
  }

  /* Set separate callback which could store last error to report to UI. */
  av_log_set_callback(ffmpeg_log_callback);
#endif
}
/* Release global FFmpeg related resources (currently the swscale context
 * cache). Call once at shutdown. */
void IMB_ffmpeg_exit()
{
#ifdef WITH_FFMPEG
  ffmpeg_sws_exit();
#endif
}
/* Which pixel bit depths the given video codec supports; returns a bitmask
 * of `R_IMF_CHAN_DEPTH_` flags. 8 bits per channel is always supported. */
int IMB_ffmpeg_valid_bit_depths(int av_codec_id)
{
  int bit_depths = R_IMF_CHAN_DEPTH_8;
#ifdef WITH_FFMPEG
  /* Note: update properties_output.py `use_bpp` when changing this function. */
  switch (av_codec_id) {
    case AV_CODEC_ID_H265:
    case AV_CODEC_ID_AV1:
      bit_depths |= R_IMF_CHAN_DEPTH_12;
      /* 12-bit capable codecs also support 10 bits. */
      [[fallthrough]];
    case AV_CODEC_ID_H264:
      bit_depths |= R_IMF_CHAN_DEPTH_10;
      break;
    default:
      break;
  }
#else
  UNUSED_VARS(av_codec_id);
#endif
  return bit_depths;
}
#ifdef WITH_FFMPEG
/* Fill render settings with a commonly used encoding preset: container and
 * codec depend on the preset; the rate-control settings are shared. Unknown
 * preset values leave the settings untouched. */
static void ffmpeg_preset_set(RenderData *rd, int preset)
{
  const bool is_ntsc = (rd->frs_sec != 25);

  switch (preset) {
    case FFMPEG_PRESET_H264:
      rd->ffcodecdata.type = FFMPEG_AVI;
      rd->ffcodecdata.codec = AV_CODEC_ID_H264;
      break;
    case FFMPEG_PRESET_XVID:
      rd->ffcodecdata.type = FFMPEG_AVI;
      rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
      break;
    case FFMPEG_PRESET_THEORA:
      rd->ffcodecdata.type = FFMPEG_OGG; /* XXX broken */
      rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
      break;
    case FFMPEG_PRESET_AV1:
      rd->ffcodecdata.type = FFMPEG_AV1;
      rd->ffcodecdata.codec = AV_CODEC_ID_AV1;
      break;
    default:
      /* Unknown preset: change nothing. */
      return;
  }

  /* Rate-control settings are identical for every preset above. */
  rd->ffcodecdata.video_bitrate = 6000;
  rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
  rd->ffcodecdata.rc_max_rate = 9000;
  rd->ffcodecdata.rc_min_rate = 0;
  rd->ffcodecdata.rc_buffer_size = 224 * 8;
  rd->ffcodecdata.mux_packet_size = 2048;
  rd->ffcodecdata.mux_rate = 10080000;
}
#endif
/* Make sure the FFmpeg codec settings in `rd` are consistent with the image
 * type chosen in `imf`, applying a suitable preset when they are not. */
void IMB_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf)
{
#ifdef WITH_FFMPEG
  int audio = 0;

  if (imf->imtype == R_IMF_IMTYPE_FFMPEG) {
    /* Fall back to sane H.264/Matroska defaults when the stored settings are
     * missing or invalid. */
    if (rd->ffcodecdata.type <= 0 || rd->ffcodecdata.codec <= 0 ||
        rd->ffcodecdata.audio_codec <= 0 || rd->ffcodecdata.video_bitrate <= 1)
    {
      ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
      rd->ffcodecdata.constant_rate_factor = FFM_CRF_MEDIUM;
      rd->ffcodecdata.ffmpeg_preset = FFM_PRESET_GOOD;
      rd->ffcodecdata.type = FFMPEG_MKV;
    }
    /* OGG container is replaced with MPEG-2 (see the "XXX broken" note in
     * `ffmpeg_preset_set`). */
    if (rd->ffcodecdata.type == FFMPEG_OGG) {
      rd->ffcodecdata.type = FFMPEG_MPEG2;
    }
    audio = 1;
  }
  else if (imf->imtype == R_IMF_IMTYPE_H264) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
      ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
      audio = 1;
    }
  }
  else if (imf->imtype == R_IMF_IMTYPE_XVID) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
      ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
      audio = 1;
    }
  }
  else if (imf->imtype == R_IMF_IMTYPE_THEORA) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
      ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
      audio = 1;
    }
  }
  else if (imf->imtype == R_IMF_IMTYPE_AV1) {
    if (rd->ffcodecdata.codec != AV_CODEC_ID_AV1) {
      ffmpeg_preset_set(rd, FFMPEG_PRESET_AV1);
      audio = 1;
    }
  }

  /* Give audio settings valid defaults whenever a video preset was applied. */
  if (audio && rd->ffcodecdata.audio_codec < 0) {
    rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
    rd->ffcodecdata.audio_bitrate = 128;
  }
#else
  UNUSED_VARS(rd, imf);
#endif
}
/* True when the given codec (#AVCodecID value) can encode an alpha channel. */
bool IMB_ffmpeg_alpha_channel_is_supported(int av_codec_id)
{
  /* Use `#ifdef` (not `#if`): WITH_FFMPEG may be defined without a value, and
   * every other guard in this file tests it with `#ifdef`. */
#ifdef WITH_FFMPEG
  return ELEM(av_codec_id,
              AV_CODEC_ID_FFV1,
              AV_CODEC_ID_QTRLE,
              AV_CODEC_ID_PNG,
              AV_CODEC_ID_VP9,
              AV_CODEC_ID_HUFFYUV);
#else
  UNUSED_VARS(av_codec_id);
  return false;
#endif
}
/* True when the given codec (#AVCodecID value) supports constant-rate-factor
 * (quality based) encoding. */
bool IMB_ffmpeg_codec_supports_crf(int av_codec_id)
{
  /* Use `#ifdef` (not `#if`): WITH_FFMPEG may be defined without a value, and
   * every other guard in this file tests it with `#ifdef`. */
#ifdef WITH_FFMPEG
  return ELEM(av_codec_id,
              AV_CODEC_ID_H264,
              AV_CODEC_ID_H265,
              AV_CODEC_ID_MPEG4,
              AV_CODEC_ID_VP9,
              AV_CODEC_ID_AV1);
#else
  UNUSED_VARS(av_codec_id);
  return false;
#endif
}

View File

@@ -0,0 +1,24 @@
/* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup imbuf
*/
#ifdef WITH_FFMPEG
extern "C" {
# include <libavutil/pixfmt.h>
}
struct AVFrame;
int ffmpeg_deinterlace(
AVFrame *dst, const AVFrame *src, enum AVPixelFormat pix_fmt, int width, int height);
const char *ffmpeg_last_error();
#endif /* WITH_FFMPEG */

View File

@@ -1,4 +1,5 @@
/* SPDX-FileCopyrightText: 2011 Peter Schlaile <peter [at] schlaile [dot] de>.
* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
@@ -32,11 +33,11 @@
# include "BLI_winstuff.h"
#endif
#include "BKE_writeffmpeg.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_indexer.hh"
#include "ffmpeg_swscale.hh"
#include "movie_proxy_indexer.hh"
#include "movie_read.hh"
#ifdef WITH_FFMPEG
extern "C" {
@@ -599,13 +600,13 @@ static proxy_output_ctx *alloc_proxy_output_ffmpeg(ImBufAnim *anim,
rv->frame->height = height;
av_frame_get_buffer(rv->frame, align);
rv->sws_ctx = BKE_ffmpeg_sws_get_context(st->codecpar->width,
rv->orig_height,
AVPixelFormat(st->codecpar->format),
width,
height,
rv->c->pix_fmt,
SWS_FAST_BILINEAR);
rv->sws_ctx = ffmpeg_sws_get_context(st->codecpar->width,
rv->orig_height,
AVPixelFormat(st->codecpar->format),
width,
height,
rv->c->pix_fmt,
SWS_FAST_BILINEAR);
}
ret = avformat_write_header(rv->of, nullptr);
@@ -640,7 +641,7 @@ static void add_to_proxy_output_ffmpeg(proxy_output_ctx *ctx, AVFrame *frame)
if (ctx->sws_ctx && frame &&
(frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]))
{
BKE_ffmpeg_sws_scale_frame(ctx->sws_ctx, ctx->frame, frame);
ffmpeg_sws_scale_frame(ctx->sws_ctx, ctx->frame, frame);
}
frame = ctx->sws_ctx ? (frame ? ctx->frame : nullptr) : frame;
@@ -731,7 +732,7 @@ static void free_proxy_output_ffmpeg(proxy_output_ctx *ctx, int rollback)
avformat_free_context(ctx->of);
if (ctx->sws_ctx) {
BKE_ffmpeg_sws_release_context(ctx->sws_ctx);
ffmpeg_sws_release_context(ctx->sws_ctx);
ctx->sws_ctx = nullptr;
}
if (ctx->frame) {

View File

@@ -1,4 +1,4 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
/* SPDX-FileCopyrightText: 2023-2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
@@ -12,7 +12,8 @@
# include <io.h>
#endif
#include "IMB_anim.hh"
#include "IMB_movie_enums.hh"
#include "movie_read.hh"
#include <stdio.h>
#include <stdlib.h>
/*

View File

@@ -1,4 +1,5 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
@@ -28,18 +29,19 @@
#include "MEM_guardedalloc.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "intern/IMB_colormanagement_intern.hh"
#include "IMB_colormanagement.hh"
#include "IMB_colormanagement_intern.hh"
#include "IMB_anim.hh"
#include "IMB_indexer.hh"
#include "IMB_metadata.hh"
#include "movie_proxy_indexer.hh"
#include "movie_read.hh"
#ifdef WITH_FFMPEG
# include "BKE_writeffmpeg.hh"
# include "ffmpeg_swscale.hh"
# include "ffmpeg_util.hh"
extern "C" {
# include <libavcodec/avcodec.h>
@@ -420,14 +422,14 @@ static int startffmpeg(ImBufAnim *anim)
* the conversion is not fully accurate and introduces some banding and color
* shifts, particularly in dark regions. See issue #111703 or upstream
* ffmpeg ticket https://trac.ffmpeg.org/ticket/1582 */
anim->img_convert_ctx = BKE_ffmpeg_sws_get_context(anim->x,
anim->y,
anim->pCodecCtx->pix_fmt,
anim->x,
anim->y,
anim->pFrameRGB->format,
SWS_POINT | SWS_FULL_CHR_H_INT |
SWS_ACCURATE_RND);
anim->img_convert_ctx = ffmpeg_sws_get_context(anim->x,
anim->y,
anim->pCodecCtx->pix_fmt,
anim->x,
anim->y,
anim->pFrameRGB->format,
SWS_POINT | SWS_FULL_CHR_H_INT |
SWS_ACCURATE_RND);
if (!anim->img_convert_ctx) {
fprintf(stderr,
@@ -561,11 +563,11 @@ static void ffmpeg_postprocess(ImBufAnim *anim, AVFrame *input, ImBuf *ibuf)
input->data[3]);
if (anim->ib_flags & IB_animdeinterlace) {
if (av_image_deinterlace(anim->pFrameDeinterlaced,
anim->pFrame,
anim->pCodecCtx->pix_fmt,
anim->pCodecCtx->width,
anim->pCodecCtx->height) < 0)
if (ffmpeg_deinterlace(anim->pFrameDeinterlaced,
anim->pFrame,
anim->pCodecCtx->pix_fmt,
anim->pCodecCtx->width,
anim->pCodecCtx->height) < 0)
{
filter_y = true;
}
@@ -579,7 +581,7 @@ static void ffmpeg_postprocess(ImBufAnim *anim, AVFrame *input, ImBuf *ibuf)
* it does not support direct YUV->RGBA float interleaved conversion).
* Do vertical flip and interleave into RGBA manually. */
/* Decode, then do vertical flip into destination. */
BKE_ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
const size_t src_linesize = anim->pFrameRGB->linesize[0];
BLI_assert_msg(anim->pFrameRGB->linesize[1] == src_linesize &&
@@ -623,14 +625,14 @@ static void ffmpeg_postprocess(ImBufAnim *anim, AVFrame *input, ImBuf *ibuf)
anim->pFrameRGB->linesize[0] = -ibuf_linesize;
anim->pFrameRGB->data[0] = ibuf->byte_buffer.data + (ibuf->y - 1) * ibuf_linesize;
BKE_ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
anim->pFrameRGB->linesize[0] = rgb_linesize;
anim->pFrameRGB->data[0] = rgb_data;
}
else {
/* Decode, then do vertical flip into destination. */
BKE_ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
ffmpeg_sws_scale_frame(anim->img_convert_ctx, anim->pFrameRGB, input);
/* Use negative line size to do vertical image flip. */
const int src_linesize[4] = {-rgb_linesize, 0, 0, 0};
@@ -1187,7 +1189,7 @@ static void free_anim_ffmpeg(ImBufAnim *anim)
MEM_freeN(anim->pFrameDeinterlaced->data[0]);
}
av_frame_free(&anim->pFrameDeinterlaced);
BKE_ffmpeg_sws_release_context(anim->img_convert_ctx);
ffmpeg_sws_release_context(anim->img_convert_ctx);
}
anim->duration_in_frames = 0;
}
@@ -1322,7 +1324,7 @@ int IMB_anim_get_duration(ImBufAnim *anim, IMB_Timecode_Type tc)
return IMB_indexer_get_duration(idx);
}
double IMD_anim_get_offset(ImBufAnim *anim)
double IMB_anim_get_offset(ImBufAnim *anim)
{
return anim->start_offset;
}

View File

@@ -1,4 +1,5 @@
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
* SPDX-FileCopyrightText: 2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */

View File

@@ -1,12 +1,16 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
/* SPDX-FileCopyrightText: 2006 Peter Schlaile.
* SPDX-FileCopyrightText: 2023-2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later
* Partial Copyright 2006 Peter Schlaile. */
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup bke
* \ingroup imbuf
*/
#include "DNA_scene_types.h"
#include "IMB_movie_write.hh"
#ifdef WITH_FFMPEG
# include <cstdio>
# include <cstring>
@@ -15,8 +19,6 @@
# include "MEM_guardedalloc.h"
# include "DNA_scene_types.h"
# include "BLI_blenlib.h"
# ifdef WITH_AUDASPACE
@@ -28,17 +30,19 @@
# include "BLI_math_base.h"
# include "BLI_threads.h"
# include "BLI_utildefines.h"
# include "BLI_vector.hh"
# include "BKE_global.hh"
# include "BKE_image.hh"
# include "BKE_main.hh"
# include "BKE_report.hh"
# include "BKE_sound.h"
# include "BKE_writeffmpeg.hh"
# include "IMB_anim.hh"
# include "IMB_imbuf.hh"
# include "ffmpeg_swscale.hh"
# include "ffmpeg_util.hh"
/* This needs to be included after BLI_math_base.h otherwise it will redefine some math defines
* like M_SQRT1_2 leading to warnings with MSVC */
extern "C" {
@@ -50,34 +54,13 @@ extern "C" {
# include <libavutil/opt.h>
# include <libavutil/rational.h>
# include <libavutil/samplefmt.h>
# include <libswscale/swscale.h>
# include "ffmpeg_compat.h"
}
struct StampData;
/* libswscale context creation and destruction is expensive.
* Maintain a cache of already created contexts. */
constexpr int64_t swscale_cache_max_entries = 32;
struct SwscaleContext {
int src_width = 0, src_height = 0;
int dst_width = 0, dst_height = 0;
AVPixelFormat src_format = AV_PIX_FMT_NONE, dst_format = AV_PIX_FMT_NONE;
int flags = 0;
SwsContext *context = nullptr;
int64_t last_use_timestamp = 0;
bool is_used = false;
};
static ThreadMutex swscale_cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t swscale_cache_timestamp = 0;
static blender::Vector<SwscaleContext> *swscale_cache = nullptr;
struct FFMpegContext {
struct ImbMovieWriter {
int ffmpeg_type;
AVCodecID ffmpeg_codec;
AVCodecID ffmpeg_audio_codec;
@@ -126,7 +109,8 @@ struct FFMpegContext {
printf
static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value);
static void ffmpeg_filepath_get(FFMpegContext *context,
static void ffmpeg_movie_close(ImbMovieWriter *context);
static void ffmpeg_filepath_get(ImbMovieWriter *context,
char filepath[FILE_MAX],
const RenderData *rd,
bool preview,
@@ -150,7 +134,7 @@ static int request_float_audio_buffer(int codec_id)
# ifdef WITH_AUDASPACE
static int write_audio_frame(FFMpegContext *context)
static int write_audio_frame(ImbMovieWriter *context)
{
AVFrame *frame = nullptr;
AVCodecContext *c = context->audio_codec;
@@ -340,7 +324,7 @@ static const char **get_file_extensions(int format)
}
/* Write a frame to the output file */
static bool write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
static bool write_video_frame(ImbMovieWriter *context, AVFrame *frame, ReportList *reports)
{
int ret, success = 1;
AVPacket *packet = av_packet_alloc();
@@ -396,7 +380,7 @@ static bool write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList
}
/* read and encode a frame of video from the buffer */
static AVFrame *generate_video_frame(FFMpegContext *context, const ImBuf *image)
static AVFrame *generate_video_frame(ImbMovieWriter *context, const ImBuf *image)
{
const uint8_t *pixels = image->byte_buffer.data;
const float *pixels_fl = image->float_buffer.data;
@@ -482,7 +466,7 @@ static AVFrame *generate_video_frame(FFMpegContext *context, const ImBuf *image)
BLI_assert(context->img_convert_ctx != NULL);
/* Ensure the frame we are scaling to is writable as well. */
av_frame_make_writable(context->current_frame);
BKE_ffmpeg_sws_scale_frame(context->img_convert_ctx, context->current_frame, rgb_frame);
ffmpeg_sws_scale_frame(context->img_convert_ctx, context->current_frame, rgb_frame);
}
return context->current_frame;
@@ -531,7 +515,7 @@ static AVRational calc_time_base(uint den, double num, int codec_id)
}
static const AVCodec *get_av1_encoder(
FFMpegContext *context, RenderData *rd, AVDictionary **opts, int rectx, int recty)
ImbMovieWriter *context, RenderData *rd, AVDictionary **opts, int rectx, int recty)
{
/* There are three possible encoders for AV1: `libaom-av1`, librav1e, and `libsvtav1`. librav1e
* tends to give the best compression quality while `libsvtav1` tends to be the fastest encoder.
@@ -712,189 +696,6 @@ static const AVCodec *get_av1_encoder(
return codec;
}
static SwsContext *sws_create_context(int src_width,
int src_height,
int av_src_format,
int dst_width,
int dst_height,
int av_dst_format,
int sws_flags)
{
# if defined(FFMPEG_SWSCALE_THREADING)
/* sws_getContext does not allow passing flags that ask for multi-threaded
* scaling context, so do it the hard way. */
SwsContext *c = sws_alloc_context();
if (c == nullptr) {
return nullptr;
}
av_opt_set_int(c, "srcw", src_width, 0);
av_opt_set_int(c, "srch", src_height, 0);
av_opt_set_int(c, "src_format", av_src_format, 0);
av_opt_set_int(c, "dstw", dst_width, 0);
av_opt_set_int(c, "dsth", dst_height, 0);
av_opt_set_int(c, "dst_format", av_dst_format, 0);
av_opt_set_int(c, "sws_flags", sws_flags, 0);
av_opt_set_int(c, "threads", BLI_system_thread_count(), 0);
if (sws_init_context(c, nullptr, nullptr) < 0) {
sws_freeContext(c);
return nullptr;
}
# else
SwsContext *c = sws_getContext(src_width,
src_height,
AVPixelFormat(av_src_format),
dst_width,
dst_height,
AVPixelFormat(av_dst_format),
sws_flags,
nullptr,
nullptr,
nullptr);
# endif
return c;
}
static void init_swscale_cache_if_needed()
{
if (swscale_cache == nullptr) {
swscale_cache = new blender::Vector<SwscaleContext>();
swscale_cache_timestamp = 0;
}
}
static bool remove_oldest_swscale_context()
{
int64_t oldest_index = -1;
int64_t oldest_time = 0;
for (int64_t index = 0; index < swscale_cache->size(); index++) {
SwscaleContext &ctx = (*swscale_cache)[index];
if (ctx.is_used) {
continue;
}
int64_t time = swscale_cache_timestamp - ctx.last_use_timestamp;
if (time > oldest_time) {
oldest_time = time;
oldest_index = index;
}
}
if (oldest_index >= 0) {
SwscaleContext &ctx = (*swscale_cache)[oldest_index];
sws_freeContext(ctx.context);
swscale_cache->remove_and_reorder(oldest_index);
return true;
}
return false;
}
static void maintain_swscale_cache_size()
{
while (swscale_cache->size() > swscale_cache_max_entries) {
if (!remove_oldest_swscale_context()) {
/* Could not remove anything (all contexts are actively used),
* stop trying. */
break;
}
}
}
SwsContext *BKE_ffmpeg_sws_get_context(int src_width,
int src_height,
int av_src_format,
int dst_width,
int dst_height,
int av_dst_format,
int sws_flags)
{
BLI_mutex_lock(&swscale_cache_lock);
init_swscale_cache_if_needed();
swscale_cache_timestamp++;
/* Search for unused context that has suitable parameters. */
SwsContext *ctx = nullptr;
for (SwscaleContext &c : *swscale_cache) {
if (!c.is_used && c.src_width == src_width && c.src_height == src_height &&
c.src_format == av_src_format && c.dst_width == dst_width && c.dst_height == dst_height &&
c.dst_format == av_dst_format && c.flags == sws_flags)
{
ctx = c.context;
/* Mark as used. */
c.is_used = true;
c.last_use_timestamp = swscale_cache_timestamp;
break;
}
}
if (ctx == nullptr) {
/* No free matching context in cache: create a new one. */
ctx = sws_create_context(
src_width, src_height, av_src_format, dst_width, dst_height, av_dst_format, sws_flags);
SwscaleContext c;
c.src_width = src_width;
c.src_height = src_height;
c.dst_width = dst_width;
c.dst_height = dst_height;
c.src_format = AVPixelFormat(av_src_format);
c.dst_format = AVPixelFormat(av_dst_format);
c.flags = sws_flags;
c.context = ctx;
c.is_used = true;
c.last_use_timestamp = swscale_cache_timestamp;
swscale_cache->append(c);
maintain_swscale_cache_size();
}
BLI_mutex_unlock(&swscale_cache_lock);
return ctx;
}
void BKE_ffmpeg_sws_release_context(SwsContext *ctx)
{
BLI_mutex_lock(&swscale_cache_lock);
init_swscale_cache_if_needed();
bool found = false;
for (SwscaleContext &c : *swscale_cache) {
if (c.context == ctx) {
BLI_assert_msg(c.is_used, "Releasing ffmpeg swscale context that is not in use");
c.is_used = false;
found = true;
break;
}
}
BLI_assert_msg(found, "Releasing ffmpeg swscale context that is not in cache");
UNUSED_VARS_NDEBUG(found);
maintain_swscale_cache_size();
BLI_mutex_unlock(&swscale_cache_lock);
}
void BKE_ffmpeg_exit()
{
BLI_mutex_lock(&swscale_cache_lock);
if (swscale_cache != nullptr) {
for (SwscaleContext &c : *swscale_cache) {
sws_freeContext(c.context);
}
delete swscale_cache;
swscale_cache = nullptr;
}
BLI_mutex_unlock(&swscale_cache_lock);
}
void BKE_ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src)
{
# if defined(FFMPEG_SWSCALE_THREADING)
sws_scale_frame(ctx, dst, src);
# else
sws_scale(ctx, src->data, src->linesize, 0, src->height, dst->data, dst->linesize);
# endif
}
/* Remap H.264 CRF to H.265 CRF: 17..32 range (23 default) to 20..37 range (28 default).
* https://trac.ffmpeg.org/wiki/Encode/H.265 */
static int remap_crf_to_h265_crf(int crf, bool is_10_or_12_bpp)
@@ -927,7 +728,7 @@ static int remap_crf_to_h264_10bpp_crf(int crf)
return crf;
}
static void set_quality_rate_options(const FFMpegContext *context,
static void set_quality_rate_options(const ImbMovieWriter *context,
const AVCodecID codec_id,
const RenderData *rd,
AVDictionary **opts)
@@ -935,7 +736,7 @@ static void set_quality_rate_options(const FFMpegContext *context,
AVCodecContext *c = context->video_codec;
/* Handle constant bit rate (CBR) case. */
if (!BKE_ffmpeg_codec_supports_crf(codec_id) || context->ffmpeg_crf < 0) {
if (!IMB_ffmpeg_codec_supports_crf(codec_id) || context->ffmpeg_crf < 0) {
c->bit_rate = context->ffmpeg_video_bitrate * 1000;
c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
@@ -1006,7 +807,7 @@ static void set_quality_rate_options(const FFMpegContext *context,
/* prepare a video stream for the output file */
static AVStream *alloc_video_stream(FFMpegContext *context,
static AVStream *alloc_video_stream(ImbMovieWriter *context,
RenderData *rd,
AVCodecID codec_id,
AVFormatContext *of,
@@ -1234,7 +1035,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
char error_str[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Couldn't initialize video codec: %s\n", error_str);
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
BLI_strncpy(error, ffmpeg_last_error(), error_size);
av_dict_free(&opts);
avcodec_free_context(&c);
context->video_codec = nullptr;
@@ -1254,7 +1055,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
/* Output pixel format is different, allocate frame for conversion. */
AVPixelFormat src_format = is_10_bpp || is_12_bpp ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_RGBA;
context->img_convert_frame = alloc_picture(src_format, c->width, c->height);
context->img_convert_ctx = BKE_ffmpeg_sws_get_context(
context->img_convert_ctx = ffmpeg_sws_get_context(
c->width, c->height, src_format, c->width, c->height, c->pix_fmt, SWS_BICUBIC);
/* Setup BT.709 coefficients for RGB->YUV conversion, if needed. */
@@ -1288,7 +1089,7 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
return st;
}
static AVStream *alloc_audio_stream(FFMpegContext *context,
static AVStream *alloc_audio_stream(ImbMovieWriter *context,
RenderData *rd,
AVCodecID codec_id,
AVFormatContext *of,
@@ -1399,7 +1200,7 @@ static AVStream *alloc_audio_stream(FFMpegContext *context,
char error_str[AV_ERROR_MAX_STRING_SIZE];
av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
fprintf(stderr, "Couldn't initialize audio codec: %s\n", error_str);
BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
BLI_strncpy(error, ffmpeg_last_error(), error_size);
avcodec_free_context(&c);
context->audio_codec = nullptr;
return nullptr;
@@ -1458,7 +1259,7 @@ static void ffmpeg_add_metadata_callback(void *data,
av_dict_set(metadata, propname, propvalue, 0);
}
static bool start_ffmpeg_impl(FFMpegContext *context,
static bool start_ffmpeg_impl(ImbMovieWriter *context,
RenderData *rd,
int rectx,
int recty,
@@ -1733,7 +1534,7 @@ static void flush_ffmpeg(AVCodecContext *c, AVStream *stream, AVFormatContext *o
* ********************************************************************** */
/* Get the output filename-- similar to the other output formats */
static void ffmpeg_filepath_get(FFMpegContext *context,
static void ffmpeg_filepath_get(ImbMovieWriter *context,
char filepath[FILE_MAX],
const RenderData *rd,
bool preview,
@@ -1802,30 +1603,40 @@ static void ffmpeg_filepath_get(FFMpegContext *context,
BLI_path_suffix(filepath, FILE_MAX, suffix, "");
}
void BKE_ffmpeg_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix)
static void ffmpeg_get_filepath(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix)
{
ffmpeg_filepath_get(nullptr, filepath, rd, preview, suffix);
}
bool BKE_ffmpeg_start(void *context_v,
const Scene *scene,
RenderData *rd,
int rectx,
int recty,
ReportList *reports,
bool preview,
const char *suffix)
static ImbMovieWriter *ffmpeg_movie_open(const Scene *scene,
RenderData *rd,
int rectx,
int recty,
ReportList *reports,
bool preview,
const char *suffix)
{
FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
ImbMovieWriter *context = static_cast<ImbMovieWriter *>(
MEM_callocN(sizeof(ImbMovieWriter), "new FFMPEG context"));
context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
context->ffmpeg_video_bitrate = 1150;
context->ffmpeg_audio_bitrate = 128;
context->ffmpeg_gop_size = 12;
context->ffmpeg_autosplit = 0;
context->stamp_data = nullptr;
context->audio_time_total = 0.0;
context->ffmpeg_autosplit_count = 0;
context->ffmpeg_preview = preview;
context->stamp_data = BKE_stamp_info_from_scene_static(scene);
bool success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
# ifdef WITH_AUDASPACE
if (context->audio_stream) {
AVCodecContext *c = context->audio_codec;
@@ -1854,21 +1665,29 @@ bool BKE_ffmpeg_start(void *context_v,
specs.format = AUD_FORMAT_FLOAT64;
break;
default:
return -31415;
success = false;
break;
}
specs.rate = rd->ffcodecdata.audio_mixrate;
context->audio_mixdown_device = BKE_sound_mixdown(
scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
if (success) {
context->audio_mixdown_device = BKE_sound_mixdown(
scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
}
}
# endif
return success;
if (!success) {
ffmpeg_movie_close(context);
return nullptr;
}
return context;
}
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit);
static void end_ffmpeg_impl(ImbMovieWriter *context, int is_autosplit);
# ifdef WITH_AUDASPACE
static void write_audio_frames(FFMpegContext *context, double to_pts)
static void write_audio_frames(ImbMovieWriter *context, double to_pts)
{
AVCodecContext *c = context->audio_codec;
@@ -1882,15 +1701,14 @@ static void write_audio_frames(FFMpegContext *context, double to_pts)
}
# endif
bool BKE_ffmpeg_append(void *context_v,
RenderData *rd,
int start_frame,
int frame,
const ImBuf *image,
const char *suffix,
ReportList *reports)
static bool ffmpeg_movie_append(ImbMovieWriter *context,
RenderData *rd,
int start_frame,
int frame,
const ImBuf *image,
const char *suffix,
ReportList *reports)
{
FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
AVFrame *avframe;
bool success = true;
@@ -1920,7 +1738,7 @@ bool BKE_ffmpeg_append(void *context_v,
return success;
}
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
static void end_ffmpeg_impl(ImbMovieWriter *context, int is_autosplit)
{
PRINT("Closing FFMPEG...\n");
@@ -2000,179 +1818,17 @@ static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
}
if (context->img_convert_ctx != nullptr) {
BKE_ffmpeg_sws_release_context(context->img_convert_ctx);
ffmpeg_sws_release_context(context->img_convert_ctx);
context->img_convert_ctx = nullptr;
}
}
void BKE_ffmpeg_end(void *context_v)
static void ffmpeg_movie_close(ImbMovieWriter *context)
{
FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
end_ffmpeg_impl(context, false);
}
void BKE_ffmpeg_preset_set(RenderData *rd, int preset)
{
bool is_ntsc = (rd->frs_sec != 25);
switch (preset) {
case FFMPEG_PRESET_H264:
rd->ffcodecdata.type = FFMPEG_AVI;
rd->ffcodecdata.codec = AV_CODEC_ID_H264;
rd->ffcodecdata.video_bitrate = 6000;
rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
rd->ffcodecdata.rc_max_rate = 9000;
rd->ffcodecdata.rc_min_rate = 0;
rd->ffcodecdata.rc_buffer_size = 224 * 8;
rd->ffcodecdata.mux_packet_size = 2048;
rd->ffcodecdata.mux_rate = 10080000;
break;
case FFMPEG_PRESET_THEORA:
case FFMPEG_PRESET_XVID:
if (preset == FFMPEG_PRESET_XVID) {
rd->ffcodecdata.type = FFMPEG_AVI;
rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
}
else if (preset == FFMPEG_PRESET_THEORA) {
rd->ffcodecdata.type = FFMPEG_OGG; /* XXX broken */
rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
}
rd->ffcodecdata.video_bitrate = 6000;
rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
rd->ffcodecdata.rc_max_rate = 9000;
rd->ffcodecdata.rc_min_rate = 0;
rd->ffcodecdata.rc_buffer_size = 224 * 8;
rd->ffcodecdata.mux_packet_size = 2048;
rd->ffcodecdata.mux_rate = 10080000;
break;
case FFMPEG_PRESET_AV1:
rd->ffcodecdata.type = FFMPEG_AV1;
rd->ffcodecdata.codec = AV_CODEC_ID_AV1;
rd->ffcodecdata.video_bitrate = 6000;
rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
rd->ffcodecdata.rc_max_rate = 9000;
rd->ffcodecdata.rc_min_rate = 0;
rd->ffcodecdata.rc_buffer_size = 224 * 8;
rd->ffcodecdata.mux_packet_size = 2048;
rd->ffcodecdata.mux_rate = 10080000;
break;
}
}
void BKE_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf)
{
int audio = 0;
if (imf->imtype == R_IMF_IMTYPE_FFMPEG) {
if (rd->ffcodecdata.type <= 0 || rd->ffcodecdata.codec <= 0 ||
rd->ffcodecdata.audio_codec <= 0 || rd->ffcodecdata.video_bitrate <= 1)
{
BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
rd->ffcodecdata.constant_rate_factor = FFM_CRF_MEDIUM;
rd->ffcodecdata.ffmpeg_preset = FFM_PRESET_GOOD;
rd->ffcodecdata.type = FFMPEG_MKV;
}
if (rd->ffcodecdata.type == FFMPEG_OGG) {
rd->ffcodecdata.type = FFMPEG_MPEG2;
}
audio = 1;
}
else if (imf->imtype == R_IMF_IMTYPE_H264) {
if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
audio = 1;
}
}
else if (imf->imtype == R_IMF_IMTYPE_XVID) {
if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
audio = 1;
}
}
else if (imf->imtype == R_IMF_IMTYPE_THEORA) {
if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
audio = 1;
}
}
else if (imf->imtype == R_IMF_IMTYPE_AV1) {
if (rd->ffcodecdata.codec != AV_CODEC_ID_AV1) {
BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_AV1);
audio = 1;
}
}
if (audio && rd->ffcodecdata.audio_codec < 0) {
rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
rd->ffcodecdata.audio_bitrate = 128;
}
}
bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
{
int codec = rd->ffcodecdata.codec;
return ELEM(codec,
AV_CODEC_ID_FFV1,
AV_CODEC_ID_QTRLE,
AV_CODEC_ID_PNG,
AV_CODEC_ID_VP9,
AV_CODEC_ID_HUFFYUV);
}
bool BKE_ffmpeg_codec_supports_crf(int av_codec_id)
{
return ELEM(av_codec_id,
AV_CODEC_ID_H264,
AV_CODEC_ID_H265,
AV_CODEC_ID_MPEG4,
AV_CODEC_ID_VP9,
AV_CODEC_ID_AV1);
}
int BKE_ffmpeg_valid_bit_depths(int av_codec_id)
{
int bit_depths = R_IMF_CHAN_DEPTH_8;
/* Note: update properties_output.py `use_bpp` when changing this function. */
if (ELEM(av_codec_id, AV_CODEC_ID_H264, AV_CODEC_ID_H265, AV_CODEC_ID_AV1)) {
bit_depths |= R_IMF_CHAN_DEPTH_10;
}
if (ELEM(av_codec_id, AV_CODEC_ID_H265, AV_CODEC_ID_AV1)) {
bit_depths |= R_IMF_CHAN_DEPTH_12;
}
return bit_depths;
}
void *BKE_ffmpeg_context_create()
{
/* New FFMPEG data struct. */
FFMpegContext *context = static_cast<FFMpegContext *>(
MEM_callocN(sizeof(FFMpegContext), "new FFMPEG context"));
context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
context->ffmpeg_video_bitrate = 1150;
context->ffmpeg_audio_bitrate = 128;
context->ffmpeg_gop_size = 12;
context->ffmpeg_autosplit = 0;
context->ffmpeg_autosplit_count = 0;
context->ffmpeg_preview = false;
context->stamp_data = nullptr;
context->audio_time_total = 0.0;
return context;
}
void BKE_ffmpeg_context_free(void *context_v)
{
FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
if (context == nullptr) {
return;
}
end_ffmpeg_impl(context, false);
if (context->stamp_data) {
MEM_freeN(context->stamp_data);
}
@@ -2180,3 +1836,85 @@ void BKE_ffmpeg_context_free(void *context_v)
}
#endif /* WITH_FFMPEG */
static bool is_imtype_ffmpeg(const char imtype)
{
return ELEM(imtype,
R_IMF_IMTYPE_AVIRAW,
R_IMF_IMTYPE_AVIJPEG,
R_IMF_IMTYPE_FFMPEG,
R_IMF_IMTYPE_H264,
R_IMF_IMTYPE_XVID,
R_IMF_IMTYPE_THEORA,
R_IMF_IMTYPE_AV1);
}
ImbMovieWriter *IMB_movie_write_begin(const char imtype,
const Scene *scene,
RenderData *rd,
int rectx,
int recty,
ReportList *reports,
bool preview,
const char *suffix)
{
if (!is_imtype_ffmpeg(imtype)) {
return nullptr;
}
ImbMovieWriter *writer = nullptr;
#ifdef WITH_FFMPEG
writer = ffmpeg_movie_open(scene, rd, rectx, recty, reports, preview, suffix);
#else
UNUSED_VARS(scene, rd, rectx, recty, reports, preview, suffix);
#endif
return writer;
}
bool IMB_movie_write_append(ImbMovieWriter *writer,
RenderData *rd,
int start_frame,
int frame,
const ImBuf *image,
const char *suffix,
ReportList *reports)
{
if (writer == nullptr) {
return false;
}
#ifdef WITH_FFMPEG
bool ok = ffmpeg_movie_append(writer, rd, start_frame, frame, image, suffix, reports);
return ok;
#else
UNUSED_VARS(rd, start_frame, frame, image, suffix, reports);
return false;
#endif
}
void IMB_movie_write_end(ImbMovieWriter *writer)
{
#ifdef WITH_FFMPEG
if (writer) {
ffmpeg_movie_close(writer);
}
#else
UNUSED_VARS(writer);
#endif
}
void IMB_movie_filepath_get(char filepath[/*FILE_MAX*/ 1024],
const RenderData *rd,
bool preview,
const char *suffix)
{
#ifdef WITH_FFMPEG
if (is_imtype_ffmpeg(rd->im_format.imtype)) {
ffmpeg_get_filepath(filepath, rd, preview, suffix);
return;
}
#else
UNUSED_VARS(rd, preview, suffix);
#endif
filepath[0] = '\0';
}

View File

@@ -1,4 +1,4 @@
/* SPDX-FileCopyrightText: 2020-2023 Blender Authors
/* SPDX-FileCopyrightText: 2020-2024 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */

View File

@@ -7,6 +7,7 @@ set(INC
PUBLIC ${CMAKE_CURRENT_BINARY_DIR}
../../blenlib
../../imbuf
../../imbuf/movie
)
set(INC_SYS

View File

@@ -74,6 +74,7 @@
#include "BLI_utildefines.h"
#include "IMB_imbuf_enums.h"
#include "movie/IMB_movie_enums.hh"
#include "DNA_defaults.h"

View File

@@ -249,6 +249,7 @@ set(INC
../../gpu
../../ikplugin
../../imbuf
../../imbuf/movie
../../io/usd
../../modifiers
../../nodes
@@ -315,15 +316,6 @@ if(WITH_AUDASPACE)
endif()
if(WITH_CODEC_FFMPEG)
list(APPEND INC
../../../../intern/ffmpeg
)
list(APPEND INC_SYS
${FFMPEG_INCLUDE_DIRS}
)
list(APPEND LIB
${FFMPEG_LIBRARIES}
)
add_definitions(-DWITH_FFMPEG)
endif()

View File

@@ -63,6 +63,7 @@ const EnumPropertyItem rna_enum_color_space_convert_default_items[] = {
# include "ED_node.hh"
# include "IMB_anim.hh"
# include "IMB_colormanagement.hh"
# include "IMB_imbuf.hh"

View File

@@ -26,6 +26,8 @@
#include "RNA_define.hh"
#include "RNA_enum_types.hh"
#include "IMB_anim.hh"
#include "rna_internal.hh"
#include "WM_api.hh"

View File

@@ -27,6 +27,7 @@
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_metadata.hh"
#include "IMB_movie_enums.hh"
#ifdef RNA_RUNTIME

View File

@@ -53,12 +53,7 @@
#include "RE_engine.h"
#include "RE_pipeline.h"
#ifdef WITH_FFMPEG
# include "BKE_writeffmpeg.hh"
# include "ffmpeg_compat.h"
# include <libavcodec/avcodec.h>
# include <libavformat/avformat.h>
#endif
#include "IMB_anim.hh"
#include "ED_render.hh"
#include "ED_transform.hh"
@@ -1349,10 +1344,7 @@ static void rna_ImageFormatSettings_file_format_set(PointerRNA *ptr, int value)
if (id && GS(id->name) == ID_SCE) {
Scene *scene = (Scene *)ptr->owner_id;
RenderData *rd = &scene->r;
# ifdef WITH_FFMPEG
BKE_ffmpeg_image_type_verify(rd, imf);
# endif
(void)rd;
IMB_ffmpeg_image_type_verify(rd, imf);
}
BKE_image_format_update_color_space_for_type(imf);
@@ -1388,7 +1380,6 @@ static const EnumPropertyItem *rna_ImageFormatSettings_color_mode_itemf(bContext
char chan_flag = BKE_imtype_valid_channels(imf->imtype, true) |
(is_render ? IMA_CHAN_FLAG_BW : 0);
# ifdef WITH_FFMPEG
/* a WAY more crappy case than B&W flag: depending on codec, file format MIGHT support
* alpha channel. for example MPEG format with h264 codec can't do alpha channel, but
* the same MPEG format with QTRLE codec can easily handle alpha channel.
@@ -1397,11 +1388,10 @@ static const EnumPropertyItem *rna_ImageFormatSettings_color_mode_itemf(bContext
Scene *scene = (Scene *)ptr->owner_id;
RenderData *rd = &scene->r;
if (BKE_ffmpeg_alpha_channel_is_supported(rd)) {
if (IMB_ffmpeg_alpha_channel_is_supported(rd->ffcodecdata.codec)) {
chan_flag |= IMA_CHAN_FLAG_RGBA;
}
}
# endif
if (chan_flag == (IMA_CHAN_FLAG_BW | IMA_CHAN_FLAG_RGB | IMA_CHAN_FLAG_RGBA)) {
return rna_enum_image_color_mode_items;
@@ -2947,7 +2937,7 @@ static std::optional<std::string> rna_FFmpegSettings_path(const PointerRNA * /*p
static void rna_FFmpegSettings_codec_update(Main * /*bmain*/, Scene * /*scene*/, PointerRNA *ptr)
{
FFMpegCodecData *codec_data = (FFMpegCodecData *)ptr->data;
if (!BKE_ffmpeg_codec_supports_crf(codec_data->codec)) {
if (!IMB_ffmpeg_codec_supports_crf(codec_data->codec)) {
/* Constant Rate Factor (CRF) setting is only available for some codecs. Change encoder quality
* mode to CBR for others. */
codec_data->constant_rate_factor = FFM_CRF_NONE;
@@ -6426,24 +6416,28 @@ static void rna_def_scene_ffmpeg_settings(BlenderRNA *brna)
};
static const EnumPropertyItem ffmpeg_codec_items[] = {
{AV_CODEC_ID_NONE, "NONE", 0, "No Video", "Disables video output, for audio-only renders"},
{AV_CODEC_ID_AV1, "AV1", 0, "AV1", ""},
{AV_CODEC_ID_H264, "H264", 0, "H.264", ""},
{AV_CODEC_ID_H265, "H265", 0, "H.265 / HEVC", ""},
{AV_CODEC_ID_VP9, "WEBM", 0, "WebM / VP9", ""},
{FFMPEG_CODEC_ID_NONE,
"NONE",
0,
"No Video",
"Disables video output, for audio-only renders"},
{FFMPEG_CODEC_ID_AV1, "AV1", 0, "AV1", ""},
{FFMPEG_CODEC_ID_H264, "H264", 0, "H.264", ""},
{FFMPEG_CODEC_ID_H265, "H265", 0, "H.265 / HEVC", ""},
{FFMPEG_CODEC_ID_VP9, "WEBM", 0, "WebM / VP9", ""},
/* Legacy / rare codecs. */
RNA_ENUM_ITEM_SEPR,
{AV_CODEC_ID_DNXHD, "DNXHD", 0, "DNxHD", ""},
{AV_CODEC_ID_DVVIDEO, "DV", 0, "DV", ""},
{AV_CODEC_ID_FFV1, "FFV1", 0, "FFmpeg video codec #1", ""},
{AV_CODEC_ID_FLV1, "FLASH", 0, "Flash Video", ""},
{AV_CODEC_ID_HUFFYUV, "HUFFYUV", 0, "HuffYUV", ""},
{AV_CODEC_ID_MPEG1VIDEO, "MPEG1", 0, "MPEG-1", ""},
{AV_CODEC_ID_MPEG2VIDEO, "MPEG2", 0, "MPEG-2", ""},
{AV_CODEC_ID_MPEG4, "MPEG4", 0, "MPEG-4 (divx)", ""},
{AV_CODEC_ID_PNG, "PNG", 0, "PNG", ""},
{AV_CODEC_ID_QTRLE, "QTRLE", 0, "QuickTime Animation", ""},
{AV_CODEC_ID_THEORA, "THEORA", 0, "Theora", ""},
{FFMPEG_CODEC_ID_DNXHD, "DNXHD", 0, "DNxHD", ""},
{FFMPEG_CODEC_ID_DVVIDEO, "DV", 0, "DV", ""},
{FFMPEG_CODEC_ID_FFV1, "FFV1", 0, "FFmpeg video codec #1", ""},
{FFMPEG_CODEC_ID_FLV1, "FLASH", 0, "Flash Video", ""},
{FFMPEG_CODEC_ID_HUFFYUV, "HUFFYUV", 0, "HuffYUV", ""},
{FFMPEG_CODEC_ID_MPEG1VIDEO, "MPEG1", 0, "MPEG-1", ""},
{FFMPEG_CODEC_ID_MPEG2VIDEO, "MPEG2", 0, "MPEG-2", ""},
{FFMPEG_CODEC_ID_MPEG4, "MPEG4", 0, "MPEG-4 (divx)", ""},
{FFMPEG_CODEC_ID_PNG, "PNG", 0, "PNG", ""},
{FFMPEG_CODEC_ID_QTRLE, "QTRLE", 0, "QuickTime Animation", ""},
{FFMPEG_CODEC_ID_THEORA, "THEORA", 0, "Theora", ""},
{0, nullptr, 0, nullptr, nullptr},
};
@@ -6478,15 +6472,19 @@ static void rna_def_scene_ffmpeg_settings(BlenderRNA *brna)
};
static const EnumPropertyItem ffmpeg_audio_codec_items[] = {
{AV_CODEC_ID_NONE, "NONE", 0, "No Audio", "Disables audio output, for video-only renders"},
{AV_CODEC_ID_AAC, "AAC", 0, "AAC", ""},
{AV_CODEC_ID_AC3, "AC3", 0, "AC3", ""},
{AV_CODEC_ID_FLAC, "FLAC", 0, "FLAC", ""},
{AV_CODEC_ID_MP2, "MP2", 0, "MP2", ""},
{AV_CODEC_ID_MP3, "MP3", 0, "MP3", ""},
{AV_CODEC_ID_OPUS, "OPUS", 0, "Opus", ""},
{AV_CODEC_ID_PCM_S16LE, "PCM", 0, "PCM", ""},
{AV_CODEC_ID_VORBIS, "VORBIS", 0, "Vorbis", ""},
{FFMPEG_CODEC_ID_NONE,
"NONE",
0,
"No Audio",
"Disables audio output, for video-only renders"},
{FFMPEG_CODEC_ID_AAC, "AAC", 0, "AAC", ""},
{FFMPEG_CODEC_ID_AC3, "AC3", 0, "AC3", ""},
{FFMPEG_CODEC_ID_FLAC, "FLAC", 0, "FLAC", ""},
{FFMPEG_CODEC_ID_MP2, "MP2", 0, "MP2", ""},
{FFMPEG_CODEC_ID_MP3, "MP3", 0, "MP3", ""},
{FFMPEG_CODEC_ID_OPUS, "OPUS", 0, "Opus", ""},
{FFMPEG_CODEC_ID_PCM_S16LE, "PCM", 0, "PCM", ""},
{FFMPEG_CODEC_ID_VORBIS, "VORBIS", 0, "Vorbis", ""},
{0, nullptr, 0, nullptr, nullptr},
};
# endif
@@ -6525,7 +6523,7 @@ static void rna_def_scene_ffmpeg_settings(BlenderRNA *brna)
RNA_def_property_enum_bitflag_sdna(prop, nullptr, "codec");
RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
RNA_def_property_enum_items(prop, ffmpeg_codec_items);
RNA_def_property_enum_default(prop, AV_CODEC_ID_H264);
RNA_def_property_enum_default(prop, FFMPEG_CODEC_ID_H264);
RNA_def_property_ui_text(prop, "Video Codec", "FFmpeg codec to use for video output");
RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, "rna_FFmpegSettings_codec_update");

View File

@@ -34,7 +34,6 @@
# include "BKE_global.hh"
# include "BKE_image.hh"
# include "BKE_scene.hh"
# include "BKE_writemovie.hh"
# include "DEG_depsgraph_query.hh"
@@ -42,6 +41,8 @@
# include "ED_transform_snap_object_context.hh"
# include "ED_uvedit.hh"
# include "IMB_movie_write.hh"
# ifdef WITH_PYTHON
# include "BPY_extern.hh"
# endif
@@ -114,7 +115,7 @@ static void rna_SceneRender_get_frame_path(
}
if (BKE_imtype_is_movie(rd->im_format.imtype)) {
BKE_movie_filepath_get(filepath, rd, preview != 0, suffix);
IMB_movie_filepath_get(filepath, rd, preview != 0, suffix);
}
else {
BKE_image_path_from_imformat(filepath,

View File

@@ -127,6 +127,7 @@ const EnumPropertyItem rna_enum_strip_color_items[] = {
# include "DEG_depsgraph.hh"
# include "DEG_depsgraph_build.hh"
# include "IMB_anim.hh"
# include "IMB_imbuf.hh"
# include "SEQ_edit.hh"

View File

@@ -16,6 +16,7 @@ set(INC
../../geometry
../../gpu
../../imbuf
../../imbuf/movie
../../makesrna
../../modifiers
../../render

View File

@@ -4,6 +4,7 @@
#include "BKE_image.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -15,6 +15,7 @@ set(INC
../gpu
../gpu/intern
../imbuf
../imbuf/movie
../makesrna
../nodes
../sequencer

View File

@@ -18,6 +18,7 @@ struct GPUTexture;
struct ImBuf;
struct Image;
struct ImageFormatData;
struct ImbMovieWriter;
struct Main;
struct Object;
struct RenderData;
@@ -26,7 +27,6 @@ struct ReportList;
struct Scene;
struct StampData;
struct ViewLayer;
struct bMovieHandle;
#ifdef __cplusplus
extern "C" {
@@ -344,8 +344,7 @@ bool RE_WriteRenderViewsMovie(struct ReportList *reports,
struct RenderResult *rr,
struct Scene *scene,
struct RenderData *rd,
struct bMovieHandle *mh,
void **movie_ctx_arr,
struct ImbMovieWriter **movie_writers,
int totvideos,
bool preview);

View File

@@ -63,7 +63,6 @@
#include "BKE_report.hh"
#include "BKE_scene.hh"
#include "BKE_sound.h"
#include "BKE_writemovie.hh"
#include "NOD_composite.hh"
@@ -78,6 +77,7 @@
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_metadata.hh"
#include "IMB_movie_write.hh"
#include "RE_engine.h"
#include "RE_pipeline.h"
@@ -167,12 +167,8 @@ static void render_callback_exec_id(Render *re, Main *bmain, ID *id, eCbEvent ev
/** \name Allocation & Free
* \{ */
static bool do_write_image_or_movie(Render *re,
Main *bmain,
Scene *scene,
bMovieHandle *mh,
const int totvideos,
const char *filepath_override);
static bool do_write_image_or_movie(
Render *re, Main *bmain, Scene *scene, const int totvideos, const char *filepath_override);
/* default callbacks, set in each new render */
static void result_nothing(void * /*arg*/, RenderResult * /*rr*/) {}
@@ -2053,9 +2049,7 @@ void RE_RenderFrame(Render *re,
(rd.scemode & R_EXTENSION) != 0,
false,
nullptr);
/* reports only used for Movie */
do_write_image_or_movie(re, bmain, scene, nullptr, 0, filepath_override);
do_write_image_or_movie(re, bmain, scene, 0, filepath_override);
}
}
@@ -2152,8 +2146,7 @@ bool RE_WriteRenderViewsMovie(ReportList *reports,
RenderResult *rr,
Scene *scene,
RenderData *rd,
bMovieHandle *mh,
void **movie_ctx_arr,
ImbMovieWriter **movie_writers,
const int totvideos,
bool preview)
{
@@ -2177,13 +2170,14 @@ bool RE_WriteRenderViewsMovie(ReportList *reports,
IMB_colormanagement_imbuf_for_write(ibuf, true, false, &image_format);
if (!mh->append_movie(movie_ctx_arr[view_id],
rd,
preview ? scene->r.psfra : scene->r.sfra,
scene->r.cfra,
ibuf,
suffix,
reports))
BLI_assert(movie_writers[view_id] != nullptr);
if (!IMB_movie_write_append(movie_writers[view_id],
rd,
preview ? scene->r.psfra : scene->r.sfra,
scene->r.cfra,
ibuf,
suffix,
reports))
{
ok = false;
}
@@ -2211,13 +2205,14 @@ bool RE_WriteRenderViewsMovie(ReportList *reports,
ibuf_arr[2] = IMB_stereo3d_ImBuf(&image_format, ibuf_arr[0], ibuf_arr[1]);
if (!mh->append_movie(movie_ctx_arr[0],
rd,
preview ? scene->r.psfra : scene->r.sfra,
scene->r.cfra,
ibuf_arr[2],
"",
reports))
BLI_assert(movie_writers[0] != nullptr);
if (!IMB_movie_write_append(movie_writers[0],
rd,
preview ? scene->r.psfra : scene->r.sfra,
scene->r.cfra,
ibuf_arr[2],
"",
reports))
{
ok = false;
}
@@ -2233,12 +2228,8 @@ bool RE_WriteRenderViewsMovie(ReportList *reports,
return ok;
}
static bool do_write_image_or_movie(Render *re,
Main *bmain,
Scene *scene,
bMovieHandle *mh,
const int totvideos,
const char *filepath_override)
static bool do_write_image_or_movie(
Render *re, Main *bmain, Scene *scene, const int totvideos, const char *filepath_override)
{
char filepath[FILE_MAX];
RenderResult rres;
@@ -2256,7 +2247,7 @@ static bool do_write_image_or_movie(Render *re,
/* write movie or image */
if (BKE_imtype_is_movie(scene->r.im_format.imtype)) {
RE_WriteRenderViewsMovie(
re->reports, &rres, scene, &re->r, mh, re->movie_ctx_arr, totvideos, false);
re->reports, &rres, scene, &re->r, re->movie_writers.data(), totvideos, false);
}
else {
if (filepath_override) {
@@ -2334,16 +2325,12 @@ static void get_videos_dimensions(const Render *re,
BKE_scene_multiview_videos_dimensions_get(rd, width, height, r_width, r_height);
}
static void re_movie_free_all(Render *re, bMovieHandle *mh, int totvideos)
static void re_movie_free_all(Render *re)
{
int i;
for (i = 0; i < totvideos; i++) {
mh->end_movie(re->movie_ctx_arr[i]);
mh->context_free(re->movie_ctx_arr[i]);
for (ImbMovieWriter *writer : re->movie_writers) {
IMB_movie_write_end(writer);
}
MEM_SAFE_FREE(re->movie_ctx_arr);
re->movie_writers.clear_and_shrink();
}
void RE_RenderAnim(Render *re,
@@ -2361,7 +2348,6 @@ void RE_RenderAnim(Render *re,
RenderData rd;
memcpy(&rd, &scene->r, sizeof(rd));
bMovieHandle *mh = nullptr;
const int cfra_old = rd.cfra;
const float subframe_old = rd.subframe;
int nfra, totrendered = 0, totskipped = 0;
@@ -2385,42 +2371,30 @@ void RE_RenderAnim(Render *re,
if (is_movie && do_write_file) {
size_t width, height;
int i;
bool is_error = false;
get_videos_dimensions(re, &rd, &width, &height);
mh = BKE_movie_handle_get(rd.im_format.imtype);
if (mh == nullptr) {
render_pipeline_free(re);
BKE_report(re->reports, RPT_ERROR, "Movie format unsupported");
return;
}
re->movie_ctx_arr = MEM_cnew_array<void *>(totvideos, "Movies' Context");
for (i = 0; i < totvideos; i++) {
bool is_error = false;
re->movie_writers.reserve(totvideos);
for (int i = 0; i < totvideos; i++) {
const char *suffix = BKE_scene_multiview_view_id_suffix_get(&re->r, i);
re->movie_ctx_arr[i] = mh->context_create();
if (!mh->start_movie(re->movie_ctx_arr[i],
re->pipeline_scene_eval,
&re->r,
width,
height,
re->reports,
false,
suffix))
{
ImbMovieWriter *writer = IMB_movie_write_begin(rd.im_format.imtype,
re->pipeline_scene_eval,
&re->r,
width,
height,
re->reports,
false,
suffix);
if (writer == nullptr) {
is_error = true;
break;
}
re->movie_writers.append(writer);
}
if (is_error) {
/* report is handled above */
re_movie_free_all(re, mh, i + 1);
BKE_report(re->reports, RPT_ERROR, "Movie format unsupported");
re_movie_free_all(re);
render_pipeline_free(re);
return;
}
@@ -2558,7 +2532,7 @@ void RE_RenderAnim(Render *re,
const bool should_write = !(re->flag & R_SKIP_WRITE);
if (re->test_break_cb(re->tbh) == 0) {
if (!G.is_break && should_write) {
if (!do_write_image_or_movie(re, bmain, scene, mh, totvideos, nullptr)) {
if (!do_write_image_or_movie(re, bmain, scene, totvideos, nullptr)) {
G.is_break = true;
}
}
@@ -2610,7 +2584,7 @@ void RE_RenderAnim(Render *re,
/* end movie */
if (is_movie && do_write_file) {
re_movie_free_all(re, mh, totvideos);
re_movie_free_all(re);
}
if (totskipped && totrendered == 0) {

View File

@@ -245,7 +245,7 @@ struct Render : public BaseRender {
*/
struct ReportList *reports = nullptr;
void **movie_ctx_arr = nullptr;
blender::Vector<ImbMovieWriter *> movie_writers;
char viewname[MAX_NAME] = "";
/* TODO: replace by a whole draw manager. */

View File

@@ -10,6 +10,7 @@ set(INC
../blenloader
../blentranslation
../imbuf
../imbuf/movie
../makesrna
../render
../windowmanager

View File

@@ -14,6 +14,7 @@
#include "BKE_scene.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "multiview.hh"

View File

@@ -32,6 +32,7 @@
#include "WM_types.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
#include "IMB_metadata.hh"

View File

@@ -42,6 +42,7 @@
#include "DEG_depsgraph.hh"
#include "DEG_depsgraph_query.hh"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -35,6 +35,7 @@
#include "DEG_depsgraph.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "SEQ_channels.hh"

View File

@@ -33,6 +33,7 @@
#include "DEG_depsgraph_query.hh"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
@@ -453,7 +454,7 @@ Sequence *SEQ_add_movie_strip(Main *bmain, Scene *scene, ListBase *seqbase, SeqL
DEG_id_tag_update(&scene->id, ID_RECALC_AUDIO_FPS | ID_RECALC_SEQUENCER_STRIPS);
}
load_data->r_video_stream_start = IMD_anim_get_offset(anim_arr[0]);
load_data->r_video_stream_start = IMB_anim_get_offset(anim_arr[0]);
}
Sequence *seq = SEQ_sequence_alloc(

View File

@@ -21,6 +21,7 @@
#include "DEG_depsgraph.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "SEQ_iterator.hh"

View File

@@ -21,6 +21,7 @@
#include "DNA_sound_types.h"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "SEQ_channels.hh"

View File

@@ -19,6 +19,7 @@
#include "DNA_scene_types.h"
#include "DNA_sequence_types.h"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "SEQ_render.hh"

View File

@@ -37,6 +37,7 @@
#include "SEQ_time.hh"
#include "SEQ_utils.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

View File

@@ -15,6 +15,7 @@ set(INC
../draw
../gpu
../imbuf
../imbuf/movie
../makesrna
../nodes
../render

View File

@@ -40,6 +40,7 @@
#include "BLI_time.h"
#include "BLI_utildefines.h"
#include "IMB_anim.hh"
#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"
@@ -1717,9 +1718,7 @@ static bool wm_main_playanim_intern(int argc, const char **argv, PlayArgs *args_
ps.font_id = -1;
IMB_init();
#ifdef WITH_FFMPEG
IMB_ffmpeg_init();
#endif
STRNCPY(ps.display_ctx.display_settings.display_device,
IMB_colormanagement_role_colorspace_name_get(COLOR_ROLE_DEFAULT_BYTE));

View File

@@ -7,6 +7,7 @@ set(INC
../blender/editors/include
../blender/gpu
../blender/imbuf
../blender/imbuf/movie
../blender/io/usd
../blender/bmesh
../blender/makesrna

View File

@@ -59,6 +59,7 @@
#include "DEG_depsgraph.hh"
#include "IMB_anim.hh"
#include "IMB_imbuf.hh" /* For #IMB_init. */
#include "RE_engine.h"
@@ -495,10 +496,8 @@ int main(int argc,
/* Must be initialized after #BKE_appdir_init to account for color-management paths. */
IMB_init();
#ifdef WITH_FFMPEG
/* Keep after #ARG_PASS_SETTINGS since debug flags are checked. */
IMB_ffmpeg_init();
#endif
/* After #ARG_PASS_SETTINGS arguments, this is so #WM_main_playanim skips #RNA_init. */
RNA_init();