2023-05-31 16:19:06 +02:00
|
|
|
/* SPDX-FileCopyrightText: 2001-2002 NaN Holding BV. All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: GPL-2.0-or-later */
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup blenloader
|
2011-02-27 20:35:41 +00:00
|
|
|
*/
|
|
|
|
|
|
2016-06-27 11:21:03 +10:00
|
|
|
/**
|
|
|
|
|
* FILE FORMAT
|
|
|
|
|
* ===========
|
|
|
|
|
*
|
2019-10-15 12:02:40 +11:00
|
|
|
* IFF-style structure (but not IFF compatible!)
|
2012-10-04 13:26:15 +00:00
|
|
|
*
|
2019-10-15 12:02:40 +11:00
|
|
|
* Start file:
|
2016-06-27 11:21:03 +10:00
|
|
|
* <pre>
|
2019-10-15 12:02:40 +11:00
|
|
|
* `BLENDER_V100` `12` bytes (version 1.00 is just an example).
|
|
|
|
|
* `V` = big endian, `v` = little endian.
|
|
|
|
|
* `_` = 4 byte pointer, `-` = 8 byte pointer.
|
2016-06-27 11:21:03 +10:00
|
|
|
* </pre>
|
2012-10-04 13:26:15 +00:00
|
|
|
*
|
2019-06-12 09:04:10 +10:00
|
|
|
* data-blocks: (also see struct #BHead).
|
2016-06-27 11:21:03 +10:00
|
|
|
* <pre>
|
2023-08-28 15:01:05 +02:00
|
|
|
* `bh.code` `char[4]` see `BLO_blend_defs.hh` for a list of known types.
|
2019-10-15 12:02:40 +11:00
|
|
|
* `bh.len` `int32` length data after #BHead in bytes.
|
|
|
|
|
* `bh.old` `void *` old pointer (the address at the time of writing the file).
|
|
|
|
|
* `bh.SDNAnr` `int32` struct index of structs stored in #DNA1 data.
|
|
|
|
|
* `bh.nr` `int32` in case of array: number of structs.
|
|
|
|
|
* data
|
|
|
|
|
* ...
|
|
|
|
|
* ...
|
2016-06-27 11:21:03 +10:00
|
|
|
* </pre>
|
2012-10-04 13:26:15 +00:00
|
|
|
*
|
|
|
|
|
* Almost all data in Blender are structures. Each struct saved
|
|
|
|
|
* gets a BHead header. With BHead the struct can be linked again
|
2019-10-15 12:02:40 +11:00
|
|
|
* and compared with #StructDNA.
|
2021-11-30 09:15:02 +11:00
|
|
|
*
|
2012-10-04 13:26:15 +00:00
|
|
|
* WRITE
|
2016-06-27 11:21:03 +10:00
|
|
|
* =====
|
2012-10-04 13:26:15 +00:00
|
|
|
*
|
|
|
|
|
* Preferred writing order: (not really a must, but why would you do it random?)
|
2019-10-15 12:02:40 +11:00
|
|
|
* Any case: direct data is ALWAYS after the lib block.
|
2012-10-04 13:26:15 +00:00
|
|
|
*
|
|
|
|
|
* (Local file data)
|
|
|
|
|
* - for each LibBlock
|
2016-06-27 11:21:03 +10:00
|
|
|
* - write LibBlock
|
|
|
|
|
* - write associated direct data
|
2012-10-04 13:26:15 +00:00
|
|
|
* (External file data)
|
|
|
|
|
* - per library
|
2016-06-27 11:21:03 +10:00
|
|
|
* - write library block
|
|
|
|
|
* - per LibBlock
|
|
|
|
|
* - write the ID of LibBlock
|
2023-06-08 10:33:50 +10:00
|
|
|
 * - write #BLO_CODE_TEST (#RenderInfo struct. 128x128 blend file preview is optional).
|
|
|
|
|
* - write #BLO_CODE_GLOB (#FileGlobal struct) (some global vars).
|
|
|
|
|
* - write #BLO_CODE_DNA1 (#SDNA struct)
|
|
|
|
|
* - write #BLO_CODE_USER (#UserDef struct) for file paths:
|
2023-06-21 11:28:56 +10:00
|
|
|
* - #BLENDER_STARTUP_FILE (on UNIX `~/.config/blender/X.X/config/startup.blend`).
|
|
|
|
|
* - #BLENDER_USERPREF_FILE (on UNIX `~/.config/blender/X.X/config/userpref.blend`).
|
2012-10-04 13:26:15 +00:00
|
|
|
*/
|
2003-04-26 18:01:01 +00:00
|
|
|
|
2022-10-05 13:44:02 -05:00
|
|
|
#include <cerrno>
|
|
|
|
|
#include <climits>
|
|
|
|
|
#include <cmath>
|
|
|
|
|
#include <cstdio>
|
|
|
|
|
#include <cstdlib>
|
|
|
|
|
#include <cstring>
|
2009-09-06 13:20:05 +00:00
|
|
|
#include <fcntl.h>
|
|
|
|
|
|
2014-04-02 11:43:54 +02:00
|
|
|
#ifdef WIN32
|
2020-03-19 09:33:03 +01:00
|
|
|
# include "BLI_winstuff.h"
|
2012-04-15 07:54:07 +00:00
|
|
|
# include "winsock2.h"
|
|
|
|
|
# include <io.h>
|
2014-04-03 09:20:04 +02:00
|
|
|
#else
|
|
|
|
|
# include <unistd.h> /* FreeBSD, for write() and close(). */
|
2002-10-12 11:37:38 +00:00
|
|
|
#endif
|
|
|
|
|
|
2013-02-22 13:35:32 +00:00
|
|
|
#include "BLI_utildefines.h"
|
|
|
|
|
|
2022-05-17 15:29:24 +02:00
|
|
|
#include "CLG_log.h"
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Allow writefile to use deprecated functionality (for forward compatibility code). */
|
2012-01-21 11:15:01 +00:00
|
|
|
#define DNA_DEPRECATED_ALLOW
|
|
|
|
|
|
2020-12-15 10:47:58 +11:00
|
|
|
#include "DNA_collection_types.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "DNA_fileglobal_types.h"
|
2008-10-31 23:50:02 +00:00
|
|
|
#include "DNA_genfile.h"
|
2023-09-04 15:53:31 +02:00
|
|
|
#include "DNA_key_types.h"
|
2005-05-02 13:28:13 +00:00
|
|
|
#include "DNA_sdna_types.h"
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2012-03-14 06:31:38 +00:00
|
|
|
#include "BLI_bitmap.h"
|
2002-10-12 11:37:38 +00:00
|
|
|
#include "BLI_blenlib.h"
|
2021-06-10 21:05:50 +10:00
|
|
|
#include "BLI_endian_defines.h"
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
#include "BLI_endian_switch.h"
|
2024-02-29 17:14:58 +01:00
|
|
|
#include "BLI_implicit_sharing.hh"
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
#include "BLI_link_utils.h"
|
|
|
|
|
#include "BLI_linklist.h"
|
|
|
|
|
#include "BLI_math_base.h"
|
2013-08-03 11:35:09 +00:00
|
|
|
#include "BLI_mempool.h"
|
2024-12-10 17:43:09 +01:00
|
|
|
#include "BLI_multi_value_map.hh"
|
2024-07-26 12:16:42 +02:00
|
|
|
#include "BLI_set.hh"
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
#include "BLI_threads.h"
|
2023-08-28 15:17:08 +02:00
|
|
|
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "MEM_guardedalloc.h" /* MEM_freeN */
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2024-05-03 10:20:01 -04:00
|
|
|
#include "BKE_asset.hh"
|
2016-04-24 22:42:41 +10:00
|
|
|
#include "BKE_blender_version.h"
|
2024-02-09 19:23:03 +01:00
|
|
|
#include "BKE_bpath.hh"
|
2024-02-10 18:25:14 +01:00
|
|
|
#include "BKE_global.hh" /* For #Global `G`. */
|
2024-03-26 12:57:30 -04:00
|
|
|
#include "BKE_idprop.hh"
|
2024-01-20 19:17:36 +01:00
|
|
|
#include "BKE_idtype.hh"
|
2024-01-23 15:18:09 -05:00
|
|
|
#include "BKE_layer.hh"
|
2024-01-15 12:44:04 -05:00
|
|
|
#include "BKE_lib_id.hh"
|
2023-08-02 15:00:40 +02:00
|
|
|
#include "BKE_lib_override.hh"
|
2024-01-18 12:20:42 +01:00
|
|
|
#include "BKE_lib_query.hh"
|
2023-12-01 19:43:16 +01:00
|
|
|
#include "BKE_main.hh"
|
2023-11-27 16:21:49 +01:00
|
|
|
#include "BKE_main_namemap.hh"
|
2023-05-15 15:14:22 +02:00
|
|
|
#include "BKE_node.hh"
|
2024-08-08 15:13:14 +02:00
|
|
|
#include "BKE_packedFile.hh"
|
2024-05-22 20:32:23 +10:00
|
|
|
#include "BKE_preferences.h"
|
2024-02-10 18:34:29 +01:00
|
|
|
#include "BKE_report.hh"
|
2024-04-12 17:03:18 -04:00
|
|
|
#include "BKE_workspace.hh"
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2024-07-29 21:26:43 +02:00
|
|
|
#include "DRW_engine.hh"
|
|
|
|
|
|
2023-08-28 15:01:05 +02:00
|
|
|
#include "BLO_blend_defs.hh"
|
|
|
|
|
#include "BLO_blend_validate.hh"
|
|
|
|
|
#include "BLO_read_write.hh"
|
2024-02-09 13:41:30 +01:00
|
|
|
#include "BLO_readfile.hh"
|
2023-08-28 15:01:05 +02:00
|
|
|
#include "BLO_undofile.hh"
|
|
|
|
|
#include "BLO_writefile.hh"
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2023-08-28 15:01:05 +02:00
|
|
|
#include "readfile.hh"
|
2002-10-12 11:37:38 +00:00
|
|
|
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
#include <zstd.h>
|
|
|
|
|
|
2019-11-24 22:54:51 +11:00
|
|
|
/* Make preferences read-only. */
|
|
|
|
|
#define U (*((const UserDef *)&U))
|
|
|
|
|
|
2008-03-05 15:13:41 +00:00
|
|
|
/* ********* my write, buffered writing with minimum size chunks ************ */
|
|
|
|
|
|
2016-07-08 14:32:29 +10:00
|
|
|
/* Use optimal allocation since blocks of this size are kept in memory for undo. */
|
2022-09-25 22:41:22 +10:00
|
|
|
#define MEM_BUFFER_SIZE MEM_SIZE_OPTIMAL(1 << 17) /* 128kb */
|
|
|
|
|
#define MEM_CHUNK_SIZE MEM_SIZE_OPTIMAL(1 << 15) /* ~32kb */
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
#define ZSTD_BUFFER_SIZE (1 << 21) /* 2mb */
|
|
|
|
|
#define ZSTD_CHUNK_SIZE (1 << 20) /* 1mb */
|
|
|
|
|
|
|
|
|
|
#define ZSTD_COMPRESSION_LEVEL 3
|
2014-09-04 21:48:36 +10:00
|
|
|
|
2022-05-17 15:29:24 +02:00
|
|
|
static CLG_LogRef LOG = {"blo.writefile"};
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** Use if we want to store how many bytes have been written to the file. */
|
|
|
|
|
// #define USE_WRITE_DATA_LEN
|
2014-09-04 21:48:36 +10:00
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Internal Write Wrapper's (Abstracts Compression)
|
2014-09-04 21:48:36 +10:00
|
|
|
* \{ */
|
|
|
|
|
|
2022-10-05 13:44:02 -05:00
|
|
|
/**
 * Per-frame bookkeeping for the seek-table written after the Zstd frames.
 * Stored in a #ListBase, hence the `next`/`prev` linkage.
 */
struct ZstdFrame {
  ZstdFrame *next, *prev;

  /** Size of this frame on disk, in bytes (after Zstd compression). */
  uint32_t compressed_size;
  /** Size of the original payload for this frame, in bytes. */
  uint32_t uncompressed_size;
};
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
/**
 * Abstract output sink used by the file writing code, so the same write logic
 * can target a plain file or a compressing wrapper.
 */
class WriteWrap {
 public:
  /* Polymorphic base: a virtual destructor is required so deleting a derived
   * instance through a `WriteWrap *` is well-defined. */
  virtual ~WriteWrap() = default;

  /** Open the destination; return true on success. */
  virtual bool open(const char *filepath) = 0;
  /** Flush/close the destination; return true on success. */
  virtual bool close() = 0;
  /** Write `buf_len` bytes from `buf`; return true when all bytes were accepted. */
  virtual bool write(const void *buf, size_t buf_len) = 0;

  /** Buffer output (we only want when output isn't already buffered). */
  bool use_buf = true;
};
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
class RawWriteWrap : public WriteWrap {
|
|
|
|
|
public:
|
|
|
|
|
bool open(const char *filepath) override;
|
|
|
|
|
bool close() override;
|
2023-10-19 17:51:21 +11:00
|
|
|
bool write(const void *buf, size_t buf_len) override;
|
2023-10-19 01:54:00 +02:00
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
int file_handle = 0;
|
2014-09-04 21:48:36 +10:00
|
|
|
};
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
bool RawWriteWrap::open(const char *filepath)
|
2014-09-04 21:48:36 +10:00
|
|
|
{
|
|
|
|
|
int file;
|
|
|
|
|
|
|
|
|
|
file = BLI_open(filepath, O_BINARY + O_WRONLY + O_CREAT + O_TRUNC, 0666);
|
|
|
|
|
|
|
|
|
|
if (file != -1) {
|
2023-10-19 01:54:00 +02:00
|
|
|
file_handle = file;
|
2014-09-04 21:48:36 +10:00
|
|
|
return true;
|
|
|
|
|
}
|
2020-08-07 12:31:44 +02:00
|
|
|
|
|
|
|
|
return false;
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
2023-10-19 01:54:00 +02:00
|
|
|
bool RawWriteWrap::close()
|
2014-09-04 21:48:36 +10:00
|
|
|
{
|
2023-10-19 01:54:00 +02:00
|
|
|
return (::close(file_handle) != -1);
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
2023-10-19 01:54:00 +02:00
|
|
|
bool RawWriteWrap::write(const void *buf, size_t buf_len)
|
2014-09-04 21:48:36 +10:00
|
|
|
{
|
2023-10-19 01:54:00 +02:00
|
|
|
return ::write(file_handle, buf, buf_len) == buf_len;
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
/**
 * Wraps another #WriteWrap and Zstd-compresses everything written through it,
 * dispatching compression of fixed-size chunks to a thread pool.
 */
class ZstdWriteWrap : public WriteWrap {
  /** Underlying (uncompressed) output the compressed frames are forwarded to. */
  WriteWrap &base_wrap;

  /* Worker threads and their pending task queue.
   * NOTE(review): `mutex`/`condition` presumably guard `tasks` and order the
   * output of finished frames — confirm against the method definitions. */
  ListBase threadpool = {};
  ListBase tasks = {};
  ThreadMutex mutex = {};
  ThreadCondition condition = {};
  /** Index of the next frame that may be written to `base_wrap` (output ordering). */
  int next_frame = 0;
  /** Total number of frames submitted so far. */
  int num_frames = 0;

  /** Completed-frame metadata (#ZstdFrame entries), used for the seek table. */
  ListBase frames = {};

  /** Set when a write to `base_wrap` failed; sticky until close. */
  bool write_error = false;

 public:
  ZstdWriteWrap(WriteWrap &base_wrap) : base_wrap(base_wrap) {}

  bool open(const char *filepath) override;
  bool close() override;
  bool write(const void *buf, size_t buf_len) override;

 private:
  struct ZstdWriteBlockTask;
  /** Compress one task's payload and hand the result to the writer (worker-thread side). */
  void write_task(ZstdWriteBlockTask *task);
  /** Write a 32-bit value in little-endian byte order to `base_wrap`. */
  void write_u32_le(uint32_t val);
  /** Emit the seekable-frames metadata collected in `frames`. */
  void write_seekable_frames();
};
|
|
|
|
|
|
|
|
|
|
/**
 * One unit of work for the compression thread pool: a chunk of uncompressed
 * data plus its position in the output stream. Lives in a #ListBase, hence
 * the `next`/`prev` linkage.
 */
struct ZstdWriteWrap::ZstdWriteBlockTask {
  ZstdWriteBlockTask *next, *prev;
  /** Uncompressed payload to compress (`size` bytes). */
  void *data;
  size_t size;
  /** Sequence number used to write frames in submission order. */
  int frame_number;
  /** Owning wrapper, so the static trampoline can reach the member function. */
  ZstdWriteWrap *ww;

  /** Thread-pool entry point: adapts the `void *(void *)` callback signature
   * to the member function #ZstdWriteWrap::write_task. */
  static void *write_task(void *userdata)
  {
    auto *task = static_cast<ZstdWriteBlockTask *>(userdata);
    task->ww->write_task(task);
    return nullptr;
  }
};
|
2014-09-04 21:48:36 +10:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
void ZstdWriteWrap::write_task(ZstdWriteBlockTask *task)
|
2014-09-04 21:48:36 +10:00
|
|
|
{
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
size_t out_buf_len = ZSTD_compressBound(task->size);
|
|
|
|
|
void *out_buf = MEM_mallocN(out_buf_len, "Zstd out buffer");
|
|
|
|
|
size_t out_size = ZSTD_compress(
|
|
|
|
|
out_buf, out_buf_len, task->data, task->size, ZSTD_COMPRESSION_LEVEL);
|
2014-09-04 21:48:36 +10:00
|
|
|
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
MEM_freeN(task->data);
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_mutex_lock(&mutex);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
while (next_frame != task->frame_number) {
|
|
|
|
|
BLI_condition_wait(&condition, &mutex);
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
2020-08-07 12:31:44 +02:00
|
|
|
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
if (ZSTD_isError(out_size)) {
|
2023-10-19 01:54:00 +02:00
|
|
|
write_error = true;
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
else {
|
2023-10-19 17:51:21 +11:00
|
|
|
if (base_wrap.write(out_buf, out_size)) {
|
2022-09-15 19:13:01 +02:00
|
|
|
ZstdFrame *frameinfo = static_cast<ZstdFrame *>(
|
|
|
|
|
MEM_mallocN(sizeof(ZstdFrame), "zstd frameinfo"));
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
frameinfo->uncompressed_size = task->size;
|
|
|
|
|
frameinfo->compressed_size = out_size;
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_addtail(&frames, frameinfo);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
else {
|
2023-10-19 01:54:00 +02:00
|
|
|
write_error = true;
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
next_frame++;
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_mutex_unlock(&mutex);
|
|
|
|
|
BLI_condition_notify_all(&condition);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
MEM_freeN(out_buf);
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
bool ZstdWriteWrap::open(const char *filepath)
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
{
|
2023-10-19 17:51:21 +11:00
|
|
|
if (!base_wrap.open(filepath)) {
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Leave one thread open for the main writing logic, unless we only have one HW thread. */
|
|
|
|
|
int num_threads = max_ii(1, BLI_system_thread_count() - 1);
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_threadpool_init(&threadpool, ZstdWriteBlockTask::write_task, num_threads);
|
|
|
|
|
BLI_mutex_init(&mutex);
|
|
|
|
|
BLI_condition_init(&condition);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
return true;
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void ZstdWriteWrap::write_u32_le(const uint32_t val)
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
{
|
|
|
|
|
#ifdef __BIG_ENDIAN__
|
|
|
|
|
BLI_endian_switch_uint32(&val);
|
|
|
|
|
#endif
|
2023-10-19 17:51:21 +11:00
|
|
|
base_wrap.write(&val, sizeof(uint32_t));
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* In order to implement efficient seeking when reading the .blend, we append
|
|
|
|
|
* a skippable frame that encodes information about the other frames present
|
|
|
|
|
* in the file.
|
|
|
|
|
* The format here follows the upstream spec for seekable files:
|
|
|
|
|
* https://github.com/facebook/zstd/blob/master/contrib/seekable_format/zstd_seekable_compression_format.md
|
|
|
|
|
* If this information is not present in a file (e.g. if it was compressed
|
|
|
|
|
* with external tools), it can still be opened in Blender, but seeking will
|
|
|
|
|
* not be supported, so more memory might be needed. */
|
2023-10-19 01:54:00 +02:00
|
|
|
void ZstdWriteWrap::write_seekable_frames()
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
{
|
|
|
|
|
/* Write seek table header (magic number and frame size). */
|
2023-10-19 01:54:00 +02:00
|
|
|
write_u32_le(0x184D2A5E);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
/* The actual frame number might not match num_frames if there was a write error. */
|
|
|
|
|
const uint32_t num_frames = BLI_listbase_count(&frames);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
/* Each frame consists of two u32, so 8 bytes each.
|
|
|
|
|
* After the frames, a footer containing two u32 and one byte (9 bytes total) is written. */
|
|
|
|
|
const uint32_t frame_size = num_frames * 8 + 9;
|
2023-10-19 01:54:00 +02:00
|
|
|
write_u32_le(frame_size);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
/* Write seek table entries. */
|
2023-10-19 01:54:00 +02:00
|
|
|
LISTBASE_FOREACH (ZstdFrame *, frame, &frames) {
|
|
|
|
|
write_u32_le(frame->compressed_size);
|
|
|
|
|
write_u32_le(frame->uncompressed_size);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Write seek table footer (number of frames, option flags and second magic number). */
|
2023-10-19 01:54:00 +02:00
|
|
|
write_u32_le(num_frames);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
const char flags = 0; /* We don't store checksums for each frame. */
|
2023-10-19 17:51:21 +11:00
|
|
|
base_wrap.write(&flags, 1);
|
2023-10-19 01:54:00 +02:00
|
|
|
write_u32_le(0x8F92EAB1);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
|
/**
 * Finish the compressed stream: tear down the worker infrastructure, append
 * the seek table and close the underlying writer.
 *
 * \return true if the underlying writer closed successfully and no write
 * error was recorded during the stream's lifetime.
 */
bool ZstdWriteWrap::close()
{
  /* Shut down the worker pool before freeing the task list it operated on. */
  BLI_threadpool_end(&threadpool);
  BLI_freelistN(&tasks);

  /* Synchronization primitives are only torn down once no workers remain. */
  BLI_mutex_end(&mutex);
  BLI_condition_end(&condition);

  /* The seek table must be appended after all data frames, and it reads the
   * recorded frame list - so write it before freeing `frames`. */
  write_seekable_frames();
  BLI_freelistN(&frames);

  /* A recorded write error makes the whole stream invalid even if closing
   * the underlying writer itself succeeds. */
  return base_wrap.close() && !write_error;
}
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
bool ZstdWriteWrap::write(const void *buf, const size_t buf_len)
|
2014-09-04 21:48:36 +10:00
|
|
|
{
|
2023-10-19 01:54:00 +02:00
|
|
|
if (write_error) {
|
|
|
|
|
return false;
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
}
|
|
|
|
|
|
2022-09-15 19:13:01 +02:00
|
|
|
ZstdWriteBlockTask *task = static_cast<ZstdWriteBlockTask *>(
|
|
|
|
|
MEM_mallocN(sizeof(ZstdWriteBlockTask), __func__));
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
task->data = MEM_mallocN(buf_len, __func__);
|
|
|
|
|
memcpy(task->data, buf, buf_len);
|
|
|
|
|
task->size = buf_len;
|
2023-10-19 01:54:00 +02:00
|
|
|
task->frame_number = num_frames++;
|
|
|
|
|
task->ww = this;
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_mutex_lock(&mutex);
|
|
|
|
|
BLI_addtail(&tasks, task);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
/* If there's a free worker thread, just push the block into that thread.
|
|
|
|
|
* Otherwise, we wait for the earliest thread to finish.
|
|
|
|
|
* We look up the earliest thread while holding the mutex, but release it
|
|
|
|
|
* before joining the thread to prevent a deadlock. */
|
2023-10-19 01:54:00 +02:00
|
|
|
ZstdWriteBlockTask *first_task = static_cast<ZstdWriteBlockTask *>(tasks.first);
|
|
|
|
|
BLI_mutex_unlock(&mutex);
|
|
|
|
|
if (!BLI_available_threads(&threadpool)) {
|
|
|
|
|
BLI_threadpool_remove(&threadpool, first_task);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
|
|
|
|
|
/* If the task list was empty before we pushed our task, there should
|
|
|
|
|
* always be a free thread. */
|
|
|
|
|
BLI_assert(first_task != task);
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_remlink(&tasks, first_task);
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
MEM_freeN(first_task);
|
|
|
|
|
}
|
2023-10-19 01:54:00 +02:00
|
|
|
BLI_threadpool_insert(&threadpool, task);
|
2014-09-04 21:48:36 +10:00
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
return true;
|
2014-09-04 21:48:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Write Data Type & Functions
|
|
|
|
|
* \{ */
|
2014-09-04 21:48:36 +10:00
|
|
|
|
2022-10-05 13:44:02 -05:00
|
|
|
/** Runtime state for one write operation (file, memory buffer or undo #MemFile). */
struct WriteData {
  const SDNA *sdna;

  struct {
    /** Use for file and memory writing (size stored in max_size). */
    uchar *buf;
    /** Number of bytes used in #WriteData.buf (flushed when exceeded). */
    size_t used_len;

    /** Maximum size of the buffer. */
    size_t max_size;
    /** Threshold above which writes get their own chunk. */
    size_t chunk_size;
  } buffer;

#ifdef USE_WRITE_DATA_LEN
  /** Total number of bytes written. */
  size_t write_len;
#endif

  /** Whether writefile code is currently writing an ID. */
  bool is_writing_id;

  /** Some validation and error handling data. */
  struct {
    /**
     * Set on unlikely case of an error (ignores further file writing). Only used for very
     * low-level errors (like if the actual write on file fails).
     */
    bool critical_error;
    /**
     * A set of all 'old' addresses used as UID of written blocks for the current ID. Allows
     * detecting invalid re-uses of the same address multiple times.
     */
    blender::Set<const void *> per_id_addresses_set;
  } validation_data;

  /**
   * Keeps track of which shared data has been written for the current ID. This is necessary to
   * avoid writing the same data more than once.
   */
  blender::Set<const void *> per_id_written_shared_addresses;

  /** #MemFile writing (used for undo). */
  MemFileWriteData mem;
  /** When true, write to #WriteData.current, could also call 'is_undo'. */
  bool use_memfile;

  /**
   * Wrap writing, so we can use zstd or
   * other compression types later, see: G_FILE_COMPRESS
   * Will be nullptr for UNDO.
   */
  WriteWrap *ww;
};
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2022-10-05 13:44:02 -05:00
|
|
|
/** Public handle passed to ID writing callbacks, wrapping the low-level #WriteData state. */
struct BlendWriter {
  WriteData *wd;
};
|
2020-06-05 11:44:36 +02:00
|
|
|
|
2014-09-04 21:48:36 +10:00
|
|
|
/**
 * Allocate and initialize a #WriteData for the given write wrapper.
 *
 * \param ww: Output wrapper, nullptr for memory (undo) writing.
 * \return Newly allocated write state; buffering is only set up when
 * writing to memory or when the wrapper requests buffered output.
 */
static WriteData *writedata_new(WriteWrap *ww)
{
  WriteData *wd = MEM_new<WriteData>(__func__);

  wd->sdna = DNA_sdna_current_get();

  wd->ww = ww;

  const bool is_memory_write = (ww == nullptr);
  if (is_memory_write || ww->use_buf) {
    if (is_memory_write) {
      wd->buffer.max_size = MEM_BUFFER_SIZE;
      wd->buffer.chunk_size = MEM_CHUNK_SIZE;
    }
    else {
      wd->buffer.max_size = ZSTD_BUFFER_SIZE;
      wd->buffer.chunk_size = ZSTD_CHUNK_SIZE;
    }
    wd->buffer.buf = static_cast<uchar *>(MEM_mallocN(wd->buffer.max_size, "wd->buffer.buf"));
  }

  return wd;
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
/**
 * Hand a chunk of data to the actual output: either the undo #MemFile or the
 * file-level #WriteWrap. On a low-level write failure,
 * #WriteData.validation_data.critical_error is set so that all further writes
 * become no-ops.
 *
 * \param wd: Write state (may be null, in which case nothing happens).
 * \param mem: Data to write (may be null, in which case nothing happens).
 * \param memlen: Number of bytes in \a mem, must fit in an `int32` chunk.
 */
static void writedata_do_write(WriteData *wd, const void *mem, const size_t memlen)
{
  /* Nothing to do on missing state/data, or after a previous critical failure. */
  if ((wd == nullptr) || wd->validation_data.critical_error || (mem == nullptr) || memlen < 1) {
    return;
  }

  /* BHead lengths are stored as `int32`, larger chunks cannot be represented. */
  if (memlen > INT_MAX) {
    BLI_assert_msg(0, "Cannot write chunks bigger than INT_MAX.");
    return;
  }

  /* NOTE: a second `critical_error` check used to live here; it was redundant,
   * the guard at the top of this function already handles that state. */

  /* Memory based save. */
  if (wd->use_memfile) {
    BLO_memfile_chunk_add(&wd->mem, static_cast<const char *>(mem), memlen);
  }
  else {
    if (!wd->ww->write(mem, memlen)) {
      /* Remember the failure so later calls (and callers) can skip writing. */
      wd->validation_data.critical_error = true;
    }
  }
}
|
|
|
|
|
|
2004-06-23 18:22:51 +00:00
|
|
|
/** Release the write buffer (if any) and the #WriteData itself. */
static void writedata_free(WriteData *wd)
{
  if (wd->buffer.buf != nullptr) {
    MEM_freeN(wd->buffer.buf);
  }
  MEM_delete(wd);
}
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Local Writing API 'mywrite'
|
|
|
|
|
* \{ */
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2016-07-07 16:02:45 +10:00
|
|
|
/**
|
|
|
|
|
* Flush helps the de-duplicating memory for undo-save by logically segmenting data,
|
|
|
|
|
* so differences in one part of memory won't cause unrelated data to be duplicated.
|
|
|
|
|
*/
|
|
|
|
|
static void mywrite_flush(WriteData *wd)
|
|
|
|
|
{
|
Add support for Zstandard compression for .blend files
Compressing blendfiles can help save a lot of disk space, but the slowdown
while loading and saving is a major annoyance.
Currently Blender uses Zlib (aka gzip aka Deflate) for compression, but there
are now several more modern algorithms that outperform it in every way.
In this patch, I decided for Zstandard aka Zstd for several reasons:
- It is widely supported, both in other programs and libraries as well as in
general-purpose compression utilities on Unix
- It is extremely flexible - spanning several orders of magnitude of
compression speeds depending on the level setting.
- It is pretty much on the Pareto frontier for all of its configurations
(meaning that no other algorithm is both faster and more efficient).
One downside of course is that older versions of Blender will not be able to
read these files, but one can always just re-save them without compression or
decompress the file manually with an external tool.
The implementation here saves additional metadata into the compressed file in
order to allow for efficient seeking when loading. This is standard-compliant
and will be ignored by other tools that support Zstd.
If the metadata is not present (e.g. because you manually compressed a .blend
file with another tool), Blender will fall back to sequential reading.
Saving is multithreaded to improve performance. Loading is currently not
multithreaded since it's not easy to predict the access patterns of the
loading code when seeking is supported.
In the future, we might want to look into making this more predictable or
disabling seeking for the main .blend file, which would then allow for
multiple background threads that decompress data ahead of time.
The compression level was chosen to get sizes comparable to previous versions
at much higher speeds. In the future, this could be exposed as an option.
Reviewed By: campbellbarton, brecht, mont29
Differential Revision: https://developer.blender.org/D5799
2021-08-21 03:15:31 +02:00
|
|
|
if (wd->buffer.used_len != 0) {
|
|
|
|
|
writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
|
|
|
|
|
wd->buffer.used_len = 0;
|
2016-07-07 16:02:45 +10:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2002-10-12 11:37:38 +00:00
|
|
|
/**
 * Low level WRITE(2) wrapper that buffers data
 * \param adr: Pointer to new chunk of data
 * \param len: Length of new chunk of data
 */
static void mywrite(WriteData *wd, const void *adr, size_t len)
{
  /* Once a critical error has occurred, silently drop all further writes:
   * the file is already known to be broken and the error is reported by
   * #mywrite_end via `wd->validation_data.critical_error`. */
  if (UNLIKELY(wd->validation_data.critical_error)) {
    return;
  }

  /* Writing from a null address is a programming error in the caller. */
  if (UNLIKELY(adr == nullptr)) {
    BLI_assert(0);
    return;
  }

#ifdef USE_WRITE_DATA_LEN
  /* Optional bookkeeping of the total number of bytes requested. */
  wd->write_len += len;
#endif

  if (wd->buffer.buf == nullptr) {
    /* Unbuffered mode: hand the data straight to the underlying writer. */
    writedata_do_write(wd, adr, len);
  }
  else {
    /* If we have a single big chunk, write existing data in
     * buffer and write out big chunk in smaller pieces. */
    if (len > wd->buffer.chunk_size) {
      /* Flush pending buffered data first to preserve write order. */
      if (wd->buffer.used_len != 0) {
        writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
        wd->buffer.used_len = 0;
      }

      /* Write the big chunk in `chunk_size` slices (last slice may be smaller). */
      do {
        const size_t writelen = std::min(len, wd->buffer.chunk_size);
        writedata_do_write(wd, adr, writelen);
        adr = (const char *)adr + writelen;
        len -= writelen;
      } while (len > 0);

      return;
    }

    /* If data would overflow buffer, write out the buffer. */
    if (len + wd->buffer.used_len > wd->buffer.max_size - 1) {
      writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
      wd->buffer.used_len = 0;
    }

    /* Append data at end of buffer. */
    memcpy(&wd->buffer.buf[wd->buffer.used_len], adr, len);
    wd->buffer.used_len += len;
  }
}
|
|
|
|
|
|
|
|
|
|
/**
 * BeGiN initializer for mywrite
 * \param ww: File write wrapper.
 * \param compare: Previous memory file (can be nullptr).
 * \param current: The current memory file (can be nullptr).
 * \return A newly allocated #WriteData, to be released with #mywrite_end.
 * \warning Talks to other functions with global parameters
 */
static WriteData *mywrite_begin(WriteWrap *ww, MemFile *compare, MemFile *current)
{
  WriteData *wd = writedata_new(ww);

  /* A non-null `current` memfile means this write targets an in-memory undo
   * step instead of an on-disk `.blend` file. */
  if (current != nullptr) {
    BLO_memfile_write_init(&wd->mem, current, compare);
    wd->use_memfile = true;
  }

  return wd;
}
|
|
|
|
|
|
|
|
|
|
/**
 * END the mywrite wrapper: flush pending buffered data, finalize the undo
 * memfile (if any) and free \a wd.
 *
 * \return True if write failed (i.e. a critical error was recorded during writing).
 * \warning Talks to other functions with global parameters
 */
static bool mywrite_end(WriteData *wd)
{
  /* Flush any data still pending in the write buffer. */
  if (wd->buffer.used_len != 0) {
    writedata_do_write(wd, wd->buffer.buf, wd->buffer.used_len);
    wd->buffer.used_len = 0;
  }

  if (wd->use_memfile) {
    BLO_memfile_write_finalize(&wd->mem);
  }

  /* Capture the error state before `wd` is freed below. */
  const bool err = wd->validation_data.critical_error;
  writedata_free(wd);

  return err;
}
|
|
|
|
|
|
2020-06-03 12:07:45 +02:00
|
|
|
/**
 * Start writing of data related to a single ID.
 *
 * Only does something when storing an undo step.
 */
static void mywrite_id_begin(WriteData *wd, ID *id)
{
  /* Per-ID write sections must not be nested, see #mywrite_id_end. */
  BLI_assert(wd->is_writing_id == false);
  wd->is_writing_id = true;

  /* The per-ID validation set must have been cleared by the previous #mywrite_id_end. */
  BLI_assert(wd->validation_data.per_id_addresses_set.is_empty());

  if (wd->use_memfile) {
    wd->mem.current_id_session_uid = id->session_uid;

    /* If current next memchunk does not match the ID we are about to write, or is not the _first_
     * one for said ID, try to find the correct memchunk in the mapping using ID's session_uid. */
    const MemFileChunk *curr_memchunk = wd->mem.reference_current_chunk;
    const MemFileChunk *prev_memchunk = curr_memchunk != nullptr ?
                                            static_cast<MemFileChunk *>(curr_memchunk->prev) :
                                            nullptr;
    if (curr_memchunk == nullptr || curr_memchunk->id_session_uid != id->session_uid ||
        (prev_memchunk != nullptr &&
         (prev_memchunk->id_session_uid == curr_memchunk->id_session_uid)))
    {
      if (MemFileChunk *ref = wd->mem.id_session_uid_mapping.lookup_default(id->session_uid,
                                                                            nullptr))
      {
        wd->mem.reference_current_chunk = static_cast<MemFileChunk *>(ref);
      }
      /* Else, no existing memchunk found, i.e. this is supposed to be a new ID. */
    }
    /* Otherwise, we try with the current memchunk in any case, whether it is matching current
     * ID's session_uid or not. */
  }
}
|
|
|
|
|
|
|
|
|
|
/**
 * End writing of data related to a single ID (counterpart of #mywrite_id_begin).
 *
 * Only does something when storing an undo step.
 */
static void mywrite_id_end(WriteData *wd, ID * /*id*/)
{
  if (wd->use_memfile) {
    /* Very important to do it after every ID write now, otherwise we cannot know whether a
     * specific ID changed or not. */
    mywrite_flush(wd);
    wd->mem.current_id_session_uid = MAIN_ID_SESSION_UID_UNSET;
  }

  /* Reset per-ID bookkeeping so the next ID starts from a clean state. */
  wd->validation_data.per_id_addresses_set.clear();
  wd->per_id_written_shared_addresses.clear();

  BLI_assert(wd->is_writing_id == true);
  wd->is_writing_id = false;
}
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Generic DNA File Writing
|
|
|
|
|
* \{ */
|
2002-10-12 11:37:38 +00:00
|
|
|
|
2024-07-26 12:16:42 +02:00
|
|
|
/**
 * Return \a false if the given 'old' address is not valid in current context. The block should
 * not be written in that case.
 *
 * \note Currently only checks that #BLO_CODE_DATA blocks written as part of an ID data never match
 * an already written one for the same ID.
 */
static bool write_at_address_validate(WriteData *wd, const int filecode, const void *address)
{
  /* Skip in undo case. */
  if (wd->use_memfile) {
    return true;
  }

  if (wd->is_writing_id && filecode == BLO_CODE_DATA) {
    /* `add` returns false when `address` was already in the set, i.e. this
     * 'old' address was used before for the same ID. */
    if (!wd->validation_data.per_id_addresses_set.add(address)) {
      CLOG_ERROR(&LOG,
                 "Same identifier (old address) used several times for a same ID, skipping this "
                 "block to avoid critical corruption of the Blender file.");
      return false;
    }
  }
  return true;
}
|
|
|
|
|
|
2024-11-20 10:39:58 +01:00
|
|
|
/** Write a single #BHead block header into the file stream. */
static void write_bhead(WriteData *wd, const BHead &bhead)
{
  mywrite(wd, &bhead, sizeof(BHead));
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
/**
 * Write `nr` structs of SDNA type `struct_nr`, taken from `data`, using `adr`
 * as the 'old address' identifier stored in the #BHead.
 *
 * \param filecode: Block code, see `BLO_blend_defs.hh`.
 * \param struct_nr: SDNA struct index (must be a valid, non-raw-data index).
 * \param nr: Number of consecutive structs to write.
 * \param adr: Old address written into the #BHead (identifier for relinking on load).
 * \param data: Actual memory to write (may differ from `adr`).
 */
static void writestruct_at_address_nr(WriteData *wd,
                                      const int filecode,
                                      const int struct_nr,
                                      const int64_t nr,
                                      const void *adr,
                                      const void *data)
{
  BLI_assert(struct_nr > 0 && struct_nr < SDNA_TYPE_MAX);

  /* Nothing to write for null data or an empty array. */
  if (adr == nullptr || data == nullptr || nr == 0) {
    return;
  }

  if (!write_at_address_validate(wd, filecode, adr)) {
    return;
  }

  /* Computed in 64-bit to detect overflow before narrowing to `bh.len`. */
  const int64_t len_in_bytes = nr * DNA_struct_size(wd->sdna, struct_nr);
  if (len_in_bytes > INT32_MAX) {
    CLOG_ERROR(&LOG, "Cannot write chunks bigger than INT_MAX.");
    return;
  }

  BHead bh;
  bh.code = filecode;
  bh.old = adr;
  bh.nr = nr;
  bh.SDNAnr = struct_nr;
  bh.len = len_in_bytes;

  /* Zero-sized structs produce no payload, skip the header too. */
  if (bh.len == 0) {
    return;
  }

  write_bhead(wd, bh);
  mywrite(wd, data, size_t(bh.len));
}
|
|
|
|
|
|
2016-06-28 20:05:42 +10:00
|
|
|
/** Convenience variant of #writestruct_at_address_nr where the data pointer
 * is also used as the 'old address' identifier. */
static void writestruct_nr(
    WriteData *wd, const int filecode, const int struct_nr, const int64_t nr, const void *adr)
{
  writestruct_at_address_nr(wd, filecode, struct_nr, nr, adr, adr);
}
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/**
 * Write a raw (non-struct) chunk of `len` bytes from `adr`.
 *
 * \warning Do not use for structs.
 */
static void writedata(WriteData *wd, const int filecode, const size_t len, const void *adr)
{
  /* Nothing to write for null data or zero length. */
  if (adr == nullptr || len == 0) {
    return;
  }

  if (!write_at_address_validate(wd, filecode, adr)) {
    return;
  }

  /* `bh.len` is a 32-bit int, larger chunks cannot be represented. */
  if (len > INT_MAX) {
    BLI_assert_msg(0, "Cannot write chunks bigger than INT_MAX.");
    return;
  }

  BHead bh;
  bh.code = filecode;
  bh.old = adr;
  bh.nr = 1;
  BLI_STATIC_ASSERT(SDNA_RAW_DATA_STRUCT_INDEX == 0, "'raw data' SDNA struct index should be 0")
  bh.SDNAnr = SDNA_RAW_DATA_STRUCT_INDEX;
  bh.len = int(len);

  write_bhead(wd, bh);
  mywrite(wd, adr, len);
}
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/**
|
|
|
|
|
* Use this to force writing of lists in same order as reading (using link_list).
|
|
|
|
|
*/
|
2024-11-18 17:55:24 +01:00
|
|
|
static void writelist_nr(WriteData *wd,
|
|
|
|
|
const int filecode,
|
|
|
|
|
const int struct_nr,
|
|
|
|
|
const ListBase *lb)
|
2013-03-07 16:57:53 +00:00
|
|
|
{
|
2022-09-15 19:13:01 +02:00
|
|
|
const Link *link = static_cast<Link *>(lb->first);
|
2016-06-28 17:35:35 +10:00
|
|
|
|
2013-03-07 16:57:53 +00:00
|
|
|
while (link) {
|
2016-06-28 20:05:42 +10:00
|
|
|
writestruct_nr(wd, filecode, struct_nr, 1, link);
|
2013-03-07 16:57:53 +00:00
|
|
|
link = link->next;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-28 20:05:42 +10:00
|
|
|
#if 0
|
2024-11-18 17:55:24 +01:00
|
|
|
static void writelist_id(WriteData *wd, const int filecode, const char *structname, const ListBase *lb)
|
2016-06-28 20:05:42 +10:00
|
|
|
{
|
|
|
|
|
const Link *link = lb->first;
|
|
|
|
|
if (link) {
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-09-25 13:20:17 +10:00
|
|
|
const int struct_nr = DNA_struct_find_with_alias(wd->sdna, structname);
|
2016-06-28 20:05:42 +10:00
|
|
|
if (struct_nr == -1) {
|
|
|
|
|
printf("error: can't find SDNA code <%s>\n", structname);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-06-28 20:05:42 +10:00
|
|
|
while (link) {
|
|
|
|
|
writestruct_nr(wd, filecode, struct_nr, 1, link);
|
|
|
|
|
link = link->next;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/* Convenience wrappers that resolve the SDNA struct index from the struct
 * type name at compile time via #SDNA_TYPE_FROM_STRUCT. */
#define writestruct_at_address(wd, filecode, struct_id, nr, adr, data) \
  writestruct_at_address_nr(wd, filecode, SDNA_TYPE_FROM_STRUCT(struct_id), nr, adr, data)

#define writestruct(wd, filecode, struct_id, nr, adr) \
  writestruct_nr(wd, filecode, SDNA_TYPE_FROM_STRUCT(struct_id), nr, adr)
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Typed DNA File Writing
|
|
|
|
|
*
|
|
|
|
|
* These functions are used by blender's .blend system for file saving/loading.
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2015-02-10 05:45:57 +11:00
|
|
|
/**
|
2021-06-22 10:42:32 -07:00
|
|
|
* Take care using 'use_active_win', since we won't want the currently active window
|
2015-02-10 05:45:57 +11:00
|
|
|
* to change which scene renders (currently only used for undo).
|
|
|
|
|
*/
|
Main Workspace Integration
This commit does the main integration of workspaces, which is a design we agreed on during the 2.8 UI workshop (see https://wiki.blender.org/index.php/Dev:2.8/UI/Workshop_Writeup)
Workspaces should generally be stable, I'm not aware of any remaining bugs (or I've forgotten them :) ). If you find any, let me know!
(Exception: mode switching button might get out of sync with actual mode in some cases, would consider that a limitation/ToDo. Needs to be resolved at some point.)
== Main Changes/Features
* Introduces the new Workspaces as data-blocks.
* Allow storing a number of custom workspaces as part of the user configuration. Needs further work to allow adding and deleting individual workspaces.
* Bundle a default workspace configuration with Blender (current screen-layouts converted to workspaces).
* Pressing button to add a workspace spawns a menu to select between "Duplicate Current" and the workspaces from the user configuration. If no workspaces are stored in the user configuration, the default workspaces are listed instead.
* Store screen-layouts (`bScreen`) per workspace.
* Store an active screen-layout per workspace. Changing the workspace will enable this layout.
* Store active mode in workspace. Changing the workspace will also enter the mode of the new workspace. (Note that we still store the active mode in the object, moving this completely to workspaces is a separate project.)
* Store an active render layer per workspace.
* Moved mode switch from 3D View header to Info Editor header.
* Store active scene in window (not directly workspace related, but overlaps quite a bit).
* Removed 'Use Global Scene' User Preference option.
* Compatibility with old files - a new workspace is created for every screen-layout of old files. Old Blender versions should be able to read files saved with workspace support as well.
* Default .blend only contains one workspace ("General").
* Support appending workspaces.
Opening files without UI and commandline rendering should work fine.
Note that the UI is temporary! We plan to introduce a new global topbar
that contains the workspace options and tabs for switching workspaces.
== Technical Notes
* Workspaces are data-blocks.
* Adding and removing `bScreen`s should be done through `ED_workspace_layout` API now.
* A workspace can be active in multiple windows at the same time.
* The mode menu (which is now in the Info Editor header) doesn't display "Grease Pencil Edit" mode anymore since its availability depends on the active editor. Will be fixed by making Grease Pencil an own object type (as planned).
* The button to change the active workspace object mode may get out of sync with the mode of the active object. Will either be resolved by moving mode out of object data, or we'll disable workspace modes again (there's a `#define USE_WORKSPACE_MODE` for that).
* Screen-layouts (`bScreen`) are IDs and thus stored in a main list-base. Had to add a wrapper `WorkSpaceLayout` so we can store them in a list-base within workspaces, too. On the long run we could completely replace `bScreen` by workspace structs.
* `WorkSpace` types use some special compiler trickery to allow marking structs and struct members as private. BKE_workspace API should be used for accessing those.
* Added scene operators `SCENE_OT_`. Was previously done through screen operators.
== BPY API Changes
* Removed `Screen.scene`, added `Window.scene`
* Removed `UserPreferencesView.use_global_scene`
* Added `Context.workspace`, `Window.workspace` and `BlendData.workspaces`
* Added `bpy.types.WorkSpace` containing `screens`, `object_mode` and `render_layer`
* Added Screen.layout_name for the layout name that'll be displayed in the UI (may differ from internal name)
== What's left?
* There are a few open design questions (T50521). We should find the needed answers and implement them.
* Allow adding and removing individual workspaces from workspace configuration (needs UI design).
* Get the override system ready and support overrides per workspace.
* Support custom UI setups as part of workspaces (hidden panels, hidden buttons, customizable toolbars, etc).
* Allow enabling add-ons per workspace.
* Support custom workspace keymaps.
* Remove special exception for workspaces in linking code (so they're always appended, never linked). Depends on a few things, so best to solve later.
* Get the topbar done.
* Workspaces need a proper icon, current one is just a placeholder :)
Reviewed By: campbellbarton, mont29
Tags: #user_interface, #bf_blender_2.8
Maniphest Tasks: T50521
Differential Revision: https://developer.blender.org/D2451
2017-06-01 19:56:58 +02:00
|
|
|
static void current_screen_compat(Main *mainvar,
|
2024-11-18 17:55:24 +01:00
|
|
|
const bool use_active_win,
|
2018-07-24 11:21:32 +02:00
|
|
|
bScreen **r_screen,
|
|
|
|
|
Scene **r_scene,
|
|
|
|
|
ViewLayer **r_view_layer)
|
2002-10-12 11:37:38 +00:00
|
|
|
{
|
2008-12-19 16:36:15 +00:00
|
|
|
wmWindowManager *wm;
|
2022-09-15 19:13:01 +02:00
|
|
|
wmWindow *window = nullptr;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Find a global current screen in the first open window, to have
|
|
|
|
|
* a reasonable default for reading in older versions. */
|
2022-09-15 19:13:01 +02:00
|
|
|
wm = static_cast<wmWindowManager *>(mainvar->wm.first);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2015-02-10 05:45:57 +11:00
|
|
|
if (wm) {
|
|
|
|
|
if (use_active_win) {
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Write the active window into the file, needed for multi-window undo #43424. */
|
2022-09-15 19:13:01 +02:00
|
|
|
for (window = static_cast<wmWindow *>(wm->windows.first); window; window = window->next) {
|
2015-02-10 05:45:57 +11:00
|
|
|
if (window->active) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Fallback. */
|
2022-09-15 19:13:01 +02:00
|
|
|
if (window == nullptr) {
|
|
|
|
|
window = static_cast<wmWindow *>(wm->windows.first);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2015-02-10 05:45:57 +11:00
|
|
|
}
|
|
|
|
|
else {
|
2022-09-15 19:13:01 +02:00
|
|
|
window = static_cast<wmWindow *>(wm->windows.first);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2015-02-10 05:45:57 +11:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2022-09-15 19:13:01 +02:00
|
|
|
*r_screen = (window) ? BKE_workspace_active_screen_get(window->workspace_hook) : nullptr;
|
|
|
|
|
*r_scene = (window) ? window->scene : nullptr;
|
2018-07-24 11:21:32 +02:00
|
|
|
*r_view_layer = (window && *r_scene) ? BKE_view_layer_find(*r_scene, window->view_layer_name) :
|
2022-09-15 19:13:01 +02:00
|
|
|
nullptr;
|
2008-12-19 16:36:15 +00:00
|
|
|
}
|
|
|
|
|
|
2022-10-05 13:44:02 -05:00
|
|
|
/** Per-scene render metadata written as a #BLO_CODE_REND block, so external
 * tools can read frame-range info without parsing the whole file. */
struct RenderInfo {
  int sfra; /* Start frame of the scene's render range. */
  int efra; /* End frame of the scene's render range. */
  char scene_name[MAX_ID_NAME - 2]; /* Scene name without the 2-char ID code prefix. */
};
|
2012-04-26 04:03:25 +00:00
|
|
|
|
2019-08-17 00:54:22 +10:00
|
|
|
/**
|
|
|
|
|
* This was originally added for the historic render-daemon feature,
|
|
|
|
|
* now write because it can be easily extracted without reading the whole blend file.
|
|
|
|
|
*
|
2023-02-21 16:39:58 +01:00
|
|
|
* See: `scripts/modules/blend_render_info.py`
|
2019-08-17 00:54:22 +10:00
|
|
|
*/
|
2012-10-15 02:15:07 +00:00
|
|
|
static void write_renderinfo(WriteData *wd, Main *mainvar)
|
2008-12-19 16:36:15 +00:00
|
|
|
{
|
|
|
|
|
bScreen *curscreen;
|
2022-09-15 19:13:01 +02:00
|
|
|
Scene *curscene = nullptr;
|
2018-07-24 11:21:32 +02:00
|
|
|
ViewLayer *view_layer;
|
2004-06-23 18:22:51 +00:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* XXX: in future, handle multiple windows with multiple screens? */
|
2018-07-24 11:21:32 +02:00
|
|
|
current_screen_compat(mainvar, false, &curscreen, &curscene, &view_layer);
|
2016-06-28 17:35:35 +10:00
|
|
|
|
2020-08-21 13:14:41 +02:00
|
|
|
LISTBASE_FOREACH (Scene *, sce, &mainvar->scenes) {
|
2021-08-26 15:01:14 +02:00
|
|
|
if (!ID_IS_LINKED(sce) && (sce == curscene || (sce->r.scemode & R_BG_RENDER))) {
|
2020-08-21 13:14:41 +02:00
|
|
|
RenderInfo data;
|
2012-04-26 04:03:25 +00:00
|
|
|
data.sfra = sce->r.sfra;
|
|
|
|
|
data.efra = sce->r.efra;
|
|
|
|
|
memset(data.scene_name, 0, sizeof(data.scene_name));
|
2004-06-23 18:22:51 +00:00
|
|
|
|
2023-05-09 12:50:37 +10:00
|
|
|
STRNCPY(data.scene_name, sce->id.name + 2);
|
2004-06-23 18:22:51 +00:00
|
|
|
|
2023-04-08 12:42:40 +02:00
|
|
|
writedata(wd, BLO_CODE_REND, sizeof(data), &data);
|
2002-10-12 11:37:38 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-05 21:19:03 +02:00
|
|
|
/** Write a single key-map item together with its optional ID-properties. */
static void write_keymapitem(BlendWriter *writer, const wmKeyMapItem *item)
{
  BLO_write_struct(writer, wmKeyMapItem, item);

  if (item->properties != nullptr) {
    IDP_BlendWrite(writer, item->properties);
  }
}
|
|
|
|
|
|
2020-06-05 21:19:03 +02:00
|
|
|
/**
 * Write the user preferences (#UserDef) plus all list data it owns:
 * themes, key-maps, key-config preferences, custom menus, add-ons,
 * auto-exec paths, script directories, asset libraries, extension
 * repositories, asset-shelf settings and UI styles.
 *
 * \note The order of the chunks below is part of the file layout,
 * it intentionally matches the reading code.
 */
static void write_userdef(BlendWriter *writer, const UserDef *userdef)
{
  writestruct(writer->wd, BLO_CODE_USER, UserDef, 1, userdef);

  LISTBASE_FOREACH (const bTheme *, theme, &userdef->themes) {
    BLO_write_struct(writer, bTheme, theme);
  }

  LISTBASE_FOREACH (const wmKeyMap *, keymap, &userdef->user_keymaps) {
    BLO_write_struct(writer, wmKeyMap, keymap);

    /* Diff items store only the user's delta against the built-in key-map. */
    LISTBASE_FOREACH (const wmKeyMapDiffItem *, diff_item, &keymap->diff_items) {
      BLO_write_struct(writer, wmKeyMapDiffItem, diff_item);
      if (diff_item->remove_item) {
        write_keymapitem(writer, diff_item->remove_item);
      }
      if (diff_item->add_item) {
        write_keymapitem(writer, diff_item->add_item);
      }
    }

    LISTBASE_FOREACH (const wmKeyMapItem *, item, &keymap->items) {
      write_keymapitem(writer, item);
    }
  }

  LISTBASE_FOREACH (const wmKeyConfigPref *, keyconf_pref, &userdef->user_keyconfig_prefs) {
    BLO_write_struct(writer, wmKeyConfigPref, keyconf_pref);
    if (keyconf_pref->prop) {
      IDP_BlendWrite(writer, keyconf_pref->prop);
    }
  }

  LISTBASE_FOREACH (const bUserMenu *, menu, &userdef->user_menus) {
    BLO_write_struct(writer, bUserMenu, menu);
    LISTBASE_FOREACH (const bUserMenuItem *, menu_item, &menu->items) {
      /* Each item type has its own (larger) struct, dispatch on the type tag. */
      switch (menu_item->type) {
        case USER_MENU_TYPE_OPERATOR: {
          const bUserMenuItem_Op *umi_op = (const bUserMenuItem_Op *)menu_item;
          BLO_write_struct(writer, bUserMenuItem_Op, umi_op);
          if (umi_op->prop) {
            IDP_BlendWrite(writer, umi_op->prop);
          }
          break;
        }
        case USER_MENU_TYPE_MENU: {
          const bUserMenuItem_Menu *umi_mt = (const bUserMenuItem_Menu *)menu_item;
          BLO_write_struct(writer, bUserMenuItem_Menu, umi_mt);
          break;
        }
        case USER_MENU_TYPE_PROP: {
          const bUserMenuItem_Prop *umi_pr = (const bUserMenuItem_Prop *)menu_item;
          BLO_write_struct(writer, bUserMenuItem_Prop, umi_pr);
          break;
        }
        default: {
          BLO_write_struct(writer, bUserMenuItem, menu_item);
          break;
        }
      }
    }
  }

  LISTBASE_FOREACH (const bAddon *, addon, &userdef->addons) {
    BLO_write_struct(writer, bAddon, addon);
    if (addon->prop) {
      IDP_BlendWrite(writer, addon->prop);
    }
  }

  LISTBASE_FOREACH (const bPathCompare *, path_compare, &userdef->autoexec_paths) {
    BLO_write_struct(writer, bPathCompare, path_compare);
  }

  LISTBASE_FOREACH (const bUserScriptDirectory *, script_directory, &userdef->script_directories)
  {
    BLO_write_struct(writer, bUserScriptDirectory, script_directory);
  }

  LISTBASE_FOREACH (const bUserAssetLibrary *, asset_library, &userdef->asset_libraries) {
    BLO_write_struct(writer, bUserAssetLibrary, asset_library);
  }

  LISTBASE_FOREACH (const bUserExtensionRepo *, extension_repo, &userdef->extension_repos) {
    BLO_write_struct(writer, bUserExtensionRepo, extension_repo);
    BKE_preferences_extension_repo_write_data(writer, extension_repo);
  }

  LISTBASE_FOREACH (
      const bUserAssetShelfSettings *, shelf_settings, &userdef->asset_shelves_settings)
  {
    BLO_write_struct(writer, bUserAssetShelfSettings, shelf_settings);
    BKE_asset_catalog_path_list_blend_write(writer, shelf_settings->enabled_catalog_paths);
  }

  LISTBASE_FOREACH (const uiStyle *, ui_style, &userdef->uistyles) {
    BLO_write_struct(writer, uiStyle, ui_style);
  }
}
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/** Keep it last of `write_*_data` functions. */
|
2024-12-10 17:43:09 +01:00
|
|
|
static void write_libraries(WriteData *wd, Main *bmain)
|
2016-06-14 14:53:39 +02:00
|
|
|
{
|
2024-12-10 17:43:09 +01:00
|
|
|
/* Gather IDs coming from each library. */
|
|
|
|
|
blender::MultiValueMap<Library *, ID *> linked_ids_by_library;
|
|
|
|
|
{
|
|
|
|
|
ID *id;
|
|
|
|
|
FOREACH_MAIN_ID_BEGIN (bmain, id) {
|
|
|
|
|
if (!ID_IS_LINKED(id)) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
BLI_assert(id->lib);
|
|
|
|
|
linked_ids_by_library.add(id->lib, id);
|
|
|
|
|
}
|
|
|
|
|
FOREACH_MAIN_ID_END;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
LISTBASE_FOREACH (Library *, library_ptr, &bmain->libraries) {
|
|
|
|
|
Library &library = *library_ptr;
|
|
|
|
|
const blender::Span<ID *> ids = linked_ids_by_library.lookup(&library);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2024-12-10 17:43:09 +01:00
|
|
|
/* Gather IDs that are somehow directly referenced by data in the current blend file. */
|
|
|
|
|
blender::Vector<ID *> ids_used_from_library;
|
|
|
|
|
for (ID *id : ids) {
|
|
|
|
|
if (id->us == 0) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if (id->tag & ID_TAG_EXTERN) {
|
|
|
|
|
ids_used_from_library.append(id);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
if ((id->tag & ID_TAG_INDIRECT) && (id->flag & ID_FLAG_INDIRECT_WEAK_LINK)) {
|
|
|
|
|
ids_used_from_library.append(id);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2024-12-10 17:43:09 +01:00
|
|
|
bool should_write_library = false;
|
|
|
|
|
if (library.packedfile) {
|
|
|
|
|
should_write_library = true;
|
2016-06-28 17:35:35 +10:00
|
|
|
}
|
2020-05-25 17:39:16 +02:00
|
|
|
else if (wd->use_memfile) {
|
2024-12-10 17:43:09 +01:00
|
|
|
/* When writing undo step we always write all existing libraries. That makes reading undo
|
|
|
|
|
* step much easier when dealing with purely indirectly used libraries. */
|
|
|
|
|
should_write_library = true;
|
2020-05-25 17:39:16 +02:00
|
|
|
}
|
2016-06-14 14:53:39 +02:00
|
|
|
else {
|
2024-12-10 17:43:09 +01:00
|
|
|
should_write_library = !ids_used_from_library.is_empty();
|
2016-06-14 14:53:39 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2024-12-10 17:43:09 +01:00
|
|
|
if (!should_write_library) {
|
|
|
|
|
/* Nothing from the library is used, so it does not have to be written. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2022-07-21 16:36:06 +02:00
|
|
|
|
2024-12-10 17:43:09 +01:00
|
|
|
BlendWriter writer = {wd};
|
|
|
|
|
writestruct(wd, ID_LI, Library, 1, &library);
|
|
|
|
|
BKE_id_blend_write(&writer, &library.id);
|
|
|
|
|
|
|
|
|
|
/* Write packed file if necessary. */
|
|
|
|
|
if (library.packedfile) {
|
|
|
|
|
BKE_packedfile_blend_write(&writer, library.packedfile);
|
|
|
|
|
if (!wd->use_memfile) {
|
|
|
|
|
CLOG_INFO(&LOG, 2, "Write packed .blend: %s", library.filepath);
|
2016-06-28 17:35:35 +10:00
|
|
|
}
|
2024-12-10 17:43:09 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2024-12-10 17:43:09 +01:00
|
|
|
/* Write placeholders for linked data-blocks that are used. */
|
|
|
|
|
for (const ID *id : ids_used_from_library) {
|
|
|
|
|
if (!BKE_idtype_idcode_is_linkable(GS(id->name))) {
|
|
|
|
|
CLOG_ERROR(&LOG,
|
|
|
|
|
"Data-block '%s' from lib '%s' is not linkable, but is flagged as "
|
|
|
|
|
"directly linked",
|
|
|
|
|
id->name,
|
|
|
|
|
library.runtime.filepath_abs);
|
2016-06-14 14:53:39 +02:00
|
|
|
}
|
2024-12-10 17:43:09 +01:00
|
|
|
writestruct(wd, ID_LINK_PLACEHOLDER, ID, 1, id);
|
2016-06-14 14:53:39 +02:00
|
|
|
}
|
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-07-07 16:02:45 +10:00
|
|
|
mywrite_flush(wd);
|
2016-06-14 14:53:39 +02:00
|
|
|
}
|
|
|
|
|
|
2022-09-15 19:13:01 +02:00
|
|
|
#ifdef WITH_BUILDINFO
|
2023-07-26 15:23:23 +10:00
|
|
|
extern "C" ulong build_commit_timestamp;
|
2022-09-15 19:13:01 +02:00
|
|
|
extern "C" char build_hash[];
|
|
|
|
|
#endif
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/**
|
|
|
|
|
* Context is usually defined by WM, two cases where no WM is available:
|
|
|
|
|
* - for forward compatibility, `curscreen` has to be saved
|
|
|
|
|
* - for undo-file, `curscene` needs to be saved.
|
|
|
|
|
*/
|
2024-11-18 17:55:24 +01:00
|
|
|
static void write_global(WriteData *wd, const int fileflags, Main *mainvar)
|
2002-10-12 11:37:38 +00:00
|
|
|
{
|
2018-04-14 12:33:19 +02:00
|
|
|
const bool is_undo = wd->use_memfile;
|
2002-10-12 11:37:38 +00:00
|
|
|
FileGlobal fg;
|
2008-12-19 16:36:15 +00:00
|
|
|
bScreen *screen;
|
Main Workspace Integration
This commit does the main integration of workspaces, which is a design we agreed on during the 2.8 UI workshop (see https://wiki.blender.org/index.php/Dev:2.8/UI/Workshop_Writeup)
Workspaces should generally be stable, I'm not aware of any remaining bugs (or I've forgotten them :) ). If you find any, let me know!
(Exception: mode switching button might get out of sync with actual mode in some cases, would consider that a limitation/ToDo. Needs to be resolved at some point.)
== Main Changes/Features
* Introduces the new Workspaces as data-blocks.
* Allow storing a number of custom workspaces as part of the user configuration. Needs further work to allow adding and deleting individual workspaces.
* Bundle a default workspace configuration with Blender (current screen-layouts converted to workspaces).
* Pressing button to add a workspace spawns a menu to select between "Duplicate Current" and the workspaces from the user configuration. If no workspaces are stored in the user configuration, the default workspaces are listed instead.
* Store screen-layouts (`bScreen`) per workspace.
* Store an active screen-layout per workspace. Changing the workspace will enable this layout.
* Store active mode in workspace. Changing the workspace will also enter the mode of the new workspace. (Note that we still store the active mode in the object, moving this completely to workspaces is a separate project.)
* Store an active render layer per workspace.
* Moved mode switch from 3D View header to Info Editor header.
* Store active scene in window (not directly workspace related, but overlaps quite a bit).
* Removed 'Use Global Scene' User Preference option.
* Compatibility with old files - a new workspace is created for every screen-layout of old files. Old Blender versions should be able to read files saved with workspace support as well.
* Default .blend only contains one workspace ("General").
* Support appending workspaces.
Opening files without UI and commandline rendering should work fine.
Note that the UI is temporary! We plan to introduce a new global topbar
that contains the workspace options and tabs for switching workspaces.
== Technical Notes
* Workspaces are data-blocks.
* Adding and removing `bScreen`s should be done through `ED_workspace_layout` API now.
* A workspace can be active in multiple windows at the same time.
* The mode menu (which is now in the Info Editor header) doesn't display "Grease Pencil Edit" mode anymore since its availability depends on the active editor. Will be fixed by making Grease Pencil an own object type (as planned).
* The button to change the active workspace object mode may get out of sync with the mode of the active object. Will either be resolved by moving mode out of object data, or we'll disable workspace modes again (there's a `#define USE_WORKSPACE_MODE` for that).
* Screen-layouts (`bScreen`) are IDs and thus stored in a main list-base. Had to add a wrapper `WorkSpaceLayout` so we can store them in a list-base within workspaces, too. On the long run we could completely replace `bScreen` by workspace structs.
* `WorkSpace` types use some special compiler trickery to allow marking structs and struct members as private. BKE_workspace API should be used for accessing those.
* Added scene operators `SCENE_OT_`. Was previously done through screen operators.
== BPY API Changes
* Removed `Screen.scene`, added `Window.scene`
* Removed `UserPreferencesView.use_global_scene`
* Added `Context.workspace`, `Window.workspace` and `BlendData.workspaces`
* Added `bpy.types.WorkSpace` containing `screens`, `object_mode` and `render_layer`
* Added Screen.layout_name for the layout name that'll be displayed in the UI (may differ from internal name)
== What's left?
* There are a few open design questions (T50521). We should find the needed answers and implement them.
* Allow adding and removing individual workspaces from workspace configuration (needs UI design).
* Get the override system ready and support overrides per workspace.
* Support custom UI setups as part of workspaces (hidden panels, hidden buttons, customizable toolbars, etc).
* Allow enabling add-ons per workspace.
* Support custom workspace keymaps.
* Remove special exception for workspaces in linking code (so they're always appended, never linked). Depends on a few things, so best to solve later.
* Get the topbar done.
* Workspaces need a proper icon, current one is just a placeholder :)
Reviewed By: campbellbarton, mont29
Tags: #user_interface, #bf_blender_2.8
Maniphest Tasks: T50521
Differential Revision: https://developer.blender.org/D2451
2017-06-01 19:56:58 +02:00
|
|
|
Scene *scene;
|
2018-07-24 11:21:32 +02:00
|
|
|
ViewLayer *view_layer;
|
2007-01-08 12:31:53 +00:00
|
|
|
char subvstr[8];
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Prevent memory checkers from complaining. */
|
2019-02-27 15:07:50 +11:00
|
|
|
memset(fg._pad, 0, sizeof(fg._pad));
|
2021-12-13 16:22:19 +11:00
|
|
|
memset(fg.filepath, 0, sizeof(fg.filepath));
|
2014-01-22 16:23:55 +06:00
|
|
|
memset(fg.build_hash, 0, sizeof(fg.build_hash));
|
2022-09-15 19:13:01 +02:00
|
|
|
fg._pad1 = nullptr;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-07-24 11:21:32 +02:00
|
|
|
current_screen_compat(mainvar, is_undo, &screen, &scene, &view_layer);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* XXX: still remap `G`. */
|
2016-06-28 17:35:35 +10:00
|
|
|
fg.curscreen = screen;
|
Main Workspace Integration
This commit does the main integration of workspaces, which is a design we agreed on during the 2.8 UI workshop (see https://wiki.blender.org/index.php/Dev:2.8/UI/Workshop_Writeup)
Workspaces should generally be stable, I'm not aware of any remaining bugs (or I've forgotten them :) ). If you find any, let me know!
(Exception: mode switching button might get out of sync with actual mode in some cases, would consider that a limitation/ToDo. Needs to be resolved at some point.)
== Main Changes/Features
* Introduces the new Workspaces as data-blocks.
* Allow storing a number of custom workspaces as part of the user configuration. Needs further work to allow adding and deleting individual workspaces.
* Bundle a default workspace configuration with Blender (current screen-layouts converted to workspaces).
* Pressing button to add a workspace spawns a menu to select between "Duplicate Current" and the workspaces from the user configuration. If no workspaces are stored in the user configuration, the default workspaces are listed instead.
* Store screen-layouts (`bScreen`) per workspace.
* Store an active screen-layout per workspace. Changing the workspace will enable this layout.
* Store active mode in workspace. Changing the workspace will also enter the mode of the new workspace. (Note that we still store the active mode in the object, moving this completely to workspaces is a separate project.)
* Store an active render layer per workspace.
* Moved mode switch from 3D View header to Info Editor header.
* Store active scene in window (not directly workspace related, but overlaps quite a bit).
* Removed 'Use Global Scene' User Preference option.
* Compatibility with old files - a new workspace is created for every screen-layout of old files. Old Blender versions should be able to read files saved with workspace support as well.
* Default .blend only contains one workspace ("General").
* Support appending workspaces.
Opening files without UI and commandline rendering should work fine.
Note that the UI is temporary! We plan to introduce a new global topbar
that contains the workspace options and tabs for switching workspaces.
== Technical Notes
* Workspaces are data-blocks.
* Adding and removing `bScreen`s should be done through `ED_workspace_layout` API now.
* A workspace can be active in multiple windows at the same time.
* The mode menu (which is now in the Info Editor header) doesn't display "Grease Pencil Edit" mode anymore since its availability depends on the active editor. Will be fixed by making Grease Pencil an own object type (as planned).
* The button to change the active workspace object mode may get out of sync with the mode of the active object. Will either be resolved by moving mode out of object data, or we'll disable workspace modes again (there's a `#define USE_WORKSPACE_MODE` for that).
* Screen-layouts (`bScreen`) are IDs and thus stored in a main list-base. Had to add a wrapper `WorkSpaceLayout` so we can store them in a list-base within workspaces, too. On the long run we could completely replace `bScreen` by workspace structs.
* `WorkSpace` types use some special compiler trickery to allow marking structs and struct members as private. BKE_workspace API should be used for accessing those.
* Added scene operators `SCENE_OT_`. Was previously done through screen operators.
== BPY API Changes
* Removed `Screen.scene`, added `Window.scene`
* Removed `UserPreferencesView.use_global_scene`
* Added `Context.workspace`, `Window.workspace` and `BlendData.workspaces`
* Added `bpy.types.WorkSpace` containing `screens`, `object_mode` and `render_layer`
* Added Screen.layout_name for the layout name that'll be displayed in the UI (may differ from internal name)
== What's left?
* There are a few open design questions (T50521). We should find the needed answers and implement them.
* Allow adding and removing individual workspaces from workspace configuration (needs UI design).
* Get the override system ready and support overrides per workspace.
* Support custom UI setups as part of workspaces (hidden panels, hidden buttons, customizable toolbars, etc).
* Allow enabling add-ons per workspace.
* Support custom workspace keymaps.
* Remove special exception for workspaces in linking code (so they're always appended, never linked). Depends on a few things, so best to solve later.
* Get the topbar done.
* Workspaces need a proper icon, current one is just a placeholder :)
Reviewed By: campbellbarton, mont29
Tags: #user_interface, #bf_blender_2.8
Maniphest Tasks: T50521
Differential Revision: https://developer.blender.org/D2451
2017-06-01 19:56:58 +02:00
|
|
|
fg.curscene = scene;
|
2018-07-24 11:21:32 +02:00
|
|
|
fg.cur_view_layer = view_layer;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/* Prevent to save this, is not good convention, and feature with concerns. */
|
2019-02-02 14:01:48 +11:00
|
|
|
fg.fileflags = (fileflags & ~G_FILE_FLAG_ALL_RUNTIME);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2016-06-28 17:35:35 +10:00
|
|
|
fg.globalf = G.f;
|
2021-03-15 13:30:43 +11:00
|
|
|
/* Write information needed for recovery. */
|
|
|
|
|
if (fileflags & G_FILE_RECOVER_WRITE) {
|
2021-12-13 16:22:19 +11:00
|
|
|
STRNCPY(fg.filepath, mainvar->filepath);
|
2024-09-11 15:11:42 +02:00
|
|
|
/* Compression is often turned of when writing recovery files. However, when opening the file,
|
|
|
|
|
* it should be enabled again. */
|
|
|
|
|
fg.fileflags = G.fileflags & G_FILE_COMPRESS;
|
2021-03-15 13:30:43 +11:00
|
|
|
}
|
2023-05-09 12:50:37 +10:00
|
|
|
SNPRINTF(subvstr, "%4d", BLENDER_FILE_SUBVERSION);
|
2007-01-08 12:31:53 +00:00
|
|
|
memcpy(fg.subvstr, subvstr, 4);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
Blender: change bugfix release versioning from a/b/c to .1/.2/.3
The file subversion is no longer used in the Python API or user interface,
and is now internal to Blender.
User interface, Python API and file I/O metadata now use more consistent
formatting for version numbers. Official releases use "2.83.0", "2.83.1",
and releases under development use "2.90.0 Alpha", "2.90.0 Beta".
Some Python add-ons may need to lower the Blender version in bl_info to
(2, 83, 0) or (2, 90, 0) if they used a subversion number higher than 0.
https://wiki.blender.org/wiki/Reference/Release_Notes/2.83/Python_API#Compatibility
This change is in preparation of LTS releases, and also brings us more
in line with semantic versioning.
Fixes T76058.
Differential Revision: https://developer.blender.org/D7748
2020-05-25 10:49:04 +02:00
|
|
|
fg.subversion = BLENDER_FILE_SUBVERSION;
|
|
|
|
|
fg.minversion = BLENDER_FILE_MIN_VERSION;
|
|
|
|
|
fg.minsubversion = BLENDER_FILE_MIN_SUBVERSION;
|
2011-08-22 16:54:26 +00:00
|
|
|
#ifdef WITH_BUILDINFO
|
2022-09-15 19:13:01 +02:00
|
|
|
/* TODO(sergey): Add branch name to file as well? */
|
|
|
|
|
fg.build_commit_timestamp = build_commit_timestamp;
|
2023-05-13 17:34:21 +10:00
|
|
|
STRNCPY(fg.build_hash, build_hash);
|
2011-01-02 13:33:32 +00:00
|
|
|
#else
|
2013-11-15 17:11:59 +06:00
|
|
|
fg.build_commit_timestamp = 0;
|
2023-05-09 12:50:37 +10:00
|
|
|
STRNCPY(fg.build_hash, "unknown");
|
2011-01-02 13:33:32 +00:00
|
|
|
#endif
|
2023-04-08 12:42:40 +02:00
|
|
|
writestruct(wd, BLO_CODE_GLOB, FileGlobal, 1, &fg);
|
2002-10-12 11:37:38 +00:00
|
|
|
}
|
|
|
|
|
|
2021-07-03 23:08:40 +10:00
|
|
|
/**
|
|
|
|
|
* Preview image, first 2 values are width and height
|
|
|
|
|
* second are an RGBA image (uchar).
|
|
|
|
|
* \note this uses 'TEST' since new types will segfault on file load for older blender versions.
|
2010-05-24 21:52:18 +00:00
|
|
|
*/
|
Make .blend file thumbnail reading simpler and more coherent, read/store them when reading in background mode.
Primary goal of this commit is to fix an annoying issue - when processing and saving .blend
files in background mode you lose their thumbnails, since it can only be generated with
an OpenGL context.
Solution to that is to read .blend thumbnail while reading .blend file (only done in background
mode currently), and store it in Main struct.
Also, this lead to removing .blend file reading code from thumb_blend (no need to have doublons).
We now have a small interface in regular reading code area, which keeps it reasonbaly light
by only reading/parsing header info, and first few BHead blocks.
This makes code reading .blend thumbnail about 3 to 4 times slower than previous highly specialized
one in blend_thumb.c, but overall thumbnail generation of a big .blend files folder only grows
of about 1%, think we can bare with it.
Finally, since thumbnail is now optionally stored in Main struct, it makes it easy to allow user
to define their own custom one (instead of auto-generated one). RNA API for this was not added though,
accessing that kind of .blend meta-data has to be rethought a bit on a bigger level first.
Reviewers: sergey, campbellbarton
Subscribers: Severin, psy-fi
Differential Revision: https://developer.blender.org/D1469
2015-08-27 15:53:23 +02:00
|
|
|
static void write_thumb(WriteData *wd, const BlendThumbnail *thumb)
|
2010-05-24 21:52:18 +00:00
|
|
|
{
|
Make .blend file thumbnail reading simpler and more coherent, read/store them when reading in background mode.
Primary goal of this commit is to fix an annoying issue - when processing and saving .blend
files in background mode you lose their thumbnails, since it can only be generated with
an OpenGL context.
Solution to that is to read .blend thumbnail while reading .blend file (only done in background
mode currently), and store it in Main struct.
Also, this lead to removing .blend file reading code from thumb_blend (no need to have doublons).
We now have a small interface in regular reading code area, which keeps it reasonbaly light
by only reading/parsing header info, and first few BHead blocks.
This makes code reading .blend thumbnail about 3 to 4 times slower than previous highly specialized
one in blend_thumb.c, but overall thumbnail generation of a big .blend files folder only grows
of about 1%, think we can bare with it.
Finally, since thumbnail is now optionally stored in Main struct, it makes it easy to allow user
to define their own custom one (instead of auto-generated one). RNA API for this was not added though,
accessing that kind of .blend meta-data has to be rethought a bit on a bigger level first.
Reviewers: sergey, campbellbarton
Subscribers: Severin, psy-fi
Differential Revision: https://developer.blender.org/D1469
2015-08-27 15:53:23 +02:00
|
|
|
if (thumb) {
|
2023-04-08 12:42:40 +02:00
|
|
|
writedata(wd, BLO_CODE_TEST, BLEN_THUMB_MEMSIZE_FILE(thumb->width, thumb->height), thumb);
|
Make .blend file thumbnail reading simpler and more coherent, read/store them when reading in background mode.
Primary goal of this commit is to fix an annoying issue - when processing and saving .blend
files in background mode you lose their thumbnails, since it can only be generated with
an OpenGL context.
Solution to that is to read .blend thumbnail while reading .blend file (only done in background
mode currently), and store it in Main struct.
Also, this lead to removing .blend file reading code from thumb_blend (no need to have doublons).
We now have a small interface in regular reading code area, which keeps it reasonbaly light
by only reading/parsing header info, and first few BHead blocks.
This makes code reading .blend thumbnail about 3 to 4 times slower than previous highly specialized
one in blend_thumb.c, but overall thumbnail generation of a big .blend files folder only grows
of about 1%, think we can bare with it.
Finally, since thumbnail is now optionally stored in Main struct, it makes it easy to allow user
to define their own custom one (instead of auto-generated one). RNA API for this was not added though,
accessing that kind of .blend meta-data has to be rethought a bit on a bigger level first.
Reviewers: sergey, campbellbarton
Subscribers: Severin, psy-fi
Differential Revision: https://developer.blender.org/D1469
2015-08-27 15:53:23 +02:00
|
|
|
}
|
2010-05-24 21:52:18 +00:00
|
|
|
}
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name File Writing (Private)
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2024-11-25 18:07:16 +01:00
|
|
|
/**
 * Prepare a temporary, write-safe shallow copy of \a id inside `buffer_`.
 *
 * The copy has its runtime-only data (tags, user-count, list links, runtime pointers...)
 * cleared, so that serializing it does not generate false 'is changed' detections between
 * undo steps. The original \a id in Main is left untouched, except for its undo recalc
 * flags when \a is_undo is true.
 *
 * \param id: The ID to copy (only its `recalc_*_undo_push` members may be modified here).
 * \param is_undo: True when writing an undo memfile rather than a file on disk.
 */
BLO_Write_IDBuffer::BLO_Write_IDBuffer(ID &id, const bool is_undo)
    /* Allocate room for the full type-specific struct of this ID, not just the ID base. */
    : buffer_(BKE_idtype_get_info_from_id(&id)->struct_size, alignof(ID))
{
  const IDTypeInfo *id_type = BKE_idtype_get_info_from_id(&id);
  ID *temp_id = static_cast<ID *>(buffer_.buffer());

  if (is_undo) {
    /* Record the changes that happened up to this undo push in
     * recalc_up_to_undo_push, and clear `recalc_after_undo_push` again
     * to start accumulating for the next undo push. */
    id.recalc_up_to_undo_push = id.recalc_after_undo_push;
    id.recalc_after_undo_push = 0;
  }

  /* Copy ID data itself into buffer, to be able to freely modify it. */
  memcpy(temp_id, &id, id_type->struct_size);

  /* Clear runtime data to reduce false detection of changed data in undo/redo context. */
  if (is_undo) {
    temp_id->tag &= ID_TAG_KEEP_ON_UNDO;
  }
  else {
    temp_id->tag = 0;
  }
  temp_id->us = 0;
  temp_id->icon_id = 0;
  /* Those listbase data change every time we add/remove an ID, and also often when
   * renaming one (due to re-sorting). This avoids generating a lot of false 'is changed'
   * detections between undo steps. */
  temp_id->prev = nullptr;
  temp_id->next = nullptr;
  /* Those runtime pointers should never be set during writing stage, but just in case clear
   * them too. */
  temp_id->orig_id = nullptr;
  temp_id->newid = nullptr;
  /* Even though in theory we could be able to preserve this python instance across undo even
   * when we need to re-read the ID into its original address, this is currently cleared in
   * #direct_link_id_common in `readfile.cc` anyway. */
  temp_id->py_instance = nullptr;
  /* Clear runtime data struct. */
  memset(&temp_id->runtime, 0, sizeof(temp_id->runtime));

  /* Also clear the draw-data list in the copy (runtime data, see above), when this ID type
   * has one. */
  DrawDataList *drawdata = DRW_drawdatalist_from_id(temp_id);
  if (drawdata) {
    BLI_listbase_clear(reinterpret_cast<ListBase *>(drawdata));
  }
}
|
|
|
|
|
|
2024-11-25 18:07:16 +01:00
|
|
|
/** Convenience overload: deduces the undo/disk-write mode from \a writer. */
BLO_Write_IDBuffer::BLO_Write_IDBuffer(ID &id, BlendWriter *writer)
    : BLO_Write_IDBuffer(id, BLO_write_is_undo(writer))
{
}
|
|
|
|
|
|
2022-11-30 11:13:37 +01:00
|
|
|
/* Helper callback for checking linked IDs used by given ID (assumed local), to ensure directly
|
|
|
|
|
* linked data is tagged accordingly. */
|
|
|
|
|
static int write_id_direct_linked_data_process_cb(LibraryIDLinkCallbackData *cb_data)
|
|
|
|
|
{
|
2023-05-16 18:14:43 +02:00
|
|
|
ID *self_id = cb_data->self_id;
|
2022-11-30 11:13:37 +01:00
|
|
|
ID *id = *cb_data->id_pointer;
|
2024-12-12 15:20:22 +01:00
|
|
|
const LibraryForeachIDCallbackFlag cb_flag = cb_data->cb_flag;
|
2022-11-30 11:13:37 +01:00
|
|
|
|
|
|
|
|
if (id == nullptr || !ID_IS_LINKED(id)) {
|
|
|
|
|
return IDWALK_RET_NOP;
|
|
|
|
|
}
|
2023-05-16 18:14:43 +02:00
|
|
|
BLI_assert(!ID_IS_LINKED(self_id));
|
2022-11-30 11:13:37 +01:00
|
|
|
BLI_assert((cb_flag & IDWALK_CB_INDIRECT_USAGE) == 0);
|
2022-12-16 10:13:40 +09:00
|
|
|
|
2024-08-07 12:12:17 +02:00
|
|
|
if (self_id->tag & ID_TAG_RUNTIME) {
|
2022-12-16 10:13:40 +09:00
|
|
|
return IDWALK_RET_NOP;
|
|
|
|
|
}
|
2022-11-30 11:13:37 +01:00
|
|
|
|
2024-05-29 17:38:08 +02:00
|
|
|
if (!BKE_idtype_idcode_is_linkable(GS(id->name))) {
|
|
|
|
|
/* Usages of unlinkable IDs (aka ShapeKeys and some UI IDs) should never cause them to be
|
|
|
|
|
* considered as directly linked. This can often happen e.g. from UI data (the Outliner will
|
|
|
|
|
* have links to most IDs).
|
|
|
|
|
*/
|
|
|
|
|
return IDWALK_RET_NOP;
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-30 11:13:37 +01:00
|
|
|
if (cb_flag & IDWALK_CB_DIRECT_WEAK_LINK) {
|
|
|
|
|
id_lib_indirect_weak_link(id);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
id_lib_extern(id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return IDWALK_RET_NOP;
|
|
|
|
|
}
|
|
|
|
|
|
2024-12-03 18:34:04 +01:00
|
|
|
/**
|
|
|
|
|
* Writes ID and all its direct data to the file.
|
|
|
|
|
*/
|
|
|
|
|
static void write_id(WriteData *wd, ID *id)
|
|
|
|
|
{
|
|
|
|
|
const IDTypeInfo *id_type = BKE_idtype_get_info_from_id(id);
|
|
|
|
|
mywrite_id_begin(wd, id);
|
|
|
|
|
if (id_type->blend_write != nullptr) {
|
|
|
|
|
BlendWriter writer = {wd};
|
|
|
|
|
BLO_Write_IDBuffer id_buffer{*id, &writer};
|
|
|
|
|
id_type->blend_write(&writer, id_buffer.get(), id);
|
|
|
|
|
}
|
|
|
|
|
mywrite_id_end(wd, id);
|
|
|
|
|
}
|
|
|
|
|
|
2024-12-04 18:53:09 +01:00
|
|
|
/* Write the 12-byte file header: `BLENDER`, pointer-size char (`-` = 8 bytes, `_` = 4),
 * endianness char (`V` = big, `v` = little), then the 3-digit file version. */
static void write_blend_file_header(WriteData *wd)
{
  const char pointer_size_char = (sizeof(void *) == 8) ? '-' : '_';
  const char endian_char = (ENDIAN_ORDER == B_ENDIAN) ? 'V' : 'v';

  char header[16];
  SNPRINTF(header, "BLENDER%c%c%.3d", pointer_size_char, endian_char, BLENDER_FILE_VERSION);

  /* Exactly 12 bytes; the NUL terminator is not part of the file format. */
  mywrite(wd, header, 12);
}
|
|
|
|
|
|
2024-12-03 18:34:04 +01:00
|
|
|
/**
 * Gathers all local IDs that should be written to the file.
 *
 * Libraries and linked IDs are always skipped here (they are written separately, see
 * #write_libraries). On regular (non-undo) saves, unused IDs and runtime-only IDs are
 * skipped as well.
 *
 * \param bmain: The Main data-base to gather IDs from.
 * \param is_undo: True when writing an undo memfile (then unused/runtime IDs are kept).
 * \return The list of local IDs to be written, in Main iteration order.
 */
static blender::Vector<ID *> gather_local_ids_to_write(Main *bmain, const bool is_undo)
{
  blender::Vector<ID *> local_ids_to_write;
  ID *id;
  FOREACH_MAIN_ID_BEGIN (bmain, id) {
    if (GS(id->name) == ID_LI) {
      /* Libraries are handled separately (see #write_libraries). */
      continue;
    }
    if (ID_IS_LINKED(id)) {
      /* Linked data-blocks are handled separately (see #write_libraries). */
      continue;
    }
    const IDTypeInfo *id_type = BKE_idtype_get_info_from_id(id);
    UNUSED_VARS_NDEBUG(id_type);
    /* We should never attempt to write non-regular IDs
     * (i.e. all kind of temp/runtime ones). */
    BLI_assert((id->tag & (ID_TAG_NO_MAIN | ID_TAG_NO_USER_REFCOUNT | ID_TAG_NOT_ALLOCATED)) == 0);
    /* We only write unused IDs in undo case. */
    if (!is_undo) {
      /* NOTE: All 'never unused' local IDs (Scenes, WindowManagers, ...) should always be
       * written to disk, so their user-count should never be zero currently. Note that
       * libraries have already been skipped above, as they need a specific handling. */
      if (id->us == 0) {
        /* FIXME: #124857: Some old files seem to cause incorrect handling of their temp
         * screens.
         *
         * See e.g. file attached to #124777 (from 2.79.1).
         *
         * For now ignore, issue is not obvious to track down (`temp` bScreen ID from read data
         * _does_ have the proper `temp` tag), and seems anecdotal at worst. */
        BLI_assert((id_type->flags & IDTYPE_FLAGS_NEVER_UNUSED) == 0);
        continue;
      }

      /* XXX Special handling for ShapeKeys, as having unused shapekeys is not a good thing
       * (and reported as error by e.g. `BLO_main_validate_shapekeys`), skip writing shapekeys
       * when their 'owner' is not written.
       *
       * NOTE: Since ShapeKeys are conceptually embedded IDs (like root node trees e.g.), this
       * behavior actually makes sense anyway. This remains more of a temp hack until topic of
       * how to handle unused data on save is properly tackled. */
      if (GS(id->name) == ID_KE) {
        Key *shape_key = reinterpret_cast<Key *>(id);
        /* NOTE: Here we are accessing the real owner ID data, not it's 'proxy' shallow copy
         * generated for its file-writing. This is not expected to be an issue, but is worth
         * noting. */
        if (shape_key->from == nullptr || shape_key->from->us == 0) {
          continue;
        }
      }
    }

    if ((id->tag & ID_TAG_RUNTIME) != 0 && !is_undo) {
      /* Runtime IDs are never written to .blend files, and they should not influence
       * (in)direct status of linked IDs they may use. */
      continue;
    }

    local_ids_to_write.append(id);
  }
  FOREACH_MAIN_ID_END;
  return local_ids_to_write;
}
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/**
 * Write the whole .blend content: header, render-info, thumbnail, globals, all data-blocks,
 * library info, optionally user preferences, the DNA catalog, and the end marker.
 *
 * When #MemFile arguments are non-null, this is a file-save to memory (an undo step).
 *
 * \param compare: Previous memory file (can be nullptr).
 * \param current: The current memory file (can be nullptr).
 * \return True if an error occurred (result of #mywrite_end).
 */
static bool write_file_handle(Main *mainvar,
                              WriteWrap *ww,
                              MemFile *compare,
                              MemFile *current,
                              const int write_flags,
                              const bool use_userdef,
                              const BlendThumbnail *thumb)
{
  WriteData *wd;

  wd = mywrite_begin(ww, compare, current);
  BlendWriter writer = {wd};

  /* Clear 'directly linked' flag for all linked data, these are not necessarily valid/up-to-date
   * info, they will be re-generated while write code is processing local IDs below. */
  if (!wd->use_memfile) {
    ID *id_iter;
    FOREACH_MAIN_ID_BEGIN (mainvar, id_iter) {
      if (ID_IS_LINKED(id_iter) && BKE_idtype_idcode_is_linkable(GS(id_iter->name))) {
        if (USER_EXPERIMENTAL_TEST(&U, use_all_linked_data_direct)) {
          /* Forces all linked data to be considered as directly linked.
           * FIXME: Workaround some BAT tool limitations for Heist production, should be removed
           * asap afterward. */
          id_lib_extern(id_iter);
        }
        else if (GS(id_iter->name) == ID_SCE) {
          /* For scenes, do not force them into 'indirectly linked' status.
           * The main reason is that scenes typically have no users, so most linked scene would be
           * systematically 'lost' on file save.
           *
           * While this change re-introduces the 'no-more-used data laying around in files for
           * ever' issue when it comes to scenes, this solution seems to be the most sensible one
           * for the time being, considering that:
           * - Scene are a top-level container.
           * - Linked scenes are typically explicitly linked by the user.
           * - Cases where scenes would be indirectly linked by other data (e.g. when linking a
           *   collection or material) can be considered at the very least as not following sane
           *   practice in data dependencies.
           * - There are typically not hundreds of scenes in a file, and they are always very
           *   easily discoverable and browsable from the main UI. */
        }
        else {
          id_iter->tag |= ID_TAG_INDIRECT;
          id_iter->tag &= ~ID_TAG_EXTERN;
        }
      }
    }
    FOREACH_MAIN_ID_END;
  }

  /* Recompute all ID user-counts if requested. Allows to avoid skipping writing of IDs wrongly
   * detected as unused due to invalid user-count. */
  if (!wd->use_memfile) {
    if (USER_EXPERIMENTAL_TEST(&U, use_recompute_usercount_on_save_debug)) {
      BKE_main_id_refcount_recompute(mainvar, false);
    }
  }

  write_blend_file_header(wd);
  write_renderinfo(wd, mainvar);
  write_thumb(wd, thumb);
  write_global(wd, write_flags, mainvar);

  /* The window-manager and screen often change,
   * avoid thumbnail detecting changes because of this. */
  mywrite_flush(wd);

  const bool is_undo = wd->use_memfile;
  blender::Vector<ID *> local_ids_to_write = gather_local_ids_to_write(mainvar, is_undo);

  if (!is_undo) {
    /* If not writing undo data, properly set directly linked IDs as `ID_TAG_EXTERN`. */
    for (ID *id : local_ids_to_write) {
      BKE_library_foreach_ID_link(mainvar,
                                  id,
                                  write_id_direct_linked_data_process_cb,
                                  nullptr,
                                  IDWALK_READONLY | IDWALK_INCLUDE_UI);
    }

    /* Forcefully ensure we know about all needed override operations. */
    for (ID *id : local_ids_to_write) {
      if (ID_IS_OVERRIDE_LIBRARY_REAL(id) && !ID_IS_OVERRIDE_LIBRARY_VIRTUAL(id)) {
        BKE_lib_override_library_operations_create(mainvar, id, nullptr);
      }
    }
  }

  /* Actually write local data-blocks to the file. */
  for (ID *id : local_ids_to_write) {
    write_id(wd, id);
  }

  /* Write information about libraries and their linked data-blocks. */
  write_libraries(wd, mainvar);

  /* So changes above don't cause a 'DNA1' to be detected as changed on undo. */
  mywrite_flush(wd);

  if (use_userdef) {
    write_userdef(&writer, &U);
  }

  /* Write DNA last, because (to be implemented) test for which structs are written.
   *
   * Note that we *borrow* the pointer to 'DNAstr',
   * so writing each time uses the same address and doesn't cause unnecessary undo overhead. */
  writedata(wd, BLO_CODE_DNA1, size_t(wd->sdna->data_size), wd->sdna->data);

  /* End of file. */
  BHead bhead{};
  bhead.code = BLO_CODE_ENDB;
  write_bhead(wd, bhead);

  return mywrite_end(wd);
}
|
|
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
/**
|
|
|
|
|
* Do reverse file history: `.blend1` -> `.blend2`, `.blend` -> `.blend1` ... etc.
|
|
|
|
|
* \return True on success.
|
|
|
|
|
*/
|
|
|
|
|
static bool do_history(const char *filepath, ReportList *reports)
|
2011-06-02 12:44:59 +00:00
|
|
|
{
|
2023-06-11 16:37:07 +10:00
|
|
|
/* Add 2 because version number maximum is double-digits. */
|
2023-06-11 16:37:08 +10:00
|
|
|
char filepath_tmp1[FILE_MAX + 2], filepath_tmp2[FILE_MAX + 2];
|
|
|
|
|
int version_number = min_ii(99, U.versions);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
if (version_number == 0) {
|
|
|
|
|
return true;
|
2016-06-28 17:35:35 +10:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
if (strlen(filepath) < 2) {
|
2011-06-02 12:44:59 +00:00
|
|
|
BKE_report(reports, RPT_ERROR, "Unable to make version backup: filename too short");
|
2023-06-11 16:37:08 +10:00
|
|
|
return false;
|
2011-06-02 12:44:59 +00:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
while (version_number > 1) {
|
|
|
|
|
SNPRINTF(filepath_tmp1, "%s%d", filepath, version_number - 1);
|
|
|
|
|
if (BLI_exists(filepath_tmp1)) {
|
|
|
|
|
SNPRINTF(filepath_tmp2, "%s%d", filepath, version_number);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
if (BLI_rename_overwrite(filepath_tmp1, filepath_tmp2)) {
|
2016-02-03 17:06:42 +11:00
|
|
|
BKE_report(reports, RPT_ERROR, "Unable to make version backup");
|
2023-06-11 16:37:08 +10:00
|
|
|
return false;
|
2016-02-03 17:06:42 +11:00
|
|
|
}
|
2012-10-21 05:46:41 +00:00
|
|
|
}
|
2023-06-11 16:37:08 +10:00
|
|
|
version_number--;
|
2011-06-02 12:44:59 +00:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
/* Needed when `version_number == 1`. */
|
|
|
|
|
if (BLI_exists(filepath)) {
|
|
|
|
|
SNPRINTF(filepath_tmp1, "%s%d", filepath, version_number);
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
if (BLI_rename_overwrite(filepath, filepath_tmp1)) {
|
2016-02-03 17:06:42 +11:00
|
|
|
BKE_report(reports, RPT_ERROR, "Unable to make version backup");
|
2023-06-11 16:37:08 +10:00
|
|
|
return false;
|
2016-02-03 17:06:42 +11:00
|
|
|
}
|
2011-06-02 12:44:59 +00:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2023-06-11 16:37:08 +10:00
|
|
|
return true;
|
2011-06-02 12:44:59 +00:00
|
|
|
}
|
|
|
|
|
|
2023-08-28 15:34:38 +02:00
|
|
|
/* Sanity checks on Main data before it is written to disk; skipped for un-locked
 * (presumably partial/temporary) Mains. */
static void write_file_main_validate_pre(Main *bmain, ReportList *reports)
{
  if (!bmain->lock) {
    return;
  }

  const bool do_debug_io = (G.debug & G_DEBUG_IO) != 0;

  if (do_debug_io) {
    BKE_report(
        reports, RPT_DEBUG, "Checking validity of current .blend file *BEFORE* save to disk");
  }

  BLO_main_validate_shapekeys(bmain, reports);

  /* Name-map corruption is always checked (and fixed), not only in debug-IO mode. */
  if (!BKE_main_namemap_validate_and_fix(bmain)) {
    BKE_report(reports,
               RPT_ERROR,
               "Critical data corruption: Conflicts and/or otherwise invalid data-blocks names "
               "(see console for details)");
  }

  if (do_debug_io) {
    BLO_main_validate_libraries(bmain, reports);
  }
}
|
|
|
|
|
|
|
|
|
|
/* Sanity checks on Main data after it has been written to disk; skipped for un-locked
 * (presumably partial/temporary) Mains. Only active with `--debug-io`. */
static void write_file_main_validate_post(Main *bmain, ReportList *reports)
{
  if (!bmain->lock) {
    return;
  }

  if (G.debug & G_DEBUG_IO) {
    /* This runs after saving, the previous message wrongly said *BEFORE* (copy-paste from
     * #write_file_main_validate_pre). */
    BKE_report(
        reports, RPT_DEBUG, "Checking validity of current .blend file *AFTER* save to disk");
    BLO_main_validate_libraries(bmain, reports);
  }
}
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
/**
 * Write \a mainvar to \a filepath. Writing goes through a temporary `@`-suffixed file which is
 * renamed over the target only on success, so the original file is preserved if we crash.
 * Also handles relative/absolute path remapping, `.blend1`/`.blend2`... backup rotation and
 * pre/post validation.
 *
 * FIX: the asset-edit-file error report used `tempname` before it was initialized (it is only
 * filled with `"%s@"` further down), formatting uninitialized stack memory into the message;
 * it now reports \a filepath.
 *
 * \param filepath: Absolute destination path.
 * \param write_flags: `G_FILE_...` flags.
 * \param params: Write options (remap mode, save-versions, save-as-copy, userdef, thumbnail).
 * \param reports: Destination for error/debug reports.
 * \param ww: Low-level file output wrapper, opened and closed by this function.
 * \return True on success.
 */
static bool BLO_write_file_impl(Main *mainvar,
                                const char *filepath,
                                const int write_flags,
                                const BlendFileWriteParams *params,
                                ReportList *reports,
                                WriteWrap &ww)
{
  BLI_assert(!BLI_path_is_rel(filepath));
  BLI_assert(BLI_path_is_abs_from_cwd(filepath));

  char tempname[FILE_MAX + 1];

  eBLO_WritePathRemap remap_mode = params->remap_mode;
  const bool use_save_versions = params->use_save_versions;
  const bool use_save_as_copy = params->use_save_as_copy;
  const bool use_userdef = params->use_userdef;
  const BlendThumbnail *thumb = params->thumb;
  const bool relbase_valid = (mainvar->filepath[0] != '\0');

  /* Extra protection: Never save a non asset file as asset file. Otherwise a normal file is turned
   * into an asset file, which can result in data loss because the asset system will allow editing
   * this file from the UI, regenerating its content with just the asset and it dependencies. */
  if ((write_flags & G_FILE_ASSET_EDIT_FILE) && !mainvar->is_asset_edit_file) {
    /* NOTE: report the destination path; `tempname` is not initialized yet at this point. */
    BKE_reportf(reports, RPT_ERROR, "Cannot save normal file (%s) as asset system file", filepath);
    return false;
  }

  /* Path backup/restore. */
  void *path_list_backup = nullptr;
  const eBPathForeachFlag path_list_flag = (BKE_BPATH_FOREACH_PATH_SKIP_LINKED |
                                            BKE_BPATH_FOREACH_PATH_SKIP_MULTIFILE);

  write_file_main_validate_pre(mainvar, reports);

  /* Open temporary file, so we preserve the original in case we crash. */
  SNPRINTF(tempname, "%s@", filepath);

  if (ww.open(tempname) == false) {
    BKE_reportf(
        reports, RPT_ERROR, "Cannot open file %s for writing: %s", tempname, strerror(errno));
    return false;
  }

  if (remap_mode == BLO_WRITE_PATH_REMAP_ABSOLUTE) {
    /* Paths will already be absolute, no remapping to do. */
    if (relbase_valid == false) {
      remap_mode = BLO_WRITE_PATH_REMAP_NONE;
    }
  }

  /* Remapping of relative paths to new file location. */
  if (remap_mode != BLO_WRITE_PATH_REMAP_NONE) {
    if (remap_mode == BLO_WRITE_PATH_REMAP_RELATIVE) {
      /* Make all relative as none of the existing paths can be relative in an unsaved document. */
      if (relbase_valid == false) {
        remap_mode = BLO_WRITE_PATH_REMAP_RELATIVE_ALL;
      }
    }

    /* The source path only makes sense to set if the file was saved (`relbase_valid`). */
    char dir_src[FILE_MAX];
    char dir_dst[FILE_MAX];

    /* Normalize the paths in case there is some subtle difference (so they can be compared). */
    if (relbase_valid) {
      BLI_path_split_dir_part(mainvar->filepath, dir_src, sizeof(dir_src));
      BLI_path_normalize(dir_src);
    }
    else {
      dir_src[0] = '\0';
    }
    BLI_path_split_dir_part(filepath, dir_dst, sizeof(dir_dst));
    BLI_path_normalize(dir_dst);

    /* Only for relative, not relative-all, as this means making existing paths relative. */
    if (remap_mode == BLO_WRITE_PATH_REMAP_RELATIVE) {
      if (relbase_valid && (BLI_path_cmp(dir_dst, dir_src) == 0)) {
        /* Saved to same path. Nothing to do. */
        remap_mode = BLO_WRITE_PATH_REMAP_NONE;
      }
    }
    else if (remap_mode == BLO_WRITE_PATH_REMAP_ABSOLUTE) {
      if (relbase_valid == false) {
        /* Unsaved, all paths are absolute. Even if the user manages to set a relative path,
         * there is no base-path that can be used to make it absolute. */
        remap_mode = BLO_WRITE_PATH_REMAP_NONE;
      }
    }

    if (remap_mode != BLO_WRITE_PATH_REMAP_NONE) {
      /* Some path processing (e.g. with libraries) may use the current `main->filepath`, if this
       * is not matching the path currently used for saving, unexpected paths corruptions can
       * happen. See #98201. */
      char mainvar_filepath_orig[FILE_MAX];
      STRNCPY(mainvar_filepath_orig, mainvar->filepath);
      STRNCPY(mainvar->filepath, filepath);

      /* Check if we need to backup and restore paths. */
      if (UNLIKELY(use_save_as_copy)) {
        path_list_backup = BKE_bpath_list_backup(mainvar, path_list_flag);
      }

      switch (remap_mode) {
        case BLO_WRITE_PATH_REMAP_RELATIVE:
          /* Saved, make relative paths relative to new location (if possible). */
          BLI_assert(relbase_valid);
          BKE_bpath_relative_rebase(mainvar, dir_src, dir_dst, nullptr);
          break;
        case BLO_WRITE_PATH_REMAP_RELATIVE_ALL:
          /* Make all relative (when requested or unsaved). */
          BKE_bpath_relative_convert(mainvar, dir_dst, nullptr);
          break;
        case BLO_WRITE_PATH_REMAP_ABSOLUTE:
          /* Make all absolute (when requested or unsaved). */
          BLI_assert(relbase_valid);
          BKE_bpath_absolute_convert(mainvar, dir_src, nullptr);
          break;
        case BLO_WRITE_PATH_REMAP_NONE:
          BLI_assert_unreachable(); /* Unreachable. */
          break;
      }

      STRNCPY(mainvar->filepath, mainvar_filepath_orig);
    }
  }

  /* Actual file writing. */
  const bool err = write_file_handle(
      mainvar, &ww, nullptr, nullptr, write_flags, use_userdef, thumb);

  ww.close();

  if (UNLIKELY(path_list_backup)) {
    BKE_bpath_list_restore(mainvar, path_list_flag, path_list_backup);
    BKE_bpath_list_free(path_list_backup);
  }

  if (err) {
    BKE_report(reports, RPT_ERROR, strerror(errno));
    remove(tempname);

    return false;
  }

  /* File save to temporary file was successful, now do reverse file history
   * (move `.blend1` -> `.blend2`, `.blend` -> `.blend1` .. etc). */
  if (use_save_versions) {
    if (!do_history(filepath, reports)) {
      BKE_report(reports, RPT_ERROR, "Version backup failed (file saved with @)");
      return false;
    }
  }

  if (BLI_rename_overwrite(tempname, filepath) != 0) {
    BKE_report(reports, RPT_ERROR, "Cannot change old file (file saved with @)");
    return false;
  }

  write_file_main_validate_post(mainvar, reports);

  return true;
}
|
|
|
|
|
|
2023-11-07 11:31:02 +11:00
|
|
|
/** \} */
|
|
|
|
|
|
2023-10-19 01:54:00 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name File Writing (Public)
|
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
|
|
bool BLO_write_file(Main *mainvar,
                    const char *filepath,
                    const int write_flags,
                    const BlendFileWriteParams *params,
                    ReportList *reports)
{
  /* The raw (uncompressed) writer is always needed: it is either used directly,
   * or serves as the underlying sink for the zstd-compressing wrapper. */
  RawWriteWrap raw_wrap;

  if ((write_flags & G_FILE_COMPRESS) == 0) {
    /* Uncompressed save: write straight through the raw wrapper. */
    return BLO_write_file_impl(mainvar, filepath, write_flags, params, reports, raw_wrap);
  }

  /* Compressed save: layer a zstd stream on top of the raw writer. */
  ZstdWriteWrap zstd_wrap(raw_wrap);
  return BLO_write_file_impl(mainvar, filepath, write_flags, params, reports, zstd_wrap);
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
bool BLO_write_file_mem(Main *mainvar, MemFile *compare, MemFile *current, const int write_flags)
{
  /* User preferences are never written into in-memory (undo) files. */
  const bool use_userdef = false;

  /* No file write-wrap and no thumbnail: output goes to `current`,
   * using `compare` to detect unchanged chunks. */
  const bool failed = write_file_handle(
      mainvar, nullptr, compare, current, write_flags, use_userdef, nullptr);

  return !failed;
}
|
2018-04-14 13:17:11 +02:00
|
|
|
|
2023-04-14 10:35:31 +02:00
|
|
|
/*
|
|
|
|
|
* API to write chunks of data.
|
|
|
|
|
*/
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void BLO_write_raw(BlendWriter *writer, const size_t size_in_bytes, const void *data_ptr)
{
  /* Emit an untyped DATA block: `size_in_bytes` bytes copied verbatim from `data_ptr`. */
  WriteData *wd = writer->wd;
  writedata(wd, BLO_CODE_DATA, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
|
|
|
|
void BLO_write_struct_by_name(BlendWriter *writer, const char *struct_name, const void *data_ptr)
{
  /* A single struct is simply an array of length one. */
  const int64_t single_element = 1;
  BLO_write_struct_array_by_name(writer, struct_name, single_element, data_ptr);
}
|
|
|
|
|
|
|
|
|
|
void BLO_write_struct_array_by_name(BlendWriter *writer,
                                    const char *struct_name,
                                    const int64_t array_size,
                                    const void *data_ptr)
{
  /* Resolve the DNA struct index from its name, then delegate to the id-based writer. */
  const int struct_id = BLO_get_struct_id_by_name(writer, struct_name);
  if (UNLIKELY(struct_id == -1)) {
    /* Unknown struct name: log and skip the write rather than emitting bad data. */
    CLOG_ERROR(&LOG, "Can't find SDNA code <%s>", struct_name);
    return;
  }
  BLO_write_struct_array_by_id(writer, struct_id, array_size, data_ptr);
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void BLO_write_struct_by_id(BlendWriter *writer, const int struct_id, const void *data_ptr)
{
  /* Write exactly one struct of the given DNA index as a DATA block. */
  const int count = 1;
  writestruct_nr(writer->wd, BLO_CODE_DATA, struct_id, count, data_ptr);
}
|
|
|
|
|
|
2020-06-05 21:44:14 +02:00
|
|
|
void BLO_write_struct_at_address_by_id(BlendWriter *writer,
                                       const int struct_id,
                                       const void *address,
                                       const void *data_ptr)
{
  /* Same as the `_with_filecode` variant, using the default DATA file-code. */
  const int filecode = BLO_CODE_DATA;
  BLO_write_struct_at_address_by_id_with_filecode(writer, filecode, struct_id, address, data_ptr);
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void BLO_write_struct_at_address_by_id_with_filecode(BlendWriter *writer,
                                                     const int filecode,
                                                     const int struct_id,
                                                     const void *address,
                                                     const void *data_ptr)
{
  /* Write one struct whose contents come from `data_ptr` while `address` is recorded
   * as the block's pointer (presumably the "old" pointer stored in the file — the
   * underlying `writestruct_at_address_nr` defines the exact semantics). */
  const int count = 1;
  writestruct_at_address_nr(writer->wd, filecode, struct_id, count, address, data_ptr);
}
|
|
|
|
|
|
2020-06-05 11:44:36 +02:00
|
|
|
void BLO_write_struct_array_by_id(BlendWriter *writer,
|
2024-11-18 17:55:24 +01:00
|
|
|
const int struct_id,
|
2024-11-22 12:35:27 +01:00
|
|
|
const int64_t array_size,
|
2020-06-05 11:44:36 +02:00
|
|
|
const void *data_ptr)
|
|
|
|
|
{
|
2023-04-08 12:42:40 +02:00
|
|
|
writestruct_nr(writer->wd, BLO_CODE_DATA, struct_id, array_size, data_ptr);
|
2020-06-05 11:44:36 +02:00
|
|
|
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void BLO_write_struct_array_at_address_by_id(BlendWriter *writer,
                                             const int struct_id,
                                             const int64_t array_size,
                                             const void *address,
                                             const void *data_ptr)
{
  /* Array variant of `BLO_write_struct_at_address_by_id`: data comes from `data_ptr`,
   * the stored block pointer is `address`. */
  WriteData *wd = writer->wd;
  writestruct_at_address_nr(wd, BLO_CODE_DATA, struct_id, array_size, address, data_ptr);
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void BLO_write_struct_list_by_id(BlendWriter *writer, const int struct_id, const ListBase *list)
{
  /* Write every element of the linked list as structs of the given DNA index. */
  WriteData *wd = writer->wd;
  writelist_nr(wd, BLO_CODE_DATA, struct_id, list);
}
|
|
|
|
|
|
|
|
|
|
void BLO_write_struct_list_by_name(BlendWriter *writer, const char *struct_name, ListBase *list)
{
  /* Resolve the DNA struct index from its name, then write the whole list. */
  const int struct_id = BLO_get_struct_id_by_name(writer, struct_name);
  if (UNLIKELY(struct_id == -1)) {
    /* Unknown struct name: log and skip the write rather than emitting bad data. */
    CLOG_ERROR(&LOG, "Can't find SDNA code <%s>", struct_name);
    return;
  }
  BLO_write_struct_list_by_id(writer, struct_id, list);
}
|
|
|
|
|
|
2024-11-18 17:55:24 +01:00
|
|
|
void blo_write_id_struct(BlendWriter *writer,
                         const int struct_id,
                         const void *id_address,
                         const ID *id)
{
  /* ID blocks use the two-character ID type code (derived from the ID name via `GS`)
   * as their file-code, instead of the generic DATA code. */
  const int filecode = GS(id->name);
  writestruct_at_address_nr(writer->wd, filecode, struct_id, 1, id_address, id);
}
|
|
|
|
|
|
2024-08-26 11:34:42 +10:00
|
|
|
int BLO_get_struct_id_by_name(const BlendWriter *writer, const char *struct_name)
|
2020-06-05 11:44:36 +02:00
|
|
|
{
|
2023-09-25 12:39:40 +10:00
|
|
|
int struct_id = DNA_struct_find_with_alias(writer->wd->sdna, struct_name);
|
2020-06-05 11:44:36 +02:00
|
|
|
return struct_id;
|
|
|
|
|
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_char_array(BlendWriter *writer, const int64_t num, const char *data_ptr)
{
  /* Typed convenience wrapper: `num` chars written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(char) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_int8_array(BlendWriter *writer, const int64_t num, const int8_t *data_ptr)
{
  /* Typed convenience wrapper: `num` int8 values written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(int8_t) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-12-24 17:05:26 +01:00
|
|
|
void BLO_write_int16_array(BlendWriter *writer, const int64_t num, const int16_t *data_ptr)
{
  /* Typed convenience wrapper: `num` int16 values written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(int16_t) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_uint8_array(BlendWriter *writer, const int64_t num, const uint8_t *data_ptr)
{
  /* Typed convenience wrapper: `num` uint8 values written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(uint8_t) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_int32_array(BlendWriter *writer, const int64_t num, const int32_t *data_ptr)
{
  /* Typed convenience wrapper: `num` int32 values written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(int32_t) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_uint32_array(BlendWriter *writer, const int64_t num, const uint32_t *data_ptr)
{
  /* Typed convenience wrapper: `num` uint32 values written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(uint32_t) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_float_array(BlendWriter *writer, const int64_t num, const float *data_ptr)
{
  /* Typed convenience wrapper: `num` floats written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(float) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_double_array(BlendWriter *writer, const int64_t num, const double *data_ptr)
{
  /* Typed convenience wrapper: `num` doubles written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(double) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_pointer_array(BlendWriter *writer, const int64_t num, const void *data_ptr)
{
  /* Typed convenience wrapper: `num` pointers written as a raw byte blob.
   * Pointer size depends on the writing platform (4 or 8 bytes). */
  const size_t size_in_bytes = sizeof(void *) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2024-11-22 12:35:27 +01:00
|
|
|
void BLO_write_float3_array(BlendWriter *writer, const int64_t num, const float *data_ptr)
{
  /* Typed convenience wrapper: `num` float-triplets written as a raw byte blob. */
  const size_t size_in_bytes = sizeof(float[3]) * size_t(num);
  BLO_write_raw(writer, size_in_bytes, data_ptr);
}
|
|
|
|
|
|
2020-09-20 18:41:50 +02:00
|
|
|
void BLO_write_string(BlendWriter *writer, const char *data_ptr)
{
  /* Null pointers are silently skipped (nothing is written for them). */
  if (data_ptr == nullptr) {
    return;
  }
  /* The `+ 1` includes the null terminator in the written block. */
  BLO_write_raw(writer, strlen(data_ptr) + 1, data_ptr);
}
|
|
|
|
|
|
2024-02-29 17:14:58 +01:00
|
|
|
void BLO_write_shared(BlendWriter *writer,
                      const void *data,
                      const size_t approximate_size_in_bytes,
                      const blender::ImplicitSharingInfo *sharing_info,
                      const blender::FunctionRef<void()> write_fn)
{
  /* Nothing to do for null data. */
  if (data == nullptr) {
    return;
  }

  /* Undo path: instead of serializing shared data, store a (shared) reference to it
   * in the memfile's shared storage, so the undo-step can reuse it directly. */
  if (BLO_write_is_undo(writer) && sharing_info != nullptr) {
    MemFile &memfile = *writer->wd->mem.written_memfile;
    if (memfile.shared_storage == nullptr) {
      /* Lazily create the shared storage on first use. */
      memfile.shared_storage = MEM_new<MemFileSharedStorage>(__func__);
    }
    const bool newly_added = memfile.shared_storage->map.add(data, sharing_info);
    if (newly_added) {
      /* The undo-step takes (shared) ownership of the data, which also makes it immutable. */
      sharing_info->add_user();
      /* This size is an estimate, but good enough to count data with many users less. */
      memfile.size += approximate_size_in_bytes / sharing_info->strong_users();
      return;
    }
    /* Already referenced by this undo-step; fall through to the regular path. */
  }

  /* Regular file write: shared data is only serialized once per ID. */
  if (sharing_info != nullptr && !writer->wd->per_id_written_shared_addresses.add(data)) {
    /* Was written already. */
    return;
  }

  write_fn();
}
|
|
|
|
|
|
2020-06-05 11:44:36 +02:00
|
|
|
bool BLO_write_is_undo(BlendWriter *writer)
{
  /* Undo-steps are written to an in-memory file rather than to disk. */
  const WriteData *wd = writer->wd;
  return wd->use_memfile;
}
|
|
|
|
|
|
2018-04-14 13:17:11 +02:00
|
|
|
/** \} */
|