/* SPDX-FileCopyrightText: 2011-2022 Blender Foundation
 *
 * SPDX-License-Identifier: Apache-2.0 */

#pragma once

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

#ifdef WITH_BLENDER_GUARDEDALLOC
#  include "../../guardedalloc/MEM_guardedalloc.h"
#endif

CCL_NAMESPACE_BEGIN

/* Internal use only. */
void util_guarded_mem_alloc(const size_t n);
void util_guarded_mem_free(const size_t n);

/* Guarded allocator for use with the STL. */
template<typename T> class GuardedAllocator {
 public:
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using pointer = T *;
  using const_pointer = const T *;
  using reference = T &;
  using const_reference = const T &;
  using value_type = T;

  GuardedAllocator() = default;
  GuardedAllocator(const GuardedAllocator & /*unused*/) = default;

  T *allocate(const size_t n, const void *hint = nullptr)
  {
    (void)hint;
    size_t size = n * sizeof(T);
    util_guarded_mem_alloc(size);
    if (n == 0) {
      return nullptr;
    }
    T *mem;
#ifdef WITH_BLENDER_GUARDEDALLOC
    /* The C++ standard requires allocation functions to allocate memory
     * suitably aligned for any standard type. This is 16 bytes on 64-bit
     * platforms, as far as we are concerned. We might over-align on 32-bit
     * platforms here, but that should be safe. */
    mem = (T *)MEM_mallocN_aligned(size, 16, "Cycles Alloc");
#else
    mem = (T *)malloc(size);
#endif
    if (mem == nullptr) {
      throw std::bad_alloc();
    }
    return mem;
  }

  void deallocate(T *p, const size_t n)
  {
    util_guarded_mem_free(n * sizeof(T));
    if (p != nullptr) {
#ifdef WITH_BLENDER_GUARDEDALLOC
      MEM_freeN(const_cast<void *>(static_cast<const void *>(p)));
#else
      free(p);
#endif
    }
  }

  T *address(T &x) const
  {
    return &x;
  }

  const T *address(const T &x) const
  {
    return &x;
  }

  GuardedAllocator<T> &operator=(const GuardedAllocator & /*unused*/) = default;

  size_t max_size() const
  {
    return size_t(-1);
  }

  template<class U> struct rebind {
    using other = GuardedAllocator<U>;
  };

  template<class U> GuardedAllocator(const GuardedAllocator<U> & /*unused*/) {}

  template<class U> GuardedAllocator &operator=(const GuardedAllocator<U> & /*unused*/)
  {
    return *this;
  }

  bool operator==(const GuardedAllocator & /*other*/) const
  {
    return true;
  }

  bool operator!=(const GuardedAllocator &other) const
  {
    return !operator==(other);
  }

#ifdef _MSC_VER
  /* Welcome to the black magic here.
   *
   * The issue is that MSVC's C++ standard library allocates a container
   * proxy on any vector initialization, including static vectors which
   * don't hold any data yet. This leads to several issues:
   *
   * - The static initialization order fiasco (global_stats from
   *   util_stats.h might not be initialized yet).
   * - If the main() function changes the allocator type (for example,
   *   this might happen with `blender --debug-memory`), nobody will know
   *   how to convert the already allocated memory to the new guarded
   *   allocator.
   *
   * Here we work around this by making the container proxy not use
   * guarded allocation. A bit fragile, unfortunately. */
  template<> struct rebind<std::_Container_proxy> {
    using other = std::allocator<std::_Container_proxy>;
  };

  operator std::allocator<std::_Container_proxy>() const
  {
    return std::allocator<std::_Container_proxy>();
  }
#endif
};
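
/* Example usage: a minimal sketch, not taken from Cycles itself. Any STL
 * container accepts GuardedAllocator as its allocator template argument, so
 * its allocations are tracked by the guarded statistics:
 *
 *   std::vector<float, GuardedAllocator<float>> pixels;
 *   pixels.resize(1024);  // Allocation goes through util_guarded_mem_alloc().
 */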

/* Get memory usage and peak from the guarded STL allocator. */
size_t util_guarded_get_mem_used();
size_t util_guarded_get_mem_peak();
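
/* Example usage: a sketch of logging the tracked statistics, for instance
 * from debug code (the formatting here is illustrative only):
 *
 *   printf("Guarded mem: %zu bytes in use, %zu bytes at peak\n",
 *          util_guarded_get_mem_used(),
 *          util_guarded_get_mem_peak());
 */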

/* Call the given function and keep track of whether it runs out of memory.
 *
 * If it does run out of memory, stop execution and set the progress state
 * to do a global cancel.
 *
 * It's not fully robust, but good enough to catch obvious issues when
 * running out of memory. */
#define MEM_GUARDED_CALL(progress, func, ...) \
  do { \
    try { \
      (func)(__VA_ARGS__); \
    } \
    catch (std::bad_alloc &) { \
      fprintf(stderr, "Error: ran out of memory!\n"); \
      fflush(stderr); \
      (progress)->set_error("Out of memory"); \
    } \
  } while (false)
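
/* Example usage: a minimal sketch, assuming a `Progress *progress` that
 * provides set_error(); `render_samples`, `scene` and `num_samples` are
 * hypothetical names:
 *
 *   MEM_GUARDED_CALL(progress, render_samples, scene, num_samples);
 *
 * If the wrapped call throws std::bad_alloc, the error is reported through
 * the progress object instead of crashing the process. */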

CCL_NAMESPACE_END