BLI: Add bit span operations and bounded bit spans
Most of this patch is by Jacques Lucke, from the simulation branch.

This commit adds generic expression evaluation for bit spans, generalizing the optimizations that avoid processing a single bit at a time. Operations like "for each 1 index", "or", and "and" are already implemented in this pull request. Bits in full integers are processed 64 at a time, then the remaining bits are processed all at once. These operations also make it possible to implement a `copy_from` method for bit spans.

Currently this optimized evaluation is only implemented for the simpler bounded bit spans. Bounded bit spans have constraints on their bit ranges that make them more efficient to process: large spans must start at the beginning of the first int, and small spans must start and end within the first int. Knowing these constraints at compile time reduces the number of edge cases in the operations, but mainly it allows skipping the alignment step between multiple spans with different offsets.

Pull Request: https://projects.blender.org/blender/blender/pulls/107408
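For context, a minimal usage sketch (not part of this commit) of how the new operations compose. It assumes the `BitVector(size, value)` constructor and the includes shown; names like `example` and `found` are only illustrative:

#include "BLI_bit_span_ops.hh"
#include "BLI_bit_vector.hh"
#include "BLI_vector.hh"

namespace blender::bits {

static void example()
{
  /* Two vectors of 1000 bits. #BitVector converts to bounded spans, so the operations
   * below process 64 bits at a time instead of single bits. */
  BitVector<> a(1000, false);
  BitVector<> b(1000, false);
  b[3].set();
  b[900].set();

  a |= b; /* In-place "or", evaluated one #BitInt at a time. */

  Vector<int64_t> found;
  foreach_1_index(a, [&](const int64_t i) { found.append(i); });
  /* `found` now contains 3 and 900. */
}

}  // namespace blender::bits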
@@ -214,6 +214,22 @@ class MutableBitRef {
    /* Optionally set it again. The -1 turns a 1 into `0x00...` and a 0 into `0xff...`. */
             | (mask_ & ~(value_int - 1));
  }

  MutableBitRef &operator|=(const bool value)
  {
    if (value) {
      this->set();
    }
    return *this;
  }

  MutableBitRef &operator&=(const bool value)
  {
    if (!value) {
      this->reset();
    }
    return *this;
  }
};

inline std::ostream &operator<<(std::ostream &stream, const BitRef &bit)
@@ -2,8 +2,11 @@

#pragma once

#include <optional>

#include "BLI_bit_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_math_bits.h"
#include "BLI_memory_utils.hh"

namespace blender::bits {

@@ -62,7 +65,7 @@ class MutableBitIterator : public BitIteratorBase {
 * and end at any bit.
 */
class BitSpan {
 private:
 protected:
  /** Base pointer to the integers containing the bits. The actual bit span might start at a much
   * higher address when `bit_range_.start()` is large. */
  const BitInt *data_ = nullptr;

@@ -128,9 +131,75 @@ class BitSpan {
  }
};

/**
 * Checks if the span fulfills the requirements for a bounded span. Bounded spans can often be
 * processed more efficiently, because fewer cases have to be considered when aligning multiple
 * such spans.
 *
 * See comments in the function for the exact requirements.
 */
inline bool is_bounded_span(const BitSpan span)
{
  const int64_t offset = span.bit_range().start();
  const int64_t size = span.size();
  if (offset >= BitsPerInt) {
    /* The data pointer must point at the first int already. If the offset is a multiple of
     * #BitsPerInt, the bit span could theoretically become bounded as well if the data pointer is
     * adjusted. But that is not handled here. */
    return false;
  }
  if (size < BitsPerInt) {
    /** Don't allow small sized spans to cross `BitInt` boundaries. */
    return offset + size <= 64;
  }
  if (offset != 0) {
    /* Start of larger spans must be aligned to `BitInt` boundaries. */
    return false;
  }
  return true;
}

/**
 * Same as #BitSpan but fulfills the requirements mentioned on #is_bounded_span.
 */
class BoundedBitSpan : public BitSpan {
 public:
  BoundedBitSpan() = default;

  BoundedBitSpan(const BitInt *data, const int64_t size_in_bits) : BitSpan(data, size_in_bits)
  {
    BLI_assert(is_bounded_span(*this));
  }

  BoundedBitSpan(const BitInt *data, const IndexRange bit_range) : BitSpan(data, bit_range)
  {
    BLI_assert(is_bounded_span(*this));
  }

  explicit BoundedBitSpan(const BitSpan other) : BitSpan(other)
  {
    BLI_assert(is_bounded_span(*this));
  }

  int64_t offset() const
  {
    return bit_range_.start();
  }

  int64_t full_ints_num() const
  {
    return bit_range_.size() >> BitToIntIndexShift;
  }

  int64_t final_bits_num() const
  {
    return bit_range_.size() & BitIndexMask;
  }
};

/** Same as #BitSpan, but also allows modifying the referenced bits. */
class MutableBitSpan {
 private:
 protected:
  BitInt *data_ = nullptr;
  IndexRange bit_range_ = {0, 0};

@@ -199,6 +268,9 @@ class MutableBitSpan {
  /** Sets all referenced bits to 0. */
  void reset_all();

  void copy_from(const BitSpan other);
  void copy_from(const BoundedBitSpan other);

  /** Sets all referenced bits to either 0 or 1. */
  void set_all(const bool value)
  {
@@ -217,6 +289,82 @@ class MutableBitSpan {
  }
};

/**
 * Same as #MutableBitSpan but fulfills the requirements mentioned on #is_bounded_span.
 */
class MutableBoundedBitSpan : public MutableBitSpan {
 public:
  MutableBoundedBitSpan() = default;

  MutableBoundedBitSpan(BitInt *data, const int64_t size) : MutableBitSpan(data, size)
  {
    BLI_assert(is_bounded_span(*this));
  }

  MutableBoundedBitSpan(BitInt *data, const IndexRange bit_range) : MutableBitSpan(data, bit_range)
  {
    BLI_assert(is_bounded_span(*this));
  }

  explicit MutableBoundedBitSpan(const MutableBitSpan other) : MutableBitSpan(other)
  {
    BLI_assert(is_bounded_span(*this));
  }

  operator BoundedBitSpan() const
  {
    return BoundedBitSpan{BitSpan(*this)};
  }

  int64_t offset() const
  {
    return bit_range_.start();
  }

  int64_t full_ints_num() const
  {
    return bit_range_.size() >> BitToIntIndexShift;
  }

  int64_t final_bits_num() const
  {
    return bit_range_.size() & BitIndexMask;
  }

  void copy_from(const BitSpan other);
  void copy_from(const BoundedBitSpan other);
};

inline std::optional<BoundedBitSpan> try_get_bounded_span(const BitSpan span)
{
  if (is_bounded_span(span)) {
    return BoundedBitSpan(span);
  }
  if (span.bit_range().start() % BitsPerInt == 0) {
    return BoundedBitSpan(span.data() + (span.bit_range().start() >> BitToIntIndexShift),
                          span.size());
  }
  return std::nullopt;
}

/**
 * Overloaded in BLI_bit_vector.hh. The purpose is to make passing #BitVector into bit span
 * operations more efficient (interpreting it as `BoundedBitSpan` instead of just `BitSpan`).
 */
template<typename T> inline T to_best_bit_span(const T &data)
{
  static_assert(is_same_any_v<std::decay_t<T>,
                              BitSpan,
                              MutableBitSpan,
                              BoundedBitSpan,
                              MutableBoundedBitSpan>);
  return data;
}

template<typename... Args>
constexpr bool all_bounded_spans =
    (is_same_any_v<std::decay_t<Args>, BoundedBitSpan, MutableBoundedBitSpan> && ...);

std::ostream &operator<<(std::ostream &stream, const BitSpan &span);
std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span);

@@ -224,5 +372,7 @@ std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span);

namespace blender {
using bits::BitSpan;
using bits::BoundedBitSpan;
using bits::MutableBitSpan;
using bits::MutableBoundedBitSpan;
}  // namespace blender

source/blender/blenlib/BLI_bit_span_ops.hh (new file, 244 lines)
@@ -0,0 +1,244 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#pragma once

#include "BLI_bit_span.hh"

namespace blender::bits {

namespace detail {

/**
 * Evaluates the expression on one or more bit spans and stores the result in the first.
 *
 * The expected type for the expression is:
 * (BitInt ...one_or_more_args) -> BitInt
 */
template<typename ExprFn, typename FirstBitSpanT, typename... BitSpanT>
inline void mix_into_first_expr(ExprFn &&expr,
                                const FirstBitSpanT &first_arg,
                                const BitSpanT &...args)
{
  const int64_t size = first_arg.size();
  BLI_assert(((size == args.size()) && ...));
  if (size == 0) {
    return;
  }

  if constexpr (all_bounded_spans<FirstBitSpanT, BitSpanT...>) {
    BitInt *first_data = first_arg.data();
    const int64_t first_offset = first_arg.offset();
    const int64_t full_ints_num = first_arg.full_ints_num();
    /* Compute expression without any masking, all the spans are expected to be aligned to the
     * beginning of a #BitInt. */
    for (const int64_t i : IndexRange(full_ints_num)) {
      first_data[i] = expr(first_data[i], args.data()[i]...);
    }
    /* Compute expression for the remaining bits. */
    if (const int64_t final_bits = first_arg.final_bits_num()) {
      const BitInt result = expr(first_data[full_ints_num] >> first_offset,
                                 (args.data()[full_ints_num] >> args.offset())...);
      const BitInt mask = mask_range_bits(first_offset, final_bits);
      first_data[full_ints_num] = ((result << first_offset) & mask) |
                                  (first_data[full_ints_num] & ~mask);
    }
  }
  else {
    /* Fallback for arbitrary bit spans. This could be implemented more efficiently but adds more
     * complexity and is not necessary yet. */
    for (const int64_t i : IndexRange(size)) {
      const bool result = expr(BitInt(first_arg[i].test()), BitInt(args[i].test())...) != 0;
      first_arg[i].set(result);
    }
  }
}

/**
 * Evaluates the expression on one or more bit spans and returns true when the result contains a 1
 * anywhere.
 *
 * The expected type for the expression is:
 * (BitInt ...one_or_more_args) -> BitInt
 */
template<typename ExprFn, typename FirstBitSpanT, typename... BitSpanT>
inline bool any_set_expr(ExprFn &&expr, const FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  const int64_t size = first_arg.size();
  BLI_assert(((size == args.size()) && ...));
  if (size == 0) {
    return false;
  }

  if constexpr (all_bounded_spans<FirstBitSpanT, BitSpanT...>) {
    const BitInt *first_data = first_arg.data();
    const int64_t full_ints_num = first_arg.full_ints_num();
    /* Compute expression without any masking, all the spans are expected to be aligned to the
     * beginning of a #BitInt. */
    for (const int64_t i : IndexRange(full_ints_num)) {
      if (expr(first_data[i], args.data()[i]...) != 0) {
        return true;
      }
    }
    /* Compute expression for the remaining bits. */
    if (const int64_t final_bits = first_arg.final_bits_num()) {
      const BitInt result = expr(first_data[full_ints_num] >> first_arg.offset(),
                                 (args.data()[full_ints_num] >> args.offset())...);
      const BitInt mask = mask_first_n_bits(final_bits);
      if ((result & mask) != 0) {
        return true;
      }
    }
    return false;
  }
  else {
    /* Fallback for arbitrary bit spans. This could be implemented more efficiently but adds more
     * complexity and is not necessary yet. */
    for (const int64_t i : IndexRange(size)) {
      const BitInt result = expr(BitInt(first_arg[i].test()), BitInt(args[i].test())...);
      if (result != 0) {
        return true;
      }
    }
    return false;
  }
}

/**
 * Evaluates the expression on one or more bit spans and calls the `handle` function for each bit
 * index where the result is 1.
 *
 * The expected type for the expression is:
 * (BitInt ...one_or_more_args) -> BitInt
 */
template<typename ExprFn, typename HandleFn, typename FirstBitSpanT, typename... BitSpanT>
inline void foreach_1_index_expr(ExprFn &&expr,
                                 HandleFn &&handle,
                                 const FirstBitSpanT &first_arg,
                                 const BitSpanT &...args)
{
  const int64_t size = first_arg.size();
  BLI_assert(((size == args.size()) && ...));
  if (size == 0) {
    return;
  }

  if constexpr (all_bounded_spans<FirstBitSpanT, BitSpanT...>) {
    const BitInt *first_data = first_arg.data();
    const int64_t full_ints_num = first_arg.full_ints_num();
    /* Iterate over full ints without any bit masks. */
    for (const int64_t int_i : IndexRange(full_ints_num)) {
      BitInt tmp = expr(first_data[int_i], args.data()[int_i]...);
      const int64_t offset = int_i << BitToIntIndexShift;
      while (tmp != 0) {
        static_assert(std::is_same_v<BitInt, uint64_t>);
        const int index = bitscan_forward_uint64(tmp);
        handle(index + offset);
        tmp &= ~mask_single_bit(index);
      }
    }
    /* Iterate over remaining bits. */
    if (const int64_t final_bits = first_arg.final_bits_num()) {
      BitInt tmp = expr(first_data[full_ints_num] >> first_arg.offset(),
                        (args.data()[full_ints_num] >> args.offset())...) &
                   mask_first_n_bits(final_bits);
      const int64_t offset = full_ints_num << BitToIntIndexShift;
      while (tmp != 0) {
        static_assert(std::is_same_v<BitInt, uint64_t>);
        const int index = bitscan_forward_uint64(tmp);
        handle(index + offset);
        tmp &= ~mask_single_bit(index);
      }
    }
  }
  else {
    /* Fallback for arbitrary bit spans. This could be implemented more efficiently but adds more
     * complexity and is not necessary yet. */
    for (const int64_t i : IndexRange(size)) {
      const BitInt result = expr(BitInt(first_arg[i].test()), BitInt(args[i].test())...);
      if (result) {
        handle(i);
      }
    }
  }
}

}  // namespace detail

template<typename ExprFn, typename FirstBitSpanT, typename... BitSpanT>
inline void mix_into_first_expr(ExprFn &&expr,
                                const FirstBitSpanT &first_arg,
                                const BitSpanT &...args)
{
  detail::mix_into_first_expr(expr, to_best_bit_span(first_arg), to_best_bit_span(args)...);
}

template<typename ExprFn, typename FirstBitSpanT, typename... BitSpanT>
inline bool any_set_expr(ExprFn &&expr, const FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  return detail::any_set_expr(expr, to_best_bit_span(first_arg), to_best_bit_span(args)...);
}

template<typename ExprFn, typename HandleFn, typename FirstBitSpanT, typename... BitSpanT>
inline void foreach_1_index_expr(ExprFn &&expr,
                                 HandleFn &&handle,
                                 const FirstBitSpanT &first_arg,
                                 const BitSpanT &...args)
{
  detail::foreach_1_index_expr(
      expr, handle, to_best_bit_span(first_arg), to_best_bit_span(args)...);
}

template<typename FirstBitSpanT, typename... BitSpanT>
inline void inplace_or(FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  mix_into_first_expr([](const auto... x) { return (x | ...); }, first_arg, args...);
}

template<typename FirstBitSpanT, typename... BitSpanT>
inline void copy_from_or(FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  mix_into_first_expr(
      [](auto /*first*/, auto... rest) { return (rest | ...); }, first_arg, args...);
}

template<typename FirstBitSpanT, typename... BitSpanT>
inline void inplace_and(FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  mix_into_first_expr([](const auto... x) { return (x & ...); }, first_arg, args...);
}

template<typename... BitSpanT>
inline void operator|=(MutableBitSpan first_arg, const BitSpanT &...args)
{
  inplace_or(first_arg, args...);
}

template<typename... BitSpanT>
inline void operator|=(MutableBoundedBitSpan first_arg, const BitSpanT &...args)
{
  inplace_or(first_arg, args...);
}

template<typename... BitSpanT>
inline void operator&=(MutableBitSpan first_arg, const BitSpanT &...args)
{
  inplace_and(first_arg, args...);
}

template<typename... BitSpanT>
inline void operator&=(MutableBoundedBitSpan first_arg, const BitSpanT &...args)
{
  inplace_and(first_arg, args...);
}

template<typename... BitSpanT> inline bool has_common_set_bits(const BitSpanT &...args)
{
  return any_set_expr([](const auto... x) { return (x & ...); }, args...);
}

template<typename BitSpanT, typename Fn> inline void foreach_1_index(const BitSpanT &data, Fn &&fn)
{
  foreach_1_index_expr([](const BitInt x) { return x; }, fn, data);
}

}  // namespace blender::bits

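As an aside, the expression helpers above make further operations easy to add in the same style as `inplace_or` and `inplace_and`. A hypothetical sketch (not part of this commit) of an "and not" operation that clears every bit in the first span that is set in any of the other spans:

/* Hypothetical helper; the name `inplace_and_not` does not exist in this patch. It requires at
 * least one source span, like the other operations. */
template<typename FirstBitSpanT, typename... BitSpanT>
inline void inplace_and_not(FirstBitSpanT &first_arg, const BitSpanT &...args)
{
  /* Per 64-bit chunk: keep only the bits that are not set in any of the other spans. */
  mix_into_first_expr(
      [](const auto first, const auto... rest) { return first & ~(rest | ...); },
      first_arg,
      args...);
}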
@@ -170,12 +170,12 @@ class BitVector {
    return move_assign_container(*this, std::move(other));
  }

  operator BitSpan() const
  operator BoundedBitSpan() const
  {
    return {data_, IndexRange(size_in_bits_)};
  }

  operator MutableBitSpan()
  operator MutableBoundedBitSpan()
  {
    return {data_, IndexRange(size_in_bits_)};
  }
@@ -193,6 +193,16 @@ class BitVector {
    return size_in_bits_ == 0;
  }

  BitInt *data()
  {
    return data_;
  }

  const BitInt *data() const
  {
    return data_;
  }

  /**
   * Get a read-only reference to a specific bit.
   */
@@ -357,6 +367,18 @@ class BitVector {
  }
};

template<int64_t InlineBufferCapacity, typename Allocator>
inline BoundedBitSpan to_best_bit_span(const BitVector<InlineBufferCapacity, Allocator> &data)
{
  return data;
}

template<int64_t InlineBufferCapacity, typename Allocator>
inline MutableBoundedBitSpan to_best_bit_span(BitVector<InlineBufferCapacity, Allocator> &data)
{
  return data;
}

}  // namespace blender::bits

namespace blender {

@@ -184,6 +184,7 @@ set(SRC
  BLI_atomic_disjoint_set.hh
  BLI_bit_ref.hh
  BLI_bit_span.hh
  BLI_bit_span_ops.hh
  BLI_bit_vector.hh
  BLI_bitmap.h
  BLI_bitmap_draw_2d.h

@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "BLI_bit_span.hh"
#include "BLI_bit_span_ops.hh"

namespace blender::bits {

@@ -54,6 +55,30 @@ void MutableBitSpan::reset_all()
  }
}

void MutableBitSpan::copy_from(const BitSpan other)
{
  BLI_assert(this->size() == other.size());
  copy_from_or(*this, other);
}

void MutableBitSpan::copy_from(const BoundedBitSpan other)
{
  BLI_assert(this->size() == other.size());
  copy_from_or(*this, other);
}

void MutableBoundedBitSpan::copy_from(const BitSpan other)
{
  BLI_assert(this->size() == other.size());
  copy_from_or(*this, other);
}

void MutableBoundedBitSpan::copy_from(const BoundedBitSpan other)
{
  BLI_assert(this->size() == other.size());
  copy_from_or(*this, other);
}

std::ostream &operator<<(std::ostream &stream, const BitSpan &span)
{
  stream << "(Size: " << span.size() << ", ";

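A note on the copies above, since `copy_from_or` may look surprising: its expression ignores the destination word and just ORs the source words, and the generic evaluation then masks that result into the destination's bit range, so bits outside the span are preserved. A self-contained illustration of the per-chunk effect, with hypothetical names (not part of this commit):

#include <cstdint>

/* Illustration only: the effect of copying `src_word` into the masked part of `dst_word`,
 * roughly what the generic evaluation does for one 64-bit chunk of a bounded span. */
static uint64_t copy_masked_chunk(const uint64_t dst_word,
                                  const uint64_t src_word,
                                  const uint64_t mask)
{
  /* The "or" expression ignores the old destination value, so the result is just the source;
   * masking keeps bits outside the span intact. */
  return (src_word & mask) | (dst_word & ~mask);
}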
@@ -3,6 +3,9 @@
#include <array>

#include "BLI_bit_span.hh"
#include "BLI_bit_span_ops.hh"
#include "BLI_timeit.hh"
#include "BLI_vector.hh"

#include "testing/testing.h"

@@ -142,4 +145,105 @@ TEST(bit_span, SetSliced)
  }
}

TEST(bit_span, IsBounded)
{
  std::array<uint64_t, 10> data;

  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 0)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 1)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 50)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 63)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 64)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 65)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 100)));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), 400)));

  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), IndexRange(0, 3))));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), IndexRange(1, 3))));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), IndexRange(10, 20))));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), IndexRange(63, 1))));
  EXPECT_TRUE(is_bounded_span(BitSpan(data.data(), IndexRange(10, 54))));

  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(1, 64))));
  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(10, 64))));
  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(10, 200))));
  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(60, 5))));
  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(64, 0))));
  EXPECT_FALSE(is_bounded_span(BitSpan(data.data(), IndexRange(70, 5))));
}

TEST(bit_span, CopyFrom)
{
  std::array<uint64_t, 30> src_data;
  uint64_t i = 0;
  for (uint64_t &value : src_data) {
    value = i;
    i += 234589766883;
  }
  const BitSpan src(src_data.data(), src_data.size() * BitsPerInt);

  std::array<uint64_t, 4> dst_data;
  dst_data.fill(-1);
  MutableBitSpan dst(dst_data.data(), 100);
  dst.copy_from(src.slice({401, 100}));

  for (const int i : dst.index_range()) {
    EXPECT_TRUE(dst[i].test() == src[401 + i].test());
  }
}

TEST(bit_span, InPlaceOr)
{
  std::array<uint64_t, 100> data_1;
  MutableBitSpan span_1(data_1.data(), data_1.size() * BitsPerInt);
  for (const int i : span_1.index_range()) {
    span_1[i].set(i % 2 == 0);
  }

  std::array<uint64_t, 100> data_2;
  MutableBitSpan span_2(data_2.data(), data_2.size() * BitsPerInt);
  for (const int i : span_2.index_range()) {
    span_2[i].set(i % 2 != 0);
  }

  span_1 |= span_2;
  for (const int i : span_1.index_range()) {
    EXPECT_TRUE(span_1[i].test());
  }
}

TEST(bit_span, InPlaceAnd)
{
  std::array<uint64_t, 100> data_1;
  MutableBitSpan span_1(data_1.data(), data_1.size() * BitsPerInt);
  for (const int i : span_1.index_range()) {
    span_1[i].set(i % 2 == 0);
  }

  std::array<uint64_t, 100> data_2;
  MutableBitSpan span_2(data_2.data(), data_2.size() * BitsPerInt);
  for (const int i : span_2.index_range()) {
    span_2[i].set(i % 2 != 0);
  }

  span_1 &= span_2;
  for (const int i : span_1.index_range()) {
    EXPECT_FALSE(span_1[i].test());
  }
}

TEST(bit_span, ForEach1)
{
  std::array<uint64_t, 2> data{};
  MutableBitSpan span(data.data(), data.size() * BitsPerInt);
  for (const int i : {1, 28, 37, 86}) {
    span[i].set();
  }

  Vector<int> indices_test;
  foreach_1_index(span.slice({4, span.size() - 4}), [&](const int i) { indices_test.append(i); });

  EXPECT_EQ(indices_test.as_span(), Span({24, 33, 82}));
}

}  // namespace blender::bits::tests