#ifndef sktext_gpu_SubRunAllocator_DEFINED
#define sktext_gpu_SubRunAllocator_DEFINED
// ...

class BagOfBytes {
public:
    BagOfBytes(char* block, size_t blockSize, size_t firstHeapAllocation);
    explicit BagOfBytes(size_t firstHeapAllocation = 0);

    BagOfBytes(BagOfBytes&& that)
            : fEndByte{std::exchange(that.fEndByte, nullptr)}
            , fCapacity{that.fCapacity}
            , fFibProgression{that.fFibProgression} {}
    // Round a requested size up to the smallest size that also covers per-block overhead and
    // alignment on this platform.
    static constexpr int PlatformMinimumSizeWithOverhead(int requestedSize, int assumedAlignment) {
        return MinimumSizeWithOverhead(
                requestedSize, assumedAlignment, sizeof(Block), kMaxAlignment);
    }

    static constexpr int MinimumSizeWithOverhead(
            int requestedSize, int assumedAlignment, int blockSize, int maxAlignment) {
        const int minAlignment = std::min(maxAlignment, assumedAlignment);

        int minimumSize = SkToInt(AlignUp(requestedSize, minAlignment))
                          + blockSize
                          + maxAlignment - minAlignment;

        // Round large blocks (>= 32K) up to the next 4K boundary.
        constexpr int k32K = (1 << 15);
        if (minimumSize >= k32K) {
            minimumSize = SkToInt(AlignUp(minimumSize, k4K));
        }

        return minimumSize;
    }
    // Returns true if n Ts fit in the largest byte count the arena can hand out.
    template <typename T>
    static bool WillCountFit(int n) {
        constexpr int kMaxN = kMaxByteSize / sizeof(T);
        return 0 <= n && n < kMaxN;
    }

    // Returns a pointer to memory suitable for holding n Ts.
    template <typename T>
    char* allocateBytesFor(int n = 1) {
        static_assert(alignof(T) <= kMaxAlignment, "Alignment is too big for arena");
        static_assert(sizeof(T) < kMaxByteSize, "Size is too big for arena");

        int size = n ? n * sizeof(T) : 1;
        return this->allocateBytes(size, alignof(T));
    }

    void* alignedBytes(int unsafeSize, int unsafeAlignment);
private:
    inline static constexpr int kMaxAlignment = std::max(16, (int)alignof(std::max_align_t));
    inline static constexpr int k4K = (1 << 12);

    // The alignment assumed for raw allocations; some platforms force 8-byte alignment.
#if !defined(SK_FORCE_8_BYTE_ALIGNMENT)
    static constexpr int kAllocationAlignment = alignof(std::max_align_t);
#else
    static constexpr int kAllocationAlignment = 8;
#endif

    static constexpr size_t AlignUp(int size, int alignment) {
        return (size + (alignment - 1)) & -alignment;
    }
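
    // Worked example (illustrative, not from the original header): AlignUp rounds a size up to
    // the next multiple of a power-of-two alignment; adding (alignment - 1) steps past the
    // previous boundary and masking with -alignment clears the low bits.
    //     AlignUp(13, 8) == 16
    //     AlignUp(16, 8) == 16
    //     AlignUp(1, 16) == 16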
    class Block {
    public:
        Block(char* previous, char* startOfBlock);

        // The start of the originally allocated bytes, and the pointer to the previous Block.
        char* const fBlockStart;
        Block* const fPrevious;
    };
    // fCapacity is the number of bytes remaining in the current block; the next object is carved
    // out at fEndByte - fCapacity.
    char* allocateBytes(int size, int alignment) {
        fCapacity = fCapacity & -alignment;
        if (fCapacity < size) {
            this->needMoreBytes(size, alignment);
        }
        char* const ptr = fEndByte - fCapacity;
        SkASSERT(((intptr_t)ptr & (alignment - 1)) == 0);
        fCapacity -= size;
        return ptr;
    }
    // Set fEndByte and fCapacity for a new block starting at bytes with the given size.
    void setupBytesAndCapacity(char* bytes, int size);

    // Get a new block big enough to satisfy the size and alignment request.
    void needMoreBytes(int size, int alignment);

    // The current end of allocated data is fEndByte - fCapacity; the Block bookkeeping for this
    // memory lives at fEndByte.
    char* fEndByte{nullptr};
    // ...
};
template <typename T>
class SubRunInitializer {
public:
    SubRunInitializer(void* memory) : fMemory{memory} {}
    ~SubRunInitializer() {
        // If initialize() was never called, release the raw allocation.
        ::operator delete(fMemory);
    }
    template <typename... Args>
    T* initialize(Args&&... args) {
        // Construct the T in the held memory; the bytes are handed off to the new object.
        return new (std::exchange(fMemory, nullptr)) T(std::forward<Args>(args)...);
    }

private:
    void* fMemory;
};
// SubRunAllocator hands out memory from a BagOfBytes; objects that need destruction come back as
// unique_ptrs whose deleters run the destructors, while the arena reclaims the bytes itself.
class SubRunAllocator {
public:
    struct Destroyer {
        template <typename T>
        void operator()(T* ptr) { ptr->~T(); }
    };

    struct ArrayDestroyer {
        int n;
        template <typename T>
        void operator()(T* ptr) {
            for (int i = 0; i < n; i++) { ptr[i].~T(); }
        }
    };
    // Allocate one heap block that holds both a T and the arena that will serve its data.
    template <typename T>
    static std::tuple<SubRunInitializer<T>, int, SubRunAllocator>
    AllocateClassMemoryAndArena(int allocSizeHint) {
        // extraSize is the arena budget placed after the T, derived from allocSizeHint.
        // ...
        int totalMemorySize = sizeof(T) + extraSize;

        void* memory = ::operator new (totalMemorySize);
        SubRunAllocator alloc{SkTAddOffset<char>(memory, sizeof(T)), extraSize, extraSize/2};
        return {memory, totalMemorySize, std::move(alloc)};
    }
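
    // Usage sketch (illustrative, not from this header): a hypothetical MySubRun class can be
    // co-allocated with the arena that will hold its vertex data:
    //
    //     auto [initializer, totalMemoryUsed, alloc] =
    //             SubRunAllocator::AllocateClassMemoryAndArena<MySubRun>(vertexDataSizeHint);
    //     MySubRun* subRun = initializer.initialize(std::move(alloc) /*, other ctor args */);
    //
    // MySubRun and vertexDataSizeHint are placeholders; the pattern is one heap allocation for
    // the object plus its arena, with the object placement-newed through the SubRunInitializer.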
    template <typename T, typename... Args>
    T* makePOD(Args&&... args) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUnique.");
        char* bytes = fAlloc.template allocateBytesFor<T>();
        return new (bytes) T(std::forward<Args>(args)...);
    }
    template <typename T, typename... Args>
    std::unique_ptr<T, Destroyer> makeUnique(Args&&... args) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePOD.");
        char* bytes = fAlloc.template allocateBytesFor<T>();
        return std::unique_ptr<T, Destroyer>{new (bytes) T(std::forward<Args>(args)...)};
    }
    template <typename T>
    T* makePODArray(int n) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUniqueArray.");
        return reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
    }
    template <typename T>
    SkSpan<T> makePODSpan(SkSpan<const T> s) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUniqueArray.");
        // ...
        T* result = this->makePODArray<T>(SkTo<int>(s.size()));
        memcpy(result, s.data(), s.size_bytes());
        return {result, s.size()};
    }
    template <typename T, typename Src, typename Map>
    SkSpan<T> makePODArray(const Src& src, Map map) {
        static_assert(HasNoDestructor<T>, "This is not POD. Use makeUniqueArray.");
        int size = SkTo<int>(src.size());
        T* result = this->makePODArray<T>(size);
        for (int i = 0; i < size; i++) {
            new (&result[i]) T(map(src[i]));
        }
        return {result, src.size()};
    }
    template <typename T>
    std::unique_ptr<T[], ArrayDestroyer> makeUniqueArray(int n) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePODArray.");
        T* array = reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
        for (int i = 0; i < n; i++) {
            new (&array[i]) T;
        }
        return std::unique_ptr<T[], ArrayDestroyer>{array, ArrayDestroyer{n}};
    }
    template <typename T, typename I>
    std::unique_ptr<T[], ArrayDestroyer> makeUniqueArray(int n, I initializer) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePODArray.");
        T* array = reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(n));
        for (int i = 0; i < n; i++) {
            new (&array[i]) T(initializer(i));
        }
        return std::unique_ptr<T[], ArrayDestroyer>{array, ArrayDestroyer{n}};
    }
    template <typename T, typename U, typename Map>
    std::unique_ptr<T[], ArrayDestroyer> makeUniqueArray(SkSpan<const U> src, Map map) {
        static_assert(!HasNoDestructor<T>, "This is POD. Use makePODArray.");
        T* array = reinterpret_cast<T*>(fAlloc.template allocateBytesFor<T>(src.size()));
        for (size_t i = 0; i < src.size(); i++) {
            new (&array[i]) T(map(src[i]));
        }
        return std::unique_ptr<T[], ArrayDestroyer>{array, ArrayDestroyer{SkTo<int>(src.size())}};
    }

    void* alignedBytes(int size, int alignment);

private:
    BagOfBytes fAlloc;
};
template <size_t InlineStorageSize, size_t InlineStorageAlignment>
class STSubRunAllocator : public SubRunAllocator {
public:
    explicit STSubRunAllocator(size_t firstHeapAllocation = InlineStorageSize);
private:
    // Inline storage, padded from (InlineStorageSize, InlineStorageAlignment) for block overhead.
    // ...
};
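
// Usage sketch (illustrative, not from this header): the maker functions split on whether T needs
// a destructor. Vertex, Glyph, and kCount below are hypothetical.
//
//     STSubRunAllocator<256, 16> alloc;
//
//     // Trivially destructible data goes through the POD path; nothing is ever destroyed.
//     Vertex* verts = alloc.makePODArray<Vertex>(kCount);
//
//     // Types with destructors come back as unique_ptrs whose deleters run the destructors,
//     // while the arena itself reclaims the bytes when it goes away.
//     auto glyph  = alloc.makeUnique<Glyph>();
//     auto glyphs = alloc.makeUniqueArray<Glyph>(kCount);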