diff --git a/src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorWrapper.h b/src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorWrapper.h
index 1c6212bd47..756175b4e7 100644
--- a/src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorWrapper.h
+++ b/src/gpu/vk/vulkanmemoryallocator/VulkanMemoryAllocatorWrapper.h
@@ -39,7 +39,7 @@
// VMA outside of Skia, the client should instead tell Skia not to use VMA.
// Then they should wrap their own instance of VMA into an implementation of
// Skia's VulkanMemoryAllocator interface, and pass that object into context creation.
-#include "vk_mem_alloc.h" // NO_G3_REWRITE
+#include "include/vk_mem_alloc.h"
#ifdef GR_NEEDED_TO_DEFINE_VULKAN_H
#undef VULKAN_H_
#endif
diff --git a/third_party/vulkanmemoryallocator/include/LICENSE.txt b/third_party/vulkanmemoryallocator/include/LICENSE.txt
new file mode 100644
index 0000000000..dbfe253391
--- /dev/null
+++ b/third_party/vulkanmemoryallocator/include/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/vulkanmemoryallocator/include/vk_mem_alloc.h b/third_party/vulkanmemoryallocator/include/vk_mem_alloc.h
new file mode 100644
index 0000000000..90410b56af
--- /dev/null
+++ b/third_party/vulkanmemoryallocator/include/vk_mem_alloc.h
@@ -0,0 +1,19595 @@
+//
+// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
+#define AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+/** \mainpage Vulkan Memory Allocator
+
+Version 3.0.1-development (2022-03-28)
+
+Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
+License: MIT
+
+API documentation divided into groups: [Modules](modules.html)
+
+\section main_table_of_contents Table of contents
+
+- User guide
+ - \subpage quick_start
+ - [Project setup](@ref quick_start_project_setup)
+ - [Initialization](@ref quick_start_initialization)
+ - [Resource allocation](@ref quick_start_resource_allocation)
+ - \subpage choosing_memory_type
+ - [Usage](@ref choosing_memory_type_usage)
+ - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
+ - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
+ - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
+ - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
+ - \subpage memory_mapping
+ - [Mapping functions](@ref memory_mapping_mapping_functions)
+ - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
+ - [Cache flush and invalidate](@ref memory_mapping_cache_control)
+ - \subpage staying_within_budget
+ - [Querying for budget](@ref staying_within_budget_querying_for_budget)
+ - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
+ - \subpage resource_aliasing
+ - \subpage custom_memory_pools
+ - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
+ - [Linear allocation algorithm](@ref linear_algorithm)
+ - [Free-at-once](@ref linear_algorithm_free_at_once)
+ - [Stack](@ref linear_algorithm_stack)
+ - [Double stack](@ref linear_algorithm_double_stack)
+ - [Ring buffer](@ref linear_algorithm_ring_buffer)
+ - \subpage defragmentation
+ - \subpage statistics
+ - [Numeric statistics](@ref statistics_numeric_statistics)
+ - [JSON dump](@ref statistics_json_dump)
+ - \subpage allocation_annotation
+ - [Allocation user data](@ref allocation_user_data)
+ - [Allocation names](@ref allocation_names)
+ - \subpage virtual_allocator
+ - \subpage debugging_memory_usage
+ - [Memory initialization](@ref debugging_memory_usage_initialization)
+ - [Margins](@ref debugging_memory_usage_margins)
+ - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
+ - \subpage opengl_interop
+- \subpage usage_patterns
+ - [GPU-only resource](@ref usage_patterns_gpu_only)
+ - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
+ - [Readback](@ref usage_patterns_readback)
+ - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
+ - [Other use cases](@ref usage_patterns_other_use_cases)
+- \subpage configuration
+ - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
+ - [Custom host memory allocator](@ref custom_memory_allocator)
+ - [Device memory allocation callbacks](@ref allocation_callbacks)
+ - [Device heap memory limit](@ref heap_memory_limit)
+- Extension support
+ - \subpage vk_khr_dedicated_allocation
+ - \subpage enabling_buffer_device_address
+ - \subpage vk_ext_memory_priority
+ - \subpage vk_amd_device_coherent_memory
+- \subpage general_considerations
+ - [Thread safety](@ref general_considerations_thread_safety)
+ - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
+ - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
+ - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+ - [Features not supported](@ref general_considerations_features_not_supported)
+
+\section main_see_also See also
+
+- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
+- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+
+\defgroup group_init Library initialization
+
+\brief API elements related to the initialization and management of the entire library, especially the #VmaAllocator object.
+
+\defgroup group_alloc Memory allocation
+
+\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, and images.
+The most basic ones are vmaCreateBuffer() and vmaCreateImage().
+
+\defgroup group_virtual Virtual allocator
+
+\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
+for user-defined purpose without allocating any real GPU memory.
+
+\defgroup group_stats Statistics
+
+\brief API elements that query the current status of the allocator, from memory usage and budget to a full dump of the internal state in JSON format.
+See documentation chapter: \ref statistics.
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef VULKAN_H_
+    #include <vulkan/vulkan.h>
+#endif
+
+// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
+// where AAA = major, BBB = minor, CCC = patch.
+// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
+#if !defined(VMA_VULKAN_VERSION)
+ #if defined(VK_VERSION_1_3)
+ #define VMA_VULKAN_VERSION 1003000
+ #elif defined(VK_VERSION_1_2)
+ #define VMA_VULKAN_VERSION 1002000
+ #elif defined(VK_VERSION_1_1)
+ #define VMA_VULKAN_VERSION 1001000
+ #else
+ #define VMA_VULKAN_VERSION 1000000
+ #endif
+#endif
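+
+// A minimal sketch of overriding this macro (editor's illustration, not part of
+// the original header): declare Vulkan 1.2 as the maximum supported version
+// before this header is included, using the AAABBBCCC format described above.
+//
+//     #define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
+//     #include "vk_mem_alloc.h"
+//
+// The version actually used at runtime must still be requested via
+// VmaAllocatorCreateInfo::vulkanApiVersion.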
+
+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+ extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+ extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+ extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+ extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+ extern PFN_vkAllocateMemory vkAllocateMemory;
+ extern PFN_vkFreeMemory vkFreeMemory;
+ extern PFN_vkMapMemory vkMapMemory;
+ extern PFN_vkUnmapMemory vkUnmapMemory;
+ extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+ extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+ extern PFN_vkBindBufferMemory vkBindBufferMemory;
+ extern PFN_vkBindImageMemory vkBindImageMemory;
+ extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+ extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+ extern PFN_vkCreateBuffer vkCreateBuffer;
+ extern PFN_vkDestroyBuffer vkDestroyBuffer;
+ extern PFN_vkCreateImage vkCreateImage;
+ extern PFN_vkDestroyImage vkDestroyImage;
+ extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+ #if VMA_VULKAN_VERSION >= 1001000
+ extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+ extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+ extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+ extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+ extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
+ #endif // #if VMA_VULKAN_VERSION >= 1001000
+#endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+
+#if !defined(VMA_DEDICATED_ALLOCATION)
+ #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+ #define VMA_DEDICATED_ALLOCATION 1
+ #else
+ #define VMA_DEDICATED_ALLOCATION 0
+ #endif
+#endif
+
+#if !defined(VMA_BIND_MEMORY2)
+ #if VK_KHR_bind_memory2
+ #define VMA_BIND_MEMORY2 1
+ #else
+ #define VMA_BIND_MEMORY2 0
+ #endif
+#endif
+
+#if !defined(VMA_MEMORY_BUDGET)
+ #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
+ #define VMA_MEMORY_BUDGET 1
+ #else
+ #define VMA_MEMORY_BUDGET 0
+ #endif
+#endif
+
+// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
+#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
+ #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
+ #define VMA_BUFFER_DEVICE_ADDRESS 1
+ #else
+ #define VMA_BUFFER_DEVICE_ADDRESS 0
+ #endif
+#endif
+
+// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
+#if !defined(VMA_MEMORY_PRIORITY)
+ #if VK_EXT_memory_priority
+ #define VMA_MEMORY_PRIORITY 1
+ #else
+ #define VMA_MEMORY_PRIORITY 0
+ #endif
+#endif
+
+// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
+#if !defined(VMA_EXTERNAL_MEMORY)
+ #if VK_KHR_external_memory
+ #define VMA_EXTERNAL_MEMORY 1
+ #else
+ #define VMA_EXTERNAL_MEMORY 0
+ #endif
+#endif
+
+// Define these macros to decorate all public functions with additional code,
+// before and after returned type, appropriately. This may be useful for
+// exporting the functions when compiling VMA as a separate library. Example:
+// #define VMA_CALL_PRE __declspec(dllexport)
+// #define VMA_CALL_POST __cdecl
+#ifndef VMA_CALL_PRE
+ #define VMA_CALL_PRE
+#endif
+#ifndef VMA_CALL_POST
+ #define VMA_CALL_POST
+#endif
+
+// Define this macro to decorate pointers with an attribute specifying the
+// length of the array they point to if they are not null.
+//
+// The length may be one of
+// - The name of another parameter in the argument list where the pointer is declared
+// - The name of another member in the struct where the pointer is declared
+// - The name of a member of a struct type, meaning the value of that member in
+// the context of the call. For example
+// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
+// this means the number of memory heaps available in the device associated
+// with the VmaAllocator being dealt with.
+#ifndef VMA_LEN_IF_NOT_NULL
+ #define VMA_LEN_IF_NOT_NULL(len)
+#endif
+
+// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
+#ifndef VMA_NULLABLE
+ #ifdef __clang__
+ #define VMA_NULLABLE _Nullable
+ #else
+ #define VMA_NULLABLE
+ #endif
+#endif
+
+// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+#ifndef VMA_NOT_NULL
+ #ifdef __clang__
+ #define VMA_NOT_NULL _Nonnull
+ #else
+ #define VMA_NOT_NULL
+ #endif
+#endif
+
+// If non-dispatchable handles are represented as pointers, then we can give
+// them nullability annotations.
+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+ #else
+ #define VMA_NOT_NULL_NON_DISPATCHABLE
+ #endif
+#endif
+
+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+ #else
+ #define VMA_NULLABLE_NON_DISPATCHABLE
+ #endif
+#endif
+
+#ifndef VMA_STATS_STRING_ENABLED
+ #define VMA_STATS_STRING_ENABLED 1
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+// INTERFACE
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
+#ifndef _VMA_ENUM_DECLARATIONS
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits
+{
+ /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+ Using this flag may increase performance because internal mutexes are not used.
+ */
+ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+ /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+ Using this extension will automatically allocate dedicated blocks of memory for
+ some buffers and images instead of suballocating place for them out of bigger
+ memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+ flag) when it is recommended by the driver. It may improve performance on some
+ GPUs.
+
+    You may set this flag only if you found out that the following device extensions
+    are supported, you enabled them while creating the Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+ - VK_KHR_get_memory_requirements2 (device extension)
+ - VK_KHR_dedicated_allocation (device extension)
+
+    When this flag is set, you can experience the following warnings reported by the
+    Vulkan validation layer. You can ignore them.
+
+ > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+ */
+ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+ /**
+ Enables usage of VK_KHR_bind_memory2 extension.
+
+ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+ You may set this flag only if you found out that this device extension is supported,
+ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+ and you want it to be used internally by this library.
+
+ The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
+    which allow passing a chain of `pNext` structures while binding.
+ This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
+ */
+ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
+ /**
+ Enables usage of VK_EXT_memory_budget extension.
+
+ You may set this flag only if you found out that this device extension is supported,
+ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+ and you want it to be used internally by this library, along with another instance extension
+ VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
+
+    The extension provides a query for current memory usage and budget, which will
+    probably be more accurate than the estimation used by the library otherwise.
+ */
+ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
+ /**
+ Enables usage of VK_AMD_device_coherent_memory extension.
+
+ You may set this flag only if you:
+
+ - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+ - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
+ - want it to be used internally by this library.
+
+ The extension and accompanying device feature provide access to memory types with
+ `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
+ They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
+
+ When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
+    To protect against this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such a memory type,
+ returning `VK_ERROR_FEATURE_NOT_PRESENT`.
+ */
+ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
+ /**
+ Enables usage of "buffer device address" feature, which allows you to use function
+ `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
+
+ You may set this flag only if you:
+
+    1. (For Vulkan version < 1.2) Found the device extension
+       VK_KHR_buffer_device_address available and enabled it.
+       This extension is promoted to core Vulkan 1.2.
+    2. Found the device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` available and enabled it.
+
+ When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
+ The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
+ allocated memory blocks wherever it might be needed.
+
+ For more information, see documentation chapter \ref enabling_buffer_device_address.
+ */
+ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
+ /**
+ Enables usage of VK_EXT_memory_priority extension in the library.
+
+ You may set this flag only if you found available and enabled this device extension,
+ along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
+ while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
+
+ When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
+ are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
+
+ A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+ Larger values are higher priority. The granularity of the priorities is implementation-dependent.
+ It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
+ The value to be used for default priority is 0.5.
+ For more details, see the documentation of the VK_EXT_memory_priority extension.
+ */
+ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
+
+ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocatorCreateFlagBits;
+/// See #VmaAllocatorCreateFlagBits.
+typedef VkFlags VmaAllocatorCreateFlags;
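+
+/*
+Example (editor's sketch, not from the original header): setting allocator flags
+only for extensions that were actually enabled on the VkDevice, as the flag
+documentation above requires. The helper `IsDeviceExtensionEnabled` is
+hypothetical - substitute your own bookkeeping of enabled extensions.
+
+    VmaAllocatorCreateFlags allocatorFlags = 0;
+    if(IsDeviceExtensionEnabled("VK_EXT_memory_budget"))
+        allocatorFlags |= VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
+    if(IsDeviceExtensionEnabled("VK_KHR_bind_memory2"))
+        allocatorFlags |= VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;
+    // Later assigned to VmaAllocatorCreateInfo::flags.
+*/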
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/// \brief Intended usage of the allocated memory.
+typedef enum VmaMemoryUsage
+{
+ /** No intended memory usage specified.
+ Use other members of VmaAllocationCreateInfo to specify your requirements.
+ */
+ VMA_MEMORY_USAGE_UNKNOWN = 0,
+ /**
+ \deprecated Obsolete, preserved for backward compatibility.
+ Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+ */
+ VMA_MEMORY_USAGE_GPU_ONLY = 1,
+ /**
+ \deprecated Obsolete, preserved for backward compatibility.
+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
+ */
+ VMA_MEMORY_USAGE_CPU_ONLY = 2,
+ /**
+ \deprecated Obsolete, preserved for backward compatibility.
+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+ */
+ VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
+ /**
+ \deprecated Obsolete, preserved for backward compatibility.
+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+ */
+ VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
+ /**
+ \deprecated Obsolete, preserved for backward compatibility.
+ Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+ */
+ VMA_MEMORY_USAGE_CPU_COPY = 5,
+ /**
+ Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
+    Exists mostly on mobile platforms. Allocations using it will fail on a desktop PC or other GPUs that have no such memory type.
+
+ Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
+
+ Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+ */
+ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
+ /**
+ Selects best memory type automatically.
+ This flag is recommended for most common use cases.
+
+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+ in VmaAllocationCreateInfo::flags.
+
+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+ and not with generic memory allocation functions.
+ */
+ VMA_MEMORY_USAGE_AUTO = 7,
+ /**
+ Selects best memory type automatically with preference for GPU (device) memory.
+
+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+ in VmaAllocationCreateInfo::flags.
+
+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+ and not with generic memory allocation functions.
+ */
+ VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
+ /**
+ Selects best memory type automatically with preference for CPU (host) memory.
+
+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+ in VmaAllocationCreateInfo::flags.
+
+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+ and not with generic memory allocation functions.
+ */
+ VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
+
+ VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
+} VmaMemoryUsage;
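+
+/*
+Example (editor's sketch, not from the original header): creating a staging
+buffer with VMA_MEMORY_USAGE_AUTO. Because the memory will be mapped and written
+with memcpy(), the HOST_ACCESS_SEQUENTIAL_WRITE and MAPPED flags are added.
+`allocator`, `srcData`, and `srcDataSize` are assumed to exist; error handling
+is omitted.
+
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 65536;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer buf;
+    VmaAllocation alloc;
+    VmaAllocationInfo allocInfo;
+    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+    memcpy(allocInfo.pMappedData, srcData, srcDataSize);
+*/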
+
+/// Flags to be passed as VmaAllocationCreateInfo::flags.
+typedef enum VmaAllocationCreateFlagBits
+{
+ /** \brief Set this flag if the allocation should have its own memory block.
+
+ Use it for special, big resources, like fullscreen images used as attachments.
+ */
+ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
+
+ /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
+
+    If a new allocation cannot be placed in any of the existing blocks, the allocation
+    fails with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+ You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
+ #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
+ */
+ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
+    /** \brief Set this flag to use memory that will be persistently mapped and to retrieve a pointer to it.
+
+ Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
+
+ It is valid to use this flag for allocation made from memory type that is not
+ `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
+ useful if you need an allocation that is efficient to use on GPU
+ (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+ support it (e.g. Intel GPU).
+ */
+ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+ /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
+
+ Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
+ null-terminated string. Instead of copying pointer value, a local copy of the
+ string is made and stored in allocation's `pName`. The string is automatically
+ freed together with the allocation. It is also used in vmaBuildStatsString().
+ */
+ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+ /** Allocation will be created from upper stack in a double stack pool.
+
+ This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+ */
+ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+ /** Create both buffer/image and allocation, but don't bind them together.
+    It is useful when you want to perform the binding yourself to do some more advanced binding, e.g. using some extensions.
+ The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+ Otherwise it is ignored.
+
+ If you want to make sure the new buffer/image is not tied to the new memory allocation
+ through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
+ use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
+ */
+ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+ /** Create allocation only if additional device memory required for it, if any, won't exceed
+ memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+ */
+ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
+ /** \brief Set this flag if the allocated memory will have aliasing resources.
+
+ Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
+ Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
+ */
+ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
+ /**
+ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+
+ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
+ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
+ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
+ This includes allocations created in \ref custom_memory_pools.
+
+ Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
+ never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
+
+ \warning Violating this declaration may work correctly, but will likely be very slow.
+ Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
+ Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
+ */
+ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
+ /**
+ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+
+ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
+ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
+ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
+ This includes allocations created in \ref custom_memory_pools.
+
+ Declares that mapped memory can be read, written, and accessed in random order,
+ so a `HOST_CACHED` memory type is required.
+ */
+ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
+ /**
+ Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
+ it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
+ if it may improve performance.
+
+ By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
+ (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
+ issue an explicit transfer to write/read your data.
+ To prepare for this possibility, don't forget to add appropriate flags like
+ `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
+ */
+ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
+ /** Allocation strategy that chooses smallest possible free range for the allocation
+ to minimize memory usage and fragmentation, possibly at the expense of allocation time.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
+ /** Allocation strategy that chooses first suitable free range for the allocation -
+ not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
+ to minimize allocation time, possibly at the expense of allocation quality.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
+ /** Allocation strategy that chooses always the lowest offset in available space.
+ This is not the most efficient strategy but achieves highly packed data.
+    Used internally by defragmentation, not recommended in typical usage.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
+ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
+ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
+ /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
+ */
+ VMA_ALLOCATION_CREATE_STRATEGY_MASK =
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+
+ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocationCreateFlagBits;
+/// See #VmaAllocationCreateFlagBits.
+typedef VkFlags VmaAllocationCreateFlags;
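+
+/*
+Example (editor's sketch, not from the original header): using
+VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT and then checking
+where the allocation ended up, as that flag's documentation above requires.
+`allocator`, `buf`, and `alloc` are assumed to come from a vmaCreateBuffer()
+call whose VkBufferCreateInfo included VK_BUFFER_USAGE_TRANSFER_DST_BIT.
+
+    VkMemoryPropertyFlags memPropFlags;
+    vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+    if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+    {
+        // Mappable: write directly to the allocation.
+    }
+    else
+    {
+        // Not mappable: write to a staging buffer and vkCmdCopyBuffer() into `buf`.
+    }
+*/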
+
+/// Flags to be passed as VmaPoolCreateInfo::flags.
+typedef enum VmaPoolCreateFlagBits
+{
+ /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
+
+ This is an optional optimization flag.
+
+ If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
+ vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
+ knows exact type of your allocations so it can handle Buffer-Image Granularity
+ in the optimal way.
+
+ If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
+ exact type of such allocations is not known, so allocator must be conservative
+ in handling Buffer-Image Granularity, which can lead to suboptimal allocation
+ (wasted memory). In that case, if you can make sure you always allocate only
+ buffers and linear images or only optimal images out of this pool, use this flag
+ to make allocator disregard Buffer-Image Granularity and so make allocations
+ faster and more optimal.
+ */
+ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+
+ /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+ Specify this flag to enable linear allocation algorithm, which always creates
+ new allocations after last one and doesn't reuse space from allocations freed in
+ between. It trades memory consumption for simplified algorithm and data
+ structure, which has better performance and uses less memory for metadata.
+
+ By using this flag, you can achieve behavior of free-at-once, stack,
+ ring buffer, and double stack.
+ For details, see documentation chapter \ref linear_algorithm.
+ */
+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+ /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+ */
+ VMA_POOL_CREATE_ALGORITHM_MASK =
+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
+
+ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
+typedef VkFlags VmaPoolCreateFlags;
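+
+/*
+Example (editor's sketch, not from the original header): creating a custom pool
+that uses the linear algorithm, e.g. as a ring buffer of per-frame uploads.
+VmaPoolCreateInfo and the functions used are declared further below;
+`sampleBufCreateInfo` and `sampleAllocCreateInfo` are assumed example structs
+describing a representative allocation.
+
+    uint32_t memTypeIndex;
+    vmaFindMemoryTypeIndexForBufferInfo(allocator,
+        &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+
+    VmaPoolCreateInfo poolCreateInfo = {};
+    poolCreateInfo.memoryTypeIndex = memTypeIndex;
+    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+
+    VmaPool pool;
+    vmaCreatePool(allocator, &poolCreateInfo, &pool);
+*/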
+
+/// Flags to be passed as VmaDefragmentationInfo::flags.
+typedef enum VmaDefragmentationFlagBits
+{
+    /** \brief Use simple but fast algorithm for defragmentation.
+ May not achieve best results but will require least time to compute and least allocations to copy.
+ */
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
+    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
+ Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
+ */
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
+    /** \brief Perform full defragmentation of memory.
+ Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
+ */
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
+    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
+ Only available when bufferImageGranularity is greater than 1, since it aims to reduce
+ alignment issues between different types of resources.
+ Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
+ */
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
+
+ /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
+
+ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+/// See #VmaDefragmentationFlagBits.
+typedef VkFlags VmaDefragmentationFlags;
+
+/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
+typedef enum VmaDefragmentationMoveOperation
+{
+ /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
+ VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
+ /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
+ VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
+ /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
+ VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
+} VmaDefragmentationMoveOperation;
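+
+/*
+Example (editor's sketch, not from the original header): skeleton of a
+defragmentation loop built from the enums above and the vmaBeginDefragmentation*
+functions declared further below. Performing the actual GPU copy for each move
+is the caller's responsibility and is only indicated by a comment here.
+
+    VmaDefragmentationInfo defragInfo = {};
+    defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
+
+    VmaDefragmentationContext defragCtx;
+    vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+
+    VmaDefragmentationPassMoveInfo pass;
+    while(vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_INCOMPLETE)
+    {
+        for(uint32_t i = 0; i < pass.moveCount; ++i)
+        {
+            // Recreate the resource at pass.pMoves[i].dstTmpAllocation and copy
+            // its data on the GPU, or set pass.pMoves[i].operation to
+            // VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE to refuse the move.
+        }
+        if(vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
+            break;
+    }
+    vmaEndDefragmentation(allocator, defragCtx, NULL);
+*/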
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
+typedef enum VmaVirtualBlockCreateFlagBits
+{
+ /** \brief Enables alternative, linear allocation algorithm in this virtual block.
+
+ Specify this flag to enable linear allocation algorithm, which always creates
+ new allocations after last one and doesn't reuse space from allocations freed in
+ between. It trades memory consumption for simplified algorithm and data
+ structure, which has better performance and uses less memory for metadata.
+
+ By using this flag, you can achieve behavior of free-at-once, stack,
+ ring buffer, and double stack.
+ For details, see documentation chapter \ref linear_algorithm.
+ */
+ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
+
+ /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+ */
+ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
+ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
+
+ VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaVirtualBlockCreateFlagBits;
+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
+typedef VkFlags VmaVirtualBlockCreateFlags;
+
+/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
+typedef enum VmaVirtualAllocationCreateFlagBits
+{
+ /** \brief Allocation will be created from upper stack in a double stack pool.
+
+ This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
+ */
+ VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
+ /** \brief Allocation strategy that tries to minimize memory usage.
+ */
+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
+ /** \brief Allocation strategy that tries to minimize allocation time.
+ */
+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
+ /** Allocation strategy that chooses always the lowest offset in available space.
+ This is not the most efficient strategy but achieves highly packed data.
+ */
+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+ /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
+
+ These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
+ */
+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
+
+ VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaVirtualAllocationCreateFlagBits;
+/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
+typedef VkFlags VmaVirtualAllocationCreateFlags;
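+
+/*
+Example (editor's sketch, not from the original header): the virtual allocator
+used as a plain sub-allocator of a 1 MiB address space, with no real GPU memory
+involved. The create-info structs and functions used are declared further below.
+
+    VmaVirtualBlockCreateInfo blockCreateInfo = {};
+    blockCreateInfo.size = 1048576; // 1 MiB
+
+    VmaVirtualBlock block;
+    vmaCreateVirtualBlock(&blockCreateInfo, &block);
+
+    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.size = 4096;
+
+    VmaVirtualAllocation alloc;
+    VkDeviceSize offset;
+    vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+    // ... use the [offset, offset + 4096) range of your own resource ...
+    vmaVirtualFree(block, alloc);
+    vmaDestroyVirtualBlock(block);
+*/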
+
+/** @} */
+
+#endif // _VMA_ENUM_DECLARATIONS
+
+#ifndef _VMA_DATA_TYPES_DECLARATIONS
+
+/**
+\addtogroup group_init
+@{ */
+
+/** \struct VmaAllocator
+\brief Represents the main, initialized object of this library.
+
+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
+Call function vmaDestroyAllocator() to destroy it.
+
+It is recommended to create just one object of this type per `VkDevice` object,
+right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
+*/
+VK_DEFINE_HANDLE(VmaAllocator)
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \struct VmaPool
+\brief Represents a custom memory pool.
+
+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
+Call function vmaDestroyPool() to destroy it.
+
+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+*/
+VK_DEFINE_HANDLE(VmaPool)
+
+/** \struct VmaAllocation
+\brief Represents single memory allocation.
+
+It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
+plus a unique offset.
+
+There are multiple ways to create such object.
+You need to fill structure VmaAllocationCreateInfo.
+For more information see [Choosing memory type](@ref choosing_memory_type).
+
+Although the library provides convenience functions that create Vulkan buffer or image,
+allocate memory for it and bind them together,
+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+Allocation object can exist without buffer/image bound,
+binding can be done manually by the user, and destruction of it can be done
+independently of destruction of the allocation.
+
+The object also remembers its size and some other information.
+To retrieve this information, use function vmaGetAllocationInfo() and inspect
+returned structure VmaAllocationInfo.
+*/
+VK_DEFINE_HANDLE(VmaAllocation)
+
+/** \struct VmaDefragmentationContext
+\brief An opaque object that represents started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
+Call function vmaEndDefragmentation() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \struct VmaVirtualAllocation
+\brief Represents single memory allocation done inside VmaVirtualBlock.
+
+Use it as a unique identifier of a virtual allocation within a single block.
+
+Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
+*/
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation);
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \struct VmaVirtualBlock
+\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
+
+Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
+For more information, see documentation chapter \ref virtual_allocator.
+
+This object is not thread-safe: it should not be used from multiple threads simultaneously and must be synchronized externally.
+*/
+VK_DEFINE_HANDLE(VmaVirtualBlock)
+
+/** @} */
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Callback function called after successful vkAllocateMemory.
+typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryType,
+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+ VkDeviceSize size,
+ void* VMA_NULLABLE pUserData);
+
+/// Callback function called before vkFreeMemory.
+typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryType,
+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+ VkDeviceSize size,
+ void* VMA_NULLABLE pUserData);
+
+/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
+
+Provided for informative purpose, e.g. to gather statistics about number of
+allocations or total amount of memory allocated in Vulkan.
+
+Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+*/
+typedef struct VmaDeviceMemoryCallbacks
+{
+ /// Optional, can be null.
+ PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
+ /// Optional, can be null.
+ PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
+ /// Optional, can be null.
+ void* VMA_NULLABLE pUserData;
+} VmaDeviceMemoryCallbacks;
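+
+/*
+Example (editor's sketch, not from the original header): informative callbacks
+that keep a running total of device memory allocated through the library. The
+callback and counter names are illustrative; synchronization of the counter is
+omitted for brevity.
+
+    static void VKAPI_PTR OnAllocate(VmaAllocator allocator, uint32_t memoryType,
+        VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+    {
+        *(VkDeviceSize*)pUserData += size;
+    }
+    static void VKAPI_PTR OnFree(VmaAllocator allocator, uint32_t memoryType,
+        VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+    {
+        *(VkDeviceSize*)pUserData -= size;
+    }
+
+    static VkDeviceSize g_TotalDeviceMemory = 0;
+    VmaDeviceMemoryCallbacks callbacks = { OnAllocate, OnFree, &g_TotalDeviceMemory };
+    // Later assigned to VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+*/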
+
+/** \brief Pointers to some Vulkan functions - a subset used by the library.
+
+Used in VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+typedef struct VmaVulkanFunctions
+{
+ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
+ PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
+ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
+ PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
+ PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
+ PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
+ PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
+ PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
+ PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
+ PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
+ PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
+ PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
+ PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
+ PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
+ PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
+ PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
+ PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
+ PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
+ PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
+ PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
+ PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+ /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
+ PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
+ /// Fetch "vkGetImageMemoryRequirements 2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
+ PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
+#endif
+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+ /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
+ PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
+ /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
+ PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+ PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+#if VMA_VULKAN_VERSION >= 1003000
+ /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+ PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
+ /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+ PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+#endif
+} VmaVulkanFunctions;
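+
+/*
+Example (editor's sketch, not from the original header): when the library is
+compiled with VMA_DYNAMIC_VULKAN_FUNCTIONS, only the two loader entry points
+need to be provided; the remaining pointers are fetched internally.
+
+    VmaVulkanFunctions vulkanFunctions = {};
+    vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+    vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+    // Later assigned to VmaAllocatorCreateInfo::pVulkanFunctions.
+*/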
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+ /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+ VmaAllocatorCreateFlags flags;
+ /// Vulkan physical device.
+    /** It must be valid throughout the whole lifetime of the created allocator. */
+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+ /// Vulkan device.
+    /** It must be valid throughout the whole lifetime of the created allocator. */
+ VkDevice VMA_NOT_NULL device;
+ /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+ /** Set to 0 to use default, which is currently 256 MiB. */
+ VkDeviceSize preferredLargeHeapBlockSize;
+ /// Custom CPU memory allocation callbacks. Optional.
+ /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+ /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+ /** Optional, can be null. */
+ const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+ /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+ If not NULL, it must be a pointer to an array of
+ `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+ maximum number of bytes that can be allocated out of particular Vulkan memory
+ heap.
+
+ Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+ heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+ If there is a limit defined for a heap:
+
+ - If user tries to allocate more memory from that heap using this allocator,
+ the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+ - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+ value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with a
+    smaller amount of memory, because the graphics driver doesn't necessarily fail new
+    allocations with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using the
+    VK_AMD_memory_overallocation_behavior extension.
+ */
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+
+ /** \brief Pointers to Vulkan functions. Can be null.
+
+ For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
+ */
+ const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
+ /** \brief Handle to Vulkan instance object.
+
+ Starting from version 3.0.0 this member is no longer optional, it must be set!
+ */
+ VkInstance VMA_NOT_NULL instance;
+ /** \brief Optional. The highest version of Vulkan that the application is designed to use.
+
+ It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
+ The patch version number specified is ignored. Only the major and minor versions are considered.
+    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
+ Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
+ Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
+ */
+ uint32_t vulkanApiVersion;
+#if VMA_EXTERNAL_MEMORY
+ /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
+
+ If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
+ elements, defining external memory handle types of particular Vulkan memory type,
+ to be passed using `VkExportMemoryAllocateInfoKHR`.
+
+ Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
+ This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
+ */
+ const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
+#endif // #if VMA_EXTERNAL_MEMORY
+} VmaAllocatorCreateInfo;
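+
+/*
+Example (editor's sketch, not from the original header): minimal allocator
+creation for a Vulkan 1.1 device, using vmaCreateAllocator()/vmaDestroyAllocator()
+declared further below. `instance`, `physicalDevice`, and `device` are assumed
+to be valid handles created by the application.
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_1;
+    allocatorCreateInfo.instance = instance;
+    allocatorCreateInfo.physicalDevice = physicalDevice;
+    allocatorCreateInfo.device = device;
+
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+    // ...
+    vmaDestroyAllocator(allocator);
+*/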
+
+/// Information about existing #VmaAllocator object.
+typedef struct VmaAllocatorInfo
+{
+ /** \brief Handle to Vulkan instance object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
+ */
+ VkInstance VMA_NOT_NULL instance;
+ /** \brief Handle to Vulkan physical device object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
+ */
+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+ /** \brief Handle to Vulkan device object.
+
+ This is the same value as has been passed through VmaAllocatorCreateInfo::device.
+ */
+ VkDevice VMA_NOT_NULL device;
+} VmaAllocatorInfo;
+
+/** @} */
+
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
+
+These are fast to calculate.
+See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
+*/
+typedef struct VmaStatistics
+{
+ /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
+ */
+ uint32_t blockCount;
+ /** \brief Number of #VmaAllocation objects allocated.
+
+ Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
+ */
+ uint32_t allocationCount;
+ /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
+
+ \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
+ (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
+ "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
+ */
+ VkDeviceSize blockBytes;
+ /** \brief Total number of bytes occupied by all #VmaAllocation objects.
+
+    Always less than or equal to `blockBytes`.
+ Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
+ but unused by any #VmaAllocation.
+ */
+ VkDeviceSize allocationBytes;
+} VmaStatistics;
+
+/** \brief More detailed statistics than #VmaStatistics.
+
+These are slower to calculate. Use for debugging purposes.
+See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
+
+Previous version of the statistics API provided averages, but they have been removed
+because they can be easily calculated as:
+
+\code
+VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
+VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
+VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
+\endcode
+*/
+typedef struct VmaDetailedStatistics
+{
+ /// Basic statistics.
+ VmaStatistics statistics;
+ /// Number of free ranges of memory between allocations.
+ uint32_t unusedRangeCount;
+ /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
+ VkDeviceSize allocationSizeMin;
+ /// Largest allocation size. 0 if there are 0 allocations.
+ VkDeviceSize allocationSizeMax;
+ /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
+ VkDeviceSize unusedRangeSizeMin;
+ /// Largest empty range size. 0 if there are 0 empty ranges.
+ VkDeviceSize unusedRangeSizeMax;
+} VmaDetailedStatistics;
+
+/** \brief General statistics from current state of the Allocator -
+total memory usage across all memory heaps and types.
+
+These are slower to calculate. Use for debugging purposes.
+See function vmaCalculateStatistics().
+*/
+typedef struct VmaTotalStatistics
+{
+ VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
+ VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
+ VmaDetailedStatistics total;
+} VmaTotalStatistics;
+
+/** \brief Statistics of current memory usage and available budget for a specific memory heap.
+
+These are fast to calculate.
+See function vmaGetHeapBudgets().
+*/
+typedef struct VmaBudget
+{
+ /** \brief Statistics fetched from the library.
+ */
+ VmaStatistics statistics;
+ /** \brief Estimated current memory usage of the program, in bytes.
+
+ Fetched from system using VK_EXT_memory_budget extension if enabled.
+
+ It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
+ also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
+ `VkDeviceMemory` blocks allocated outside of this library, if any.
+ */
+ VkDeviceSize usage;
+ /** \brief Estimated amount of memory available to the program, in bytes.
+
+ Fetched from system using VK_EXT_memory_budget extension if enabled.
+
+ It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
+ external to the program, decided by the operating system.
+ Difference `budget - usage` is the amount of additional memory that can probably
+ be allocated without problems. Exceeding the budget may result in various problems.
+ */
+ VkDeviceSize budget;
+} VmaBudget;
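+
+/*
+A minimal sketch of polling the budget, assuming a valid #VmaAllocator named
+`allocator` and that heap index 0 is the one of interest:
+
+\code
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+// Headroom estimates how many more bytes can likely be allocated from heap 0.
+VkDeviceSize headroom = budgets[0].budget > budgets[0].usage ?
+    budgets[0].budget - budgets[0].usage : 0;
+\endcode
+*/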
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \brief Parameters of new #VmaAllocation.
+
+To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
+*/
+typedef struct VmaAllocationCreateInfo
+{
+ /// Use #VmaAllocationCreateFlagBits enum.
+ VmaAllocationCreateFlags flags;
+ /** \brief Intended usage of memory.
+
+    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+ If `pool` is not null, this member is ignored.
+ */
+ VmaMemoryUsage usage;
+ /** \brief Flags that must be set in a Memory Type chosen for an allocation.
+
+    Leave 0 if you specify memory requirements in another way. \n
+    If `pool` is not null, this member is ignored.*/
+ VkMemoryPropertyFlags requiredFlags;
+ /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+
+ Set to 0 if no additional flags are preferred. \n
+ If `pool` is not null, this member is ignored. */
+ VkMemoryPropertyFlags preferredFlags;
+ /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+
+ Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+ it meets other requirements specified by this structure, with no further
+ restrictions on memory type index. \n
+ If `pool` is not null, this member is ignored.
+ */
+ uint32_t memoryTypeBits;
+ /** \brief Pool that this allocation should be created in.
+
+ Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
+ `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+ */
+ VmaPool VMA_NULLABLE pool;
+ /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+
+ If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+    null or pointer to a null-terminated string. The string will then be copied to an
+    internal buffer, so it doesn't need to remain valid after the allocation call.
+ */
+ void* VMA_NULLABLE pUserData;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+
+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
+ and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+ Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
+ */
+ float priority;
+} VmaAllocationCreateInfo;
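+
+/*
+A minimal sketch of typical usage, assuming a valid #VmaAllocator named `allocator`:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; // let the library choose the memory type
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
+    &buf, &alloc, nullptr);
+\endcode
+*/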
+
+/// Describes parameter of created #VmaPool.
+typedef struct VmaPoolCreateInfo
+{
+ /** \brief Vulkan memory type index to allocate this pool from.
+ */
+ uint32_t memoryTypeIndex;
+ /** \brief Use combination of #VmaPoolCreateFlagBits.
+ */
+ VmaPoolCreateFlags flags;
+ /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+
+ Specify nonzero to set explicit, constant size of memory blocks used by this
+ pool.
+
+ Leave 0 to use default and let the library manage block sizes automatically.
+ Sizes of particular blocks may vary.
+ In this case, the pool will also support dedicated allocations.
+ */
+ VkDeviceSize blockSize;
+ /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+
+    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+ */
+ size_t minBlockCount;
+ /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+
+ Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
+
+ Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
+ throughout whole lifetime of this pool.
+ */
+ size_t maxBlockCount;
+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
+
+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
+ Otherwise, this variable is ignored.
+ */
+ float priority;
+ /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
+
+ Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
+    It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not sufficient,
+ e.g. when doing interop with OpenGL.
+ */
+ VkDeviceSize minAllocationAlignment;
+ /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
+
+ Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
+ It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
+ Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
+
+    Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
+    can be attached automatically by this library when using other, more convenient features of it.
+ */
+ void* VMA_NULLABLE pMemoryAllocateNext;
+} VmaPoolCreateInfo;
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
+typedef struct VmaAllocationInfo
+{
+ /** \brief Memory type index that this allocation was allocated from.
+
+ It never changes.
+ */
+ uint32_t memoryType;
+ /** \brief Handle to Vulkan memory object.
+
+ Same memory object can be shared by multiple allocations.
+
+ It can change after the allocation is moved during \ref defragmentation.
+ */
+ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
+ /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
+
+ You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
+ vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
+ not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
+ and apply this offset automatically.
+
+ It can change after the allocation is moved during \ref defragmentation.
+ */
+ VkDeviceSize offset;
+ /** \brief Size of this allocation, in bytes.
+
+ It never changes.
+
+ \note Allocation size returned in this variable may be greater than the size
+ requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
+ allocation is accessible for operations on memory e.g. using a pointer after
+ mapping with vmaMapMemory(), but operations on the resource e.g. using
+ `vkCmdCopyBuffer` must be limited to the size of the resource.
+ */
+ VkDeviceSize size;
+ /** \brief Pointer to the beginning of this allocation as mapped data.
+
+ If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
+ created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
+
+ It can change after call to vmaMapMemory(), vmaUnmapMemory().
+ It can also change after the allocation is moved during \ref defragmentation.
+ */
+ void* VMA_NULLABLE pMappedData;
+ /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+
+ It can change after call to vmaSetAllocationUserData() for this allocation.
+ */
+ void* VMA_NULLABLE pUserData;
+ /** \brief Custom allocation name that was set with vmaSetAllocationName().
+
+ It can change after call to vmaSetAllocationName() for this allocation.
+
+ Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
+ additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
+ */
+ const char* VMA_NULLABLE pName;
+} VmaAllocationInfo;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaBeginDefragmentation().
+*/
+typedef struct VmaDefragmentationInfo
+{
+ /// \brief Use combination of #VmaDefragmentationFlagBits.
+ VmaDefragmentationFlags flags;
+ /** \brief Custom pool to be defragmented.
+
+    If null, then the default pools will undergo the defragmentation process.
+ */
+ VmaPool VMA_NULLABLE pool;
+    /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
+
+ `0` means no limit.
+ */
+ VkDeviceSize maxBytesPerPass;
+ /** \brief Maximum number of allocations that can be moved during single pass to a different place.
+
+ `0` means no limit.
+ */
+ uint32_t maxAllocationsPerPass;
+} VmaDefragmentationInfo;
+
+/// Single move of an allocation to be done for defragmentation.
+typedef struct VmaDefragmentationMove
+{
+ /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
+ VmaDefragmentationMoveOperation operation;
+ /// Allocation that should be moved.
+ VmaAllocation VMA_NOT_NULL srcAllocation;
+ /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
+
+ \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
+ to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
+ vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
+ */
+ VmaAllocation VMA_NOT_NULL dstTmpAllocation;
+} VmaDefragmentationMove;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassMoveInfo
+{
+ /// Number of elements in the `pMoves` array.
+ uint32_t moveCount;
+ /** \brief Array of moves to be performed by the user in the current defragmentation pass.
+
+ Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
+
+ For each element, you should:
+
+    1. Create a new buffer/image in the place pointed to by VmaDefragmentationMove::dstTmpAllocation.
+ 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
+ 3. Make sure these commands finished executing on the GPU.
+ 4. Destroy the old buffer/image.
+
+    Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
+ After this call, the allocation will point to the new place in memory.
+
+ Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+
+ Alternatively, if you decide you want to completely remove the allocation:
+
+ 1. Destroy its buffer/image.
+ 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+
+ Then, after vmaEndDefragmentationPass() the allocation will be freed.
+ */
+ VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassMoveInfo;
+
+/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
+typedef struct VmaDefragmentationStats
+{
+ /// Total number of bytes that have been copied while moving allocations to different places.
+ VkDeviceSize bytesMoved;
+ /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+ VkDeviceSize bytesFreed;
+ /// Number of allocations that have been moved to different places.
+ uint32_t allocationsMoved;
+ /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+ uint32_t deviceMemoryBlocksFreed;
+} VmaDefragmentationStats;
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
+typedef struct VmaVirtualBlockCreateInfo
+{
+ /** \brief Total size of the virtual block.
+
+ Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
+    For example, if you allocate from some array of structures, 1 can mean a single instance of an entire structure.
+ */
+ VkDeviceSize size;
+
+ /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
+ */
+ VmaVirtualBlockCreateFlags flags;
+
+ /** \brief Custom CPU memory allocation callbacks. Optional.
+
+ Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
+ */
+ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+} VmaVirtualBlockCreateInfo;
+
+/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
+typedef struct VmaVirtualAllocationCreateInfo
+{
+ /** \brief Size of the allocation.
+
+ Cannot be zero.
+ */
+ VkDeviceSize size;
+ /** \brief Required alignment of the allocation. Optional.
+
+    Must be a power of two. The special value 0 has the same meaning as 1 - it means no special alignment is required, so the allocation can start at any offset.
+ */
+ VkDeviceSize alignment;
+ /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
+ */
+ VmaVirtualAllocationCreateFlags flags;
+ /** \brief Custom pointer to be associated with the allocation. Optional.
+
+ It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
+ */
+ void* VMA_NULLABLE pUserData;
+} VmaVirtualAllocationCreateInfo;
+
+/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
+typedef struct VmaVirtualAllocationInfo
+{
+ /** \brief Offset of the allocation.
+
+ Offset at which the allocation was made.
+ */
+ VkDeviceSize offset;
+ /** \brief Size of the allocation.
+
+ Same value as passed in VmaVirtualAllocationCreateInfo::size.
+ */
+ VkDeviceSize size;
+ /** \brief Custom pointer associated with the allocation.
+
+ Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
+ */
+ void* VMA_NULLABLE pUserData;
+} VmaVirtualAllocationInfo;
+
+/** @} */
+
+#endif // _VMA_DATA_TYPES_DECLARATIONS
+
+#ifndef _VMA_FUNCTION_HEADERS
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Creates #VmaAllocator object.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+ const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
+
+/// Destroys allocator object.
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+ VmaAllocator VMA_NULLABLE allocator);
+
+/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
+
+It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
+`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
+
+/**
+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
+
+/**
+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
+
+/**
+\brief Given Memory Type Index, returns Property Flags of this memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryTypeIndex,
+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Sets index of the current frame.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t frameIndex);
+
+/** @} */
+
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Retrieves statistics from current state of the Allocator.
+
+This function is called "calculate" not "get" because it has to traverse all
+internal data structures, so it may be quite slow. Use it for debugging purposes.
+For faster but more brief statistics suitable to be called every frame or every allocation,
+use vmaGetHeapBudgets().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaTotalStatistics* VMA_NOT_NULL pStats);
+
+/** \brief Retrieves information about current memory usage and budget for all memory heaps.
+
+\param allocator
+\param[out] pBudgets Must point to an array with at least as many elements as there are memory heaps in the physical device used.
+
+This function is called "get" not "calculate" because it is very fast, suitable to be called
+every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/**
+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+
+This algorithm tries to find a memory type that:
+
+- Is allowed by memoryTypeBits.
+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+- Matches intended usage.
+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+
+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
+from this function or any other allocating function probably means that your
+device doesn't support any memory type with requested features for the specific
+type of resource you want to use it for. Please check parameters of your
+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryTypeBits,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy buffer that never has memory bound.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy image that never has memory bound.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/** \brief Allocates Vulkan device memory and creates #VmaPool object.
+
+\param allocator Allocator object.
+\param pCreateInfo Parameters of pool to create.
+\param[out] pPool Handle to created pool.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
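+
+/*
+A minimal sketch of creating a custom pool, assuming a valid #VmaAllocator named
+`allocator` and a representative `VkBufferCreateInfo` named `exampleBufCreateInfo`:
+
+\code
+VmaAllocationCreateInfo sampleAllocCreateInfo = {};
+sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
+    &exampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+// Check res...
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// blockSize, minBlockCount, maxBlockCount left as 0 - library defaults.
+
+VmaPool pool;
+res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode
+*/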
+
+/** \brief Destroys #VmaPool object and frees Vulkan device memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NULLABLE pool);
+
+/** @} */
+
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Retrieves statistics of existing #VmaPool object.
+
+\param allocator Allocator object.
+\param pool Pool object.
+\param[out] pPoolStats Statistics of specified pool.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ VmaStatistics* VMA_NOT_NULL pPoolStats);
+
+/** \brief Retrieves detailed statistics of existing #VmaPool object.
+
+\param allocator Allocator object.
+\param pool Pool object.
+\param[out] pPoolStats Statistics of specified pool.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+ `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool);
+
+/** \brief Retrieves name of a custom pool.
+
+After the call `ppName` is either null or points to an internally-owned null-terminated string
+containing name of the pool that was previously set. The pointer becomes invalid when the pool is
+destroyed or its name is changed using vmaSetPoolName().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
+
+/** \brief Sets name of a custom pool.
+
+`pName` can be either null or pointer to a null-terminated string with new name for the pool.
+Function makes internal copy of the string, so it can be changed or freed immediately after this call.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaPool VMA_NOT_NULL pool,
+ const char* VMA_NULLABLE pName);
+
+/** \brief General purpose memory allocation.
+
+\param allocator
+\param pVkMemoryRequirements
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+\param allocator Allocator object.
+\param pVkMemoryRequirements Memory requirements for each allocation.
+\param pCreateInfo Creation parameters for each allocation.
+\param allocationCount Number of allocations to make.
+\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when the
+returned result is not `VK_SUCCESS`, the `pAllocations` array is entirely filled with `VK_NULL_HANDLE`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+ size_t allocationCount,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+ VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkBuffer`.
+
+\param allocator
+\param buffer
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateBuffer().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+ VmaAllocator VMA_NOT_NULL allocator,
+ size_t allocationCount,
+ const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Returns current information about specified allocation.
+
+Current parameters of the given allocation are returned in `pAllocationInfo`.
+
+This function doesn't lock any mutex, so it should be quite efficient; still, you should
+avoid calling it too often.
+You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
+vmaCreateBuffer() and vmaCreateImage(). You can remember it if you are sure the parameters don't change
+(e.g. due to defragmentation).
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
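+
+/*
+A minimal sketch, assuming a valid allocator and allocation:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+// (allocInfo.deviceMemory, allocInfo.offset) identify the underlying memory region.
+\endcode
+*/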
+
+/** \brief Sets pUserData in given allocation to new value.
+
+The value of pointer `pUserData` is copied to allocation's `pUserData`.
+It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE pUserData);
+
+/** \brief Sets pName in given allocation to new value.
+
+`pName` must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pName`. String
+passed as pName doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. The string previously pointed to by the allocation's
+`pName` is freed from memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ const char* VMA_NULLABLE pName);
+
+/**
+\brief Given an allocation, returns Property Flags of its memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetAllocationInfo() + vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
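+
+/*
+A minimal sketch of a typical use - deciding between a direct write and a staging
+copy, assuming a valid allocator and allocation:
+
+\code
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, allocation, &memPropFlags);
+
+if((memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // The allocation can be mapped and written directly.
+}
+else
+{
+    // A staging buffer and a GPU-side copy are needed instead.
+}
+\endcode
+*/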
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+When succeeded, `*ppData` contains pointer to first byte of this memory.
+
+\warning
+If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
+correctly offset to the beginning of the region assigned to this particular allocation.
+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+You should not add VmaAllocationInfo::offset to it!
+
+Mapping is internally reference-counted and synchronized, so although the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ void* VMA_NULLABLE* VMA_NOT_NULL ppData);
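+
+/*
+A minimal sketch of a map-write-unmap sequence, assuming a valid allocator, an
+allocation in HOST_VISIBLE memory, and `srcData`/`dataSize` describing the data to upload:
+
+\code
+void* mappedData;
+VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
+if(res == VK_SUCCESS)
+{
+    memcpy(mappedData, srcData, (size_t)dataSize);
+    vmaUnmapMemory(allocator, allocation);
+    // A no-op on HOST_COHERENT memory, required otherwise:
+    res = vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+}
+\endcode
+*/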
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
+Map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+ this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t allocationCount,
+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+
+\param allocator
+\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+ `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
+ VmaAllocator VMA_NOT_NULL allocator,
+ uint32_t memoryTypeBits);
+
+/** \brief Begins defragmentation process.
+
+\param allocator Allocator object.
+\param pInfo Structure filled with parameters of defragmentation.
+\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
+\returns
+- `VK_SUCCESS` if defragmentation can begin.
+- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
+
+For more information about defragmentation, see documentation chapter:
+[Defragmentation](@ref defragmentation).
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
+ VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
+
+/** \brief Ends defragmentation process.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param[out] pStats Optional stats for the defragmentation. Can be null.
+
+Use this function to finish defragmentation started by vmaBeginDefragmentation().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NOT_NULL context,
+ VmaDefragmentationStats* VMA_NULLABLE pStats);
+
+/** \brief Starts single defragmentation pass.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param[out] pPassInfo Computed information for the current pass.
+\returns
+- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
+- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
+  and then preferably try another pass with vmaBeginDefragmentationPass().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NOT_NULL context,
+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+
+/** \brief Ends single defragmentation pass.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.
+
+Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
+
+Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
+After this call:
+
+- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
+ (which is the default) will be pointing to the new destination place.
+- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
+ will be freed.
+
+If no more moves are possible you can end whole defragmentation.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaDefragmentationContext VMA_NOT_NULL context,
+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
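+
+/*
+A minimal sketch of the whole incremental defragmentation loop over the default pools,
+assuming a valid allocator; the per-move work (recreating resources, copying data,
+destroying old ones) is elided:
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+// Check res...
+
+for(;;)
+{
+    VmaDefragmentationPassMoveInfo pass;
+    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break; // nothing more to do
+    // ... process pass.pMoves as described in VmaDefragmentationPassMoveInfo ...
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode
+*/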
+
+/** \brief Binds buffer to allocation.
+
+Binds specified buffer to region of memory represented by specified allocation.
+Gets `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateBuffer() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
+
+/** \brief Binds buffer to allocation with additional parameters.
+
+\param allocator
+\param allocation
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+\param buffer
+\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize allocationLocalOffset,
+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+ const void* VMA_NULLABLE pNext);
+
+/** \brief Binds image to allocation.
+
+Binds specified image to region of memory represented by specified allocation.
+Gets `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindImageMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateImage() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
+
+/** \brief Binds image to allocation with additional parameters.
+
+\param allocator
+\param allocation
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+\param image
+\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ VkDeviceSize allocationLocalOffset,
+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+ const void* VMA_NULLABLE pNext);
+
+/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
+
+\param allocator
+\param pBufferCreateInfo
+\param pAllocationCreateInfo
+\param[out] pBuffer Buffer that was created.
+\param[out] pAllocation Allocation that was created.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, buffer and allocation are not created,
+returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+VK_KHR_dedicated_allocation extension is used internally to query driver whether
+it requires or prefers the new buffer to have dedicated allocation. If yes,
+and if dedicated allocation is possible
+(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
+allocation for this buffer, just like when using
+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+
+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+although recommended as a good practice, is out of scope of this library and could be implemented
+by the user as a higher-level logic on top of VMA.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a buffer with additional minimum alignment.
+
+Similar to vmaCreateBuffer(), but provides the additional parameter `minAlignment`, which allows you to specify
+a custom minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
+for interop with OpenGL.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ VkDeviceSize minAlignment,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a new `VkBuffer`, binds already created memory for it.
+
+\param allocator
+\param allocation Allocation that provides memory to be used for binding new buffer to it.
+\param pBufferCreateInfo
+\param[out] pBuffer Buffer that was created.
+
+This function automatically:
+
+-# Creates buffer.
+-# Binds the buffer with the supplied memory.
+
+If any of these operations fail, buffer is not created,
+returned value is negative error code and `*pBuffer` is null.
+
+If the function succeeded, you must destroy the buffer when you
+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+allocation you can use convenience function vmaDestroyBuffer().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
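+
+/*
+A minimal sketch, assuming a valid allocator, an existing #VmaAllocation named
+`allocation` that is large enough, and a filled `VkBufferCreateInfo` named `bufCreateInfo`:
+
+\code
+VkBuffer aliasingBuf;
+VkResult res = vmaCreateAliasingBuffer(allocator, allocation, &bufCreateInfo, &aliasingBuf);
+// Later destroy the buffer with vkDestroyBuffer(); `allocation` remains valid.
+\endcode
+*/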
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+ VmaAllocation VMA_NULLABLE allocation);
+
+/// Function similar to vmaCreateBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/// Function similar to vmaCreateAliasingBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VmaAllocation VMA_NOT_NULL allocation,
+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+ VmaAllocator VMA_NOT_NULL allocator,
+ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+ VmaAllocation VMA_NULLABLE allocation);
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \brief Creates new #VmaVirtualBlock object.
+
+\param pCreateInfo Parameters for creation.
+\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+ const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+
+/** \brief Destroys #VmaVirtualBlock object.
+
+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+if you are sure this is what you want. If you do neither, an assert is called.
+
+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+don't forget to free them.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+ VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+
+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+If the allocation fails due to insufficient free space, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+(even though the function never allocates actual GPU memory).
+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+\param virtualBlock Virtual block
+\param pCreateInfo Parameters for the allocation
+\param[out] pAllocation Returned handle of the new allocation
+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+ VkDeviceSize* VMA_NULLABLE pOffset);
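+
+/*
+A minimal sketch of sub-allocating from a virtual block:
+
+\code
+VmaVirtualBlockCreateInfo blockCreateInfo = {};
+blockCreateInfo.size = 1048576; // 1 MB, in whatever units you use consistently
+
+VmaVirtualBlock block;
+VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+// Check res...
+
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096;
+
+VmaVirtualAllocation alloc;
+VkDeviceSize offset;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+// On success, the range [offset, offset + 4096) is reserved inside the block.
+
+vmaVirtualFree(block, alloc);
+vmaDestroyVirtualBlock(block);
+\endcode
+*/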
+
+/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+
+It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+
+/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+
+You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+before destroying a virtual block. Otherwise, an assert is called.
+
+If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
+don't forget to free it as well.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Changes custom pointer associated with given virtual allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
+ void* VMA_NULLABLE pUserData);
+
+/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ VmaStatistics* VMA_NOT_NULL pStats);
+
+/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is slow to call. Use for debugging purposes.
+For less detailed statistics, see vmaGetVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ VmaDetailedStatistics* VMA_NOT_NULL pStats);
+
+/** @} */
+
+#if VMA_STATS_STRING_ENABLED
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
+\param virtualBlock Virtual block.
+\param[out] ppStatsString Returned string.
+\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
+
+Returned string must be freed using vmaFreeVirtualBlockStatsString().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+ VkBool32 detailedMap);
+
+/// Frees a string returned by vmaBuildVirtualBlockStatsString().
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+ char* VMA_NULLABLE pStatsString);
+
+/** \brief Builds and returns statistics as a null-terminated string in JSON format.
+\param allocator
+\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+\param detailedMap
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+ VmaAllocator VMA_NOT_NULL allocator,
+ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+ VkBool32 detailedMap);
+
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+ VmaAllocator VMA_NOT_NULL allocator,
+ char* VMA_NULLABLE pStatsString);
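+
+/*
+Illustrative sketch (assuming a valid VmaAllocator named allocator): dumping the
+allocator state to JSON, e.g. for logging or offline inspection:
+
+    char* statsString = VMA_NULL;
+    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = full detailed map
+    // ... write statsString to a file or log ...
+    vmaFreeStatsString(allocator, statsString);
+*/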
+
+/** @} */
+
+#endif // VMA_STATS_STRING_ENABLED
+
+#endif // _VMA_FUNCTION_HEADERS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+// IMPLEMENTATION
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+#include <type_traits>
+
+#ifdef _MSC_VER
+ #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+#endif
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+ #include <bit> // For std::popcount
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need behavior other than the default, depending on your environment.
+*/
+#ifndef _VMA_CONFIGURATION
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+ vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+ #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+ vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+
+To use this feature in new versions of VMA you now have to pass
+VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
+VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+ #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#endif
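+
+/*
+Illustrative sketch of the dynamic-fetching setup described above: only the two
+"get proc address" entry points need to be provided, the remaining members of
+VmaVulkanFunctions can stay null:
+
+    VmaVulkanFunctions vulkanFunctions = {};
+    vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+    vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+    // ... also fill instance, physicalDevice, device, etc. before vmaCreateAllocator().
+*/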
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+ // Compiler conforms to C++17.
+ #if __cplusplus >= 201703L
+ #define VMA_USE_STL_SHARED_MUTEX 1
+ // Visual Studio defines __cplusplus properly only when passed the additional parameter: /Zc:__cplusplus
+ // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+ #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+ #define VMA_USE_STL_SHARED_MUTEX 1
+ #else
+ #define VMA_USE_STL_SHARED_MUTEX 0
+ #endif
+#endif
+
+/*
+Define this macro to include custom header files without having to edit this file directly, e.g.:
+
+ // Inside of "my_vma_configuration_user_includes.h":
+
+ #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
+ #include "my_custom_min.h" // for my_custom_min
+ #include <algorithm>
+ #include <mutex>
+
+ // Inside a different file, which includes "vk_mem_alloc.h":
+
+ #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
+ #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
+ #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
+ #include "vk_mem_alloc.h"
+ ...
+
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+ #include <cassert> // for assert
+ #include <algorithm> // for min, max
+ #include <mutex>
+#else
+ #include VMA_CONFIGURATION_USER_INCLUDES_H
+#endif
+
+#ifndef VMA_NULL
+ // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+ #define VMA_NULL nullptr
+#endif
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h>
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ // alignment must be >= sizeof(void*)
+ if(alignment < sizeof(void*))
+ {
+ alignment = sizeof(void*);
+ }
+
+ return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib>
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under macOS 11.4.)
+ // Therefore, for now disable this specific exception until a proper solution is found.
+ //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+ //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+ // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
+ // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+ // // MAC_OS_X_VERSION_10_16), even though the function is marked
+ // // available for 10.15. That is why the preprocessor checks for 10.16 but
+ // // the __builtin_available checks for 10.15.
+ // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+ // if (__builtin_available(macOS 10.15, iOS 13, *))
+ // return aligned_alloc(alignment, size);
+ //#endif
+ //#endif
+
+ // alignment must be >= sizeof(void*)
+ if(alignment < sizeof(void*))
+ {
+ alignment = sizeof(void*);
+ }
+
+ void *pointer;
+ if(posix_memalign(&pointer, alignment, size) == 0)
+ return pointer;
+ return VMA_NULL;
+}
+#elif defined(_WIN32)
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ return _aligned_malloc(size, alignment);
+}
+#else
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+ return aligned_alloc(alignment, size);
+}
+#endif
+
+#if defined(_WIN32)
+static void vma_aligned_free(void* ptr)
+{
+ _aligned_free(ptr);
+}
+#else
+static void vma_aligned_free(void* VMA_NULLABLE ptr)
+{
+ free(ptr);
+}
+#endif
+
+// If your compiler is not compatible with C++11 and the definition of the
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+ #ifdef NDEBUG
+ #define VMA_ASSERT(expr)
+ #else
+ #define VMA_ASSERT(expr) assert(expr)
+ #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make program slow.
+#ifndef VMA_HEAVY_ASSERT
+ #ifdef NDEBUG
+ #define VMA_HEAVY_ASSERT(expr)
+ #else
+ #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+ #endif
+#endif
+
+#ifndef VMA_ALIGN_OF
+ #define VMA_ALIGN_OF(type) (__alignof(type))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+ #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_FREE
+ // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
+ #if defined(VMA_SYSTEM_FREE)
+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
+ #else
+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
+ #endif
+#endif
+
+#ifndef VMA_COUNT_BITS_SET
+ // Returns number of bits set to 1 in (v)
+ #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
+#endif
+
+#ifndef VMA_BITSCAN_LSB
+ // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
+ #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
+#endif
+
+#ifndef VMA_BITSCAN_MSB
+ // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
+ #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
+#endif
+
+#ifndef VMA_MIN
+ #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
+#endif
+
+#ifndef VMA_MAX
+ #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
+#endif
+
+#ifndef VMA_SWAP
+ #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+#endif
+
+#ifndef VMA_SORT
+ #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+#endif
+
+#ifndef VMA_DEBUG_LOG
+ #define VMA_DEBUG_LOG(format, ...)
+ /*
+ #define VMA_DEBUG_LOG(format, ...) do { \
+ printf(format, __VA_ARGS__); \
+ printf("\n"); \
+ } while(false)
+ */
+#endif
+
+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+#if VMA_STATS_STRING_ENABLED
+ static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+ {
+ snprintf(outStr, strLen, "%u", static_cast(num));
+ }
+ static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+ {
+ snprintf(outStr, strLen, "%llu", static_cast(num));
+ }
+ static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+ {
+ snprintf(outStr, strLen, "%p", ptr);
+ }
+#endif
+
+#ifndef VMA_MUTEX
+ class VmaMutex
+ {
+ public:
+ void Lock() { m_Mutex.lock(); }
+ void Unlock() { m_Mutex.unlock(); }
+ bool TryLock() { return m_Mutex.try_lock(); }
+ private:
+ std::mutex m_Mutex;
+ };
+ #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+ #if VMA_USE_STL_SHARED_MUTEX
+ // Use std::shared_mutex from C++17.
+ #include <shared_mutex>
+ class VmaRWMutex
+ {
+ public:
+ void LockRead() { m_Mutex.lock_shared(); }
+ void UnlockRead() { m_Mutex.unlock_shared(); }
+ bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+ void LockWrite() { m_Mutex.lock(); }
+ void UnlockWrite() { m_Mutex.unlock(); }
+ bool TryLockWrite() { return m_Mutex.try_lock(); }
+ private:
+ std::shared_mutex m_Mutex;
+ };
+ #define VMA_RW_MUTEX VmaRWMutex
+ #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+ // Use SRWLOCK from WinAPI.
+ // Minimum supported client = Windows Vista, server = Windows Server 2008.
+ class VmaRWMutex
+ {
+ public:
+ VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+ void LockRead() { AcquireSRWLockShared(&m_Lock); }
+ void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+ bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+ void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+ void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+ bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+ private:
+ SRWLOCK m_Lock;
+ };
+ #define VMA_RW_MUTEX VmaRWMutex
+ #else
+ // Less efficient fallback: Use normal mutex.
+ class VmaRWMutex
+ {
+ public:
+ void LockRead() { m_Mutex.Lock(); }
+ void UnlockRead() { m_Mutex.Unlock(); }
+ bool TryLockRead() { return m_Mutex.TryLock(); }
+ void LockWrite() { m_Mutex.Lock(); }
+ void UnlockWrite() { m_Mutex.Unlock(); }
+ bool TryLockWrite() { return m_Mutex.TryLock(); }
+ private:
+ VMA_MUTEX m_Mutex;
+ };
+ #define VMA_RW_MUTEX VmaRWMutex
+ #endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic.
+*/
+#ifndef VMA_ATOMIC_UINT32
+ #include <atomic>
+ #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_ATOMIC_UINT64
+ #include <atomic>
+ #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+ /**
+ Every allocation will have its own memory block.
+ Define to 1 for debugging purposes only.
+ */
+ #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+#endif
+
+#ifndef VMA_MIN_ALIGNMENT
+ /**
+ Minimum alignment of all allocations, in bytes.
+ Set to more than 1 for debugging purposes. Must be power of two.
+ */
+ #ifdef VMA_DEBUG_ALIGNMENT // Old name
+ #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
+ #else
+ #define VMA_MIN_ALIGNMENT (1)
+ #endif
+#endif
+
+#ifndef VMA_DEBUG_MARGIN
+ /**
+ Minimum margin after every allocation, in bytes.
+ Set nonzero for debugging purposes only.
+ */
+ #define VMA_DEBUG_MARGIN (0)
+#endif
+
+#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
+ /**
+ Define this macro to 1 to automatically fill new allocations and destroyed
+ allocations with some bit pattern.
+ */
+ #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+#endif
+
+#ifndef VMA_DEBUG_DETECT_CORRUPTION
+ /**
+ Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
+ enable writing magic value to the margin after every allocation and
+ validating it, so that memory corruptions (out-of-bounds writes) are detected.
+ */
+ #define VMA_DEBUG_DETECT_CORRUPTION (0)
+#endif
+
+#ifndef VMA_DEBUG_GLOBAL_MUTEX
+ /**
+ Set this to 1 for debugging purposes only, to enable single mutex protecting all
+ entry calls to the library. Can be useful for debugging multithreading issues.
+ */
+ #define VMA_DEBUG_GLOBAL_MUTEX (0)
+#endif
+
+#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
+ /**
+ Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
+ Set to more than 1 for debugging purposes only. Must be power of two.
+ */
+ #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+#endif
+
+#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+ /*
+ Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
+ and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
+ */
+ #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
+#endif
+
+#ifndef VMA_SMALL_HEAP_MAX_SIZE
+ /// Maximum size of a memory heap in Vulkan to consider it "small".
+ #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+ /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
+ #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+#endif
+
+/*
+Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
+or a persistently mapped allocation is created and destroyed several times in a row.
+It keeps an additional +1 mapping of a device memory block to avoid calling the actual
+vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
+tools like RenderDoc.
+*/
+#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
+ #define VMA_MAPPING_HYSTERESIS_ENABLED 1
+#endif
+
+#ifndef VMA_CLASS_NO_COPY
+ #define VMA_CLASS_NO_COPY(className) \
+ private: \
+ className(const className&) = delete; \
+ className& operator=(const className&) = delete;
+#endif
+
+#define VMA_VALIDATE(cond) do { if(!(cond)) { \
+ VMA_ASSERT(0 && "Validation failed: " #cond); \
+ return false; \
+ } } while(false)
+
+/*******************************************************************************
+END OF CONFIGURATION
+*/
+#endif // _VMA_CONFIGURATION
+
+
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+
+// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
+// This one is tricky. Vulkan specification defines this code as available since
+// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
+// See pull request #207.
+#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
+
+
+#if VMA_STATS_STRING_ENABLED
+// Correspond to values of enum VmaSuballocationType.
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
+{
+ "FREE",
+ "UNKNOWN",
+ "BUFFER",
+ "IMAGE_UNKNOWN",
+ "IMAGE_LINEAR",
+ "IMAGE_OPTIMAL",
+};
+#endif
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+ { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
+
+#ifndef _VMA_ENUM_DECLARATIONS
+
+enum VmaSuballocationType
+{
+ VMA_SUBALLOCATION_TYPE_FREE = 0,
+ VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+ VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+ VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+enum VMA_CACHE_OPERATION
+{
+ VMA_CACHE_FLUSH,
+ VMA_CACHE_INVALIDATE
+};
+
+enum class VmaAllocationRequestType
+{
+ Normal,
+ TLSF,
+ // Used by "Linear" algorithm.
+ UpperAddress,
+ EndOf1st,
+ EndOf2nd,
+};
+
+#endif // _VMA_ENUM_DECLARATIONS
+
+#ifndef _VMA_FORWARD_DECLARATIONS
+// Opaque handle used by allocation algorithms to identify a single allocation in any conforming way.
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
+
+struct VmaMutexLock;
+struct VmaMutexLockRead;
+struct VmaMutexLockWrite;
+
+template<typename T>
+struct AtomicTransactionalIncrement;
+
+template<typename T>
+struct VmaStlAllocator;
+
+template<typename T, typename AllocatorT>
+class VmaVector;
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector;
+
+template<typename T>
+class VmaPoolAllocator;
+
+template<typename T>
+struct VmaListItem;
+
+template<typename T>
+class VmaRawList;
+
+template<typename T, typename AllocatorT>
+class VmaList;
+
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList;
+
+// Unused in this version
+#if 0
+template<typename T1, typename T2>
+struct VmaPair;
+template<typename FirstT, typename SecondT>
+struct VmaPairFirstLess;
+
+template<typename KeyT, typename ValueT>
+class VmaMap;
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+class VmaStringBuilder;
+class VmaJsonWriter;
+#endif
+
+class VmaDeviceMemoryBlock;
+
+struct VmaDedicatedAllocationListItemTraits;
+class VmaDedicatedAllocationList;
+
+struct VmaSuballocation;
+struct VmaSuballocationOffsetLess;
+struct VmaSuballocationOffsetGreater;
+struct VmaSuballocationItemSizeLess;
+
+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
+
+struct VmaAllocationRequest;
+
+class VmaBlockMetadata;
+class VmaBlockMetadata_Linear;
+class VmaBlockMetadata_TLSF;
+
+class VmaBlockVector;
+
+struct VmaPoolListItemTraits;
+
+struct VmaCurrentBudgetData;
+
+class VmaAllocationObjectAllocator;
+
+#endif // _VMA_FORWARD_DECLARATIONS
+
+
+#ifndef _VMA_FUNCTIONS
+
+/*
+Returns number of bits set to 1 in (v).
+
+On specific platforms and compilers you can use intrinsics like:
+
+Visual Studio:
+ return __popcnt(v);
+GCC, Clang:
+ return static_cast<uint32_t>(__builtin_popcount(v));
+
+Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
+But then you need to check at runtime whether the user's CPU supports these, as some old processors don't.
+*/
+static inline uint32_t VmaCountBitsSet(uint32_t v)
+{
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+ return std::popcount(v);
+#else
+ uint32_t c = v - ((v >> 1) & 0x55555555);
+ c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+ c = ((c >> 4) + c) & 0x0F0F0F0F;
+ c = ((c >> 8) + c) & 0x00FF00FF;
+ c = ((c >> 16) + c) & 0x0000FFFF;
+ return c;
+#endif
+}
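+
+// Worked example for the fallback path above: for v = 0xB2 (binary 10110010),
+// the shift-and-mask steps sum bits in progressively wider groups, giving
+// VmaCountBitsSet(0xB2) == 4.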
+
+static inline uint8_t VmaBitScanLSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long pos;
+ if (_BitScanForward64(&pos, mask))
+ return static_cast<uint8_t>(pos);
+ return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+ return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
+#else
+ uint8_t pos = 0;
+ uint64_t bit = 1;
+ do
+ {
+ if (mask & bit)
+ return pos;
+ bit <<= 1;
+ } while (pos++ < 63);
+ return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanLSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+ unsigned long pos;
+ if (_BitScanForward(&pos, mask))
+ return static_cast<uint8_t>(pos);
+ return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+ return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
+#else
+ uint8_t pos = 0;
+ uint32_t bit = 1;
+ do
+ {
+ if (mask & bit)
+ return pos;
+ bit <<= 1;
+ } while (pos++ < 31);
+ return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanMSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long pos;
+ if (_BitScanReverse64(&pos, mask))
+ return static_cast<uint8_t>(pos);
+#elif defined __GNUC__ || defined __clang__
+ if (mask)
+ return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
+#else
+ uint8_t pos = 63;
+ uint64_t bit = 1ULL << 63;
+ do
+ {
+ if (mask & bit)
+ return pos;
+ bit >>= 1;
+ } while (pos-- > 0);
+#endif
+ return UINT8_MAX;
+}
+
+static inline uint8_t VmaBitScanMSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+ unsigned long pos;
+ if (_BitScanReverse(&pos, mask))
+ return static_cast<uint8_t>(pos);
+#elif defined __GNUC__ || defined __clang__
+ if (mask)
+ return 31 - static_cast<uint8_t>(__builtin_clz(mask));
+#else
+ uint8_t pos = 31;
+ uint32_t bit = 1UL << 31;
+ do
+ {
+ if (mask & bit)
+ return pos;
+ bit >>= 1;
+ } while (pos-- > 0);
+#endif
+ return UINT8_MAX;
+}
+
+/*
+Returns true if given number is a power of two.
+T must be an unsigned integer, or a signed integer that is always nonnegative.
+For 0 returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+ return (x & (x - 1)) == 0;
+}
+
+// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T alignment)
+{
+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+ return (val + alignment - 1) & ~(alignment - 1);
+}
+
+// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T alignment)
+{
+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+ return val & ~(alignment - 1);
+}
+
+// Division with mathematical rounding to nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y)
+{
+ return (x + (y / (T)2)) / y;
+}
+
+// Divide by 'y' and round up to nearest integer.
+template <typename T>
+static inline T VmaDivideRoundingUp(T x, T y)
+{
+ return (x + y - (T)1) / y;
+}
+
+// Returns smallest power of 2 greater or equal to v.
+static inline uint32_t VmaNextPow2(uint32_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+
+static inline uint64_t VmaNextPow2(uint64_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ v++;
+ return v;
+}
+
+// Returns largest power of 2 less or equal to v.
+static inline uint32_t VmaPrevPow2(uint32_t v)
+{
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v = v ^ (v >> 1);
+ return v;
+}
+
+static inline uint64_t VmaPrevPow2(uint64_t v)
+{
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ v = v ^ (v >> 1);
+ return v;
+}
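+
+// Worked examples: VmaNextPow2(17u) == 32 and VmaNextPow2(32u) == 32;
+// VmaPrevPow2(17u) == 16 and VmaPrevPow2(32u) == 32.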
+
+static inline bool VmaStrIsEmpty(const char* pStr)
+{
+ return pStr == VMA_NULL || *pStr == '\0';
+}
+
+#ifndef VMA_SORT
+template<typename Iterator, typename Compare>
+Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
+{
+ Iterator centerValue = end; --centerValue;
+ Iterator insertIndex = beg;
+ for (Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
+ {
+ if (cmp(*memTypeIndex, *centerValue))
+ {
+ if (insertIndex != memTypeIndex)
+ {
+ VMA_SWAP(*memTypeIndex, *insertIndex);
+ }
+ ++insertIndex;
+ }
+ }
+ if (insertIndex != centerValue)
+ {
+ VMA_SWAP(*insertIndex, *centerValue);
+ }
+ return insertIndex;
+}
+
+template<typename Iterator, typename Compare>
+void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
+{
+ if (beg < end)
+ {
+ Iterator it = VmaQuickSortPartition(beg, end, cmp);
+ VmaQuickSort(beg, it, cmp);
+ VmaQuickSort(it + 1, end, cmp);
+ }
+}
+
+#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
+#endif // VMA_SORT
+
+/*
+Returns true if two memory blocks occupy overlapping pages.
+ResourceA must be at a smaller memory offset than ResourceB.
+
+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+*/
+static inline bool VmaBlocksOnSamePage(
+ VkDeviceSize resourceAOffset,
+ VkDeviceSize resourceASize,
+ VkDeviceSize resourceBOffset,
+ VkDeviceSize pageSize)
+{
+ VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+ VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+ VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+ VkDeviceSize resourceBStart = resourceBOffset;
+ VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+ return resourceAEndPage == resourceBStartPage;
+}
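+
+// Worked example: with pageSize = 4096, a resource at offset 0 with size 4000
+// ends at byte 3999 (page 0), and a resource starting at offset 4000 also starts
+// on page 0, so the function returns true. If resource B instead started at
+// offset 4096, it would start on page 1 and the function would return false.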
+
+/*
+Returns true if given suballocation types could conflict and must respect
+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
+or linear image and another one is optimal image. If type is unknown, behave
+conservatively.
+*/
+static inline bool VmaIsBufferImageGranularityConflict(
+ VmaSuballocationType suballocType1,
+ VmaSuballocationType suballocType2)
+{
+ if (suballocType1 > suballocType2)
+ {
+ VMA_SWAP(suballocType1, suballocType2);
+ }
+
+ switch (suballocType1)
+ {
+ case VMA_SUBALLOCATION_TYPE_FREE:
+ return false;
+ case VMA_SUBALLOCATION_TYPE_UNKNOWN:
+ return true;
+ case VMA_SUBALLOCATION_TYPE_BUFFER:
+ return
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
+ return
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
+ return
+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+ case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
+ return false;
+ default:
+ VMA_ASSERT(0);
+ return true;
+ }
+}
+
+static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+{
+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+ uint32_t* pDst = (uint32_t*)((char*)pData + offset);
+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+ for (size_t i = 0; i < numberCount; ++i, ++pDst)
+ {
+ *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
+ }
+#else
+ // no-op
+#endif
+}
+
+static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+{
+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+ const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+ for (size_t i = 0; i < numberCount; ++i, ++pSrc)
+ {
+ if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
+ {
+ return false;
+ }
+ }
+#endif
+ return true;
+}
+
+/*
+Fills structure with parameters of an example buffer to be used for transfers
+during GPU memory defragmentation.
+*/
+static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+{
+ memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
+ outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
+}
+
+
+/*
+Performs binary search and returns iterator to first element that is greater or
+equal to (key), according to comparison (cmp).
+
+Cmp should return true if first argument is less than second argument.
+
+Returned value is the found element, if present in the collection, or the place
+where a new element with value (key) should be inserted.
+*/
+template <typename CmpLess, typename IterT, typename KeyT>
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+{
+ size_t down = 0, up = (end - beg);
+ while (down < up)
+ {
+ const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
+ if (cmp(*(beg + mid), key))
+ {
+ down = mid + 1;
+ }
+ else
+ {
+ up = mid;
+ }
+ }
+ return beg + down;
+}
+
+template<typename CmpLess, typename IterT, typename KeyT>
+IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+{
+ IterT it = VmaBinaryFindFirstNotLess(
+ beg, end, value, cmp);
+ if (it == end ||
+ (!cmp(*it, value) && !cmp(value, *it)))
+ {
+ return it;
+ }
+ return end;
+}
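+
+/*
+Illustrative sketch (local names are hypothetical): locating the first element
+not less than a key in a sorted array, i.e. the insertion point for offset 100:
+
+    const VkDeviceSize offsets[] = { 0, 64, 256 };
+    struct OffsetLess { bool operator()(VkDeviceSize a, VkDeviceSize b) const { return a < b; } };
+    // Returns a pointer to 256 - the first element not less than 100:
+    const VkDeviceSize* it = VmaBinaryFindFirstNotLess(offsets, offsets + 3, (VkDeviceSize)100, OffsetLess());
+*/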
+
+/*
+Returns true if all pointers in the array are not-null and unique.
+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+T must be pointer type, e.g. VmaAllocation, VmaPool.
+*/
+template<typename T>
+static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+{
+ for (uint32_t i = 0; i < count; ++i)
+ {
+ const T iPtr = arr[i];
+ if (iPtr == VMA_NULL)
+ {
+ return false;
+ }
+ for (uint32_t j = i + 1; j < count; ++j)
+ {
+ if (iPtr == arr[j])
+ {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+template<typename MainT, typename NewT>
+static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+{
+ newStruct->pNext = mainStruct->pNext;
+ mainStruct->pNext = newStruct;
+}
+
+// This is the main algorithm that guides the selection of a memory type best for an allocation -
+// converts usage to required/preferred/not preferred flags.
+static bool FindMemoryPreferences(
+ bool isIntegratedGPU,
+ const VmaAllocationCreateInfo& allocCreateInfo,
+ VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
+ VkMemoryPropertyFlags& outRequiredFlags,
+ VkMemoryPropertyFlags& outPreferredFlags,
+ VkMemoryPropertyFlags& outNotPreferredFlags)
+{
+ outRequiredFlags = allocCreateInfo.requiredFlags;
+ outPreferredFlags = allocCreateInfo.preferredFlags;
+ outNotPreferredFlags = 0;
+
+ switch(allocCreateInfo.usage)
+ {
+ case VMA_MEMORY_USAGE_UNKNOWN:
+ break;
+ case VMA_MEMORY_USAGE_GPU_ONLY:
+ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+ {
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ break;
+ case VMA_MEMORY_USAGE_CPU_ONLY:
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ break;
+ case VMA_MEMORY_USAGE_CPU_TO_GPU:
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+ {
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ break;
+ case VMA_MEMORY_USAGE_GPU_TO_CPU:
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ break;
+ case VMA_MEMORY_USAGE_CPU_COPY:
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ break;
+ case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
+ outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+ break;
+ case VMA_MEMORY_USAGE_AUTO:
+ case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
+ case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
+ {
+ if(bufImgUsage == UINT32_MAX)
+ {
+ VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
+ return false;
+ }
+ // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
+ const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
+ const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
+ const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
+ const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
+ const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+ const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
+
+ // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
+ if(hostAccessRandom)
+ {
+ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+ {
+ // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
+ // Omitting HOST_VISIBLE here is intentional.
+ // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
+ // Otherwise, this will give the same weight to DEVICE_LOCAL as to HOST_VISIBLE | HOST_CACHED and select the former if it occurs first on the list.
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ }
+ else
+ {
+ // Always CPU memory, cached.
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+ }
+ }
+ // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
+ else if(hostAccessSequentialWrite)
+ {
+ // Want uncached and write-combined.
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
+ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+ {
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ }
+ else
+ {
+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
+ if(deviceAccess)
+ {
+ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
+ if(preferHost)
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ else
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
+ else
+ {
+ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
+ if(preferDevice)
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ else
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ }
+ }
+ // No CPU access
+ else
+ {
+ // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
+ if(deviceAccess)
+ {
+ // ...unless there is a clear preference from the user not to do so.
+ if(preferHost)
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ else
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ // No direct GPU access, no CPU access, just transfers.
+ // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
+ // a "swap file" copy to free some GPU memory (then better CPU memory).
+ // Up to the user to decide. If no preference, assume the former and choose GPU memory.
+ if(preferHost)
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ else
+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+
+ // Avoid DEVICE_COHERENT unless explicitly requested.
+ if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
+ (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+ {
+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
+ }
+
+ return true;
+}
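+
+// Illustrative trace of the logic above (not exhaustive): a staging buffer with
+// usage = VMA_MEMORY_USAGE_AUTO, flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
+// and bufImgUsage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT has deviceAccess == false,
+// so it ends up with required = HOST_VISIBLE and notPreferred including
+// HOST_CACHED and DEVICE_LOCAL - i.e. plain write-combined CPU memory is preferred.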
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+{
+ void* result = VMA_NULL;
+ if ((pAllocationCallbacks != VMA_NULL) &&
+ (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+ {
+ result = (*pAllocationCallbacks->pfnAllocation)(
+ pAllocationCallbacks->pUserData,
+ size,
+ alignment,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ }
+ else
+ {
+ result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+ }
+ VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+ return result;
+}
+
+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+{
+ if ((pAllocationCallbacks != VMA_NULL) &&
+ (pAllocationCallbacks->pfnFree != VMA_NULL))
+ {
+ (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+ }
+ else
+ {
+ VMA_SYSTEM_ALIGNED_FREE(ptr);
+ }
+}
+
+template<typename T>
+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+{
+ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
+
+template<typename T>
+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+{
+ ptr->~T();
+ VmaFree(pAllocationCallbacks, ptr);
+}
+
+template<typename T>
+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+{
+ if (ptr != VMA_NULL)
+ {
+ for (size_t i = count; i--; )
+ {
+ ptr[i].~T();
+ }
+ VmaFree(pAllocationCallbacks, ptr);
+ }
+}
+
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+{
+ if (srcStr != VMA_NULL)
+ {
+ const size_t len = strlen(srcStr);
+ char* const result = vma_new_array(allocs, char, len + 1);
+ memcpy(result, srcStr, len + 1);
+ return result;
+ }
+ return VMA_NULL;
+}
+
+#if VMA_STATS_STRING_ENABLED
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+{
+ if (srcStr != VMA_NULL)
+ {
+ char* const result = vma_new_array(allocs, char, strLen + 1);
+ memcpy(result, srcStr, strLen);
+ result[strLen] = '\0';
+ return result;
+ }
+ return VMA_NULL;
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+{
+ if (str != VMA_NULL)
+ {
+ const size_t len = strlen(str);
+ vma_delete_array(allocs, str, len + 1);
+ }
+}
+
+template<typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+ vector.data(),
+ vector.data() + vector.size(),
+ value,
+ CmpLess()) - vector.data();
+ VmaVectorInsert(vector, indexToInsert, value);
+ return indexToInsert;
+}
+
+template<typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+ CmpLess comparator;
+ typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+ vector.begin(),
+ vector.end(),
+ value,
+ comparator);
+ if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
+ {
+ size_t indexToRemove = it - vector.begin();
+ VmaVectorRemove(vector, indexToRemove);
+ return true;
+ }
+ return false;
+}
+#endif // _VMA_FUNCTIONS
+
+#ifndef _VMA_STATISTICS_FUNCTIONS
+
+static void VmaClearStatistics(VmaStatistics& outStats)
+{
+ outStats.blockCount = 0;
+ outStats.allocationCount = 0;
+ outStats.blockBytes = 0;
+ outStats.allocationBytes = 0;
+}
+
+static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
+{
+ inoutStats.blockCount += src.blockCount;
+ inoutStats.allocationCount += src.allocationCount;
+ inoutStats.blockBytes += src.blockBytes;
+ inoutStats.allocationBytes += src.allocationBytes;
+}
+
+static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
+{
+ VmaClearStatistics(outStats.statistics);
+ outStats.unusedRangeCount = 0;
+ outStats.allocationSizeMin = VK_WHOLE_SIZE;
+ outStats.allocationSizeMax = 0;
+ outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
+ outStats.unusedRangeSizeMax = 0;
+}
+
+static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+{
+ inoutStats.statistics.allocationCount++;
+ inoutStats.statistics.allocationBytes += size;
+ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
+ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
+}
+
+static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+{
+ inoutStats.unusedRangeCount++;
+ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
+}
+
+static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
+{
+ VmaAddStatistics(inoutStats.statistics, src.statistics);
+ inoutStats.unusedRangeCount += src.unusedRangeCount;
+ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
+ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
+ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
+}
+
+#endif // _VMA_STATISTICS_FUNCTIONS
+
+#ifndef _VMA_MUTEX_LOCK
+// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
+struct VmaMutexLock
+{
+ VMA_CLASS_NO_COPY(VmaMutexLock)
+public:
+ VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
+ m_pMutex(useMutex ? &mutex : VMA_NULL)
+ {
+ if (m_pMutex) { m_pMutex->Lock(); }
+ }
+ ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
+
+private:
+ VMA_MUTEX* m_pMutex;
+};
+
+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
+struct VmaMutexLockRead
+{
+ VMA_CLASS_NO_COPY(VmaMutexLockRead)
+public:
+ VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
+ m_pMutex(useMutex ? &mutex : VMA_NULL)
+ {
+ if (m_pMutex) { m_pMutex->LockRead(); }
+ }
+ ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
+
+private:
+ VMA_RW_MUTEX* m_pMutex;
+};
+
+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
+struct VmaMutexLockWrite
+{
+ VMA_CLASS_NO_COPY(VmaMutexLockWrite)
+public:
+ VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
+ : m_pMutex(useMutex ? &mutex : VMA_NULL)
+ {
+ if (m_pMutex) { m_pMutex->LockWrite(); }
+ }
+ ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
+
+private:
+ VMA_RW_MUTEX* m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+ static VMA_MUTEX gDebugGlobalMutex;
+ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+#endif // _VMA_MUTEX_LOCK
+
+#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
+template<typename T>
+struct AtomicTransactionalIncrement
+{
+public:
+ typedef std::atomic<T> AtomicT;
+
+ ~AtomicTransactionalIncrement()
+ {
+ if(m_Atomic)
+ --(*m_Atomic);
+ }
+
+ void Commit() { m_Atomic = nullptr; }
+ T Increment(AtomicT* atomic)
+ {
+ m_Atomic = atomic;
+ return m_Atomic->fetch_add(1);
+ }
+
+private:
+ AtomicT* m_Atomic = nullptr;
+};
+#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
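+
+/*
+Illustrative sketch (maxCount is a hypothetical limit): reserving a slot against
+a capacity limit, with automatic rollback if the scope is left without Commit():
+
+    std::atomic<uint32_t> count{0};
+    AtomicTransactionalIncrement<uint32_t> transaction;
+    if (transaction.Increment(&count) + 1 > maxCount)
+        return VK_ERROR_TOO_MANY_OBJECTS; // destructor decrements count back
+    // ... do the work that justified the increment ...
+    transaction.Commit(); // keep the incremented value
+*/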
+
+#ifndef _VMA_STL_ALLOCATOR
+// STL-compatible allocator.
+template<typename T>
+struct VmaStlAllocator
+{
+ const VkAllocationCallbacks* const m_pCallbacks;
+ typedef T value_type;
+
+ VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
+ template<typename U>
+ VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
+ VmaStlAllocator(const VmaStlAllocator&) = default;
+ VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
+
+ T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+ void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+ template<typename U>
+ bool operator==(const VmaStlAllocator<U>& rhs) const
+ {
+ return m_pCallbacks == rhs.m_pCallbacks;
+ }
+ template<typename U>
+ bool operator!=(const VmaStlAllocator<U>& rhs) const
+ {
+ return m_pCallbacks != rhs.m_pCallbacks;
+ }
+};
+#endif // _VMA_STL_ALLOCATOR
+
+#ifndef _VMA_VECTOR
+/* Class with interface compatible with subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template<typename T, typename AllocatorT>
+class VmaVector
+{
+public:
+ typedef T value_type;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ VmaVector(const AllocatorT& allocator);
+ VmaVector(size_t count, const AllocatorT& allocator);
+ // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+ // value is unused.
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
+ VmaVector(const VmaVector& src);
+ VmaVector& operator=(const VmaVector& rhs);
+ ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
+
+ bool empty() const { return m_Count == 0; }
+ size_t size() const { return m_Count; }
+ T* data() { return m_pArray; }
+ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+ const T* data() const { return m_pArray; }
+ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+
+ iterator begin() { return m_pArray; }
+ iterator end() { return m_pArray + m_Count; }
+ const_iterator cbegin() const { return m_pArray; }
+ const_iterator cend() const { return m_pArray + m_Count; }
+ const_iterator begin() const { return cbegin(); }
+ const_iterator end() const { return cend(); }
+
+ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
+ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
+ void push_front(const T& src) { insert(0, src); }
+
+ void push_back(const T& src);
+ void reserve(size_t newCapacity, bool freeMemory = false);
+ void resize(size_t newCount);
+ void clear() { resize(0); }
+ void shrink_to_fit();
+ void insert(size_t index, const T& src);
+ void remove(size_t index);
+
+ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+
+private:
+ AllocatorT m_Allocator;
+ T* m_pArray;
+ size_t m_Count;
+ size_t m_Capacity;
+};
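+
+/*
+Illustrative sketch (pAllocationCallbacks is a hypothetical VkAllocationCallbacks
+pointer): VmaVector is typically instantiated with VmaStlAllocator declared above,
+so element storage goes through the same allocation callbacks as everything else:
+
+    VmaStlAllocator<uint32_t> alloc(pAllocationCallbacks);
+    VmaVector<uint32_t, VmaStlAllocator<uint32_t>> v(alloc);
+    v.push_back(42);
+    v.push_back(7);
+    // Note: only POD element types are safe here - see the comment above the class.
+*/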
+
+#ifndef _VMA_VECTOR_FUNCTIONS
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
+ : m_Allocator(allocator),
+ m_pArray(VMA_NULL),
+ m_Count(0),
+ m_Capacity(0) {}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
+ : m_Allocator(allocator),
+ m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+ m_Count(count),
+ m_Capacity(count) {}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
+ : m_Allocator(src.m_Allocator),
+ m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+ m_Count(src.m_Count),
+ m_Capacity(src.m_Count)
+{
+ if (m_Count != 0)
+ {
+ memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+ }
+}
+
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
+{
+ if (&rhs != this)
+ {
+ resize(rhs.m_Count);
+ if (m_Count != 0)
+ {
+ memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+ }
+ }
+ return *this;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::push_back(const T& src)
+{
+ const size_t newIndex = size();
+ resize(newIndex + 1);
+ m_pArray[newIndex] = src;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
+{
+ newCapacity = VMA_MAX(newCapacity, m_Count);
+
+ if ((newCapacity < m_Capacity) && !freeMemory)
+ {
+ newCapacity = m_Capacity;
+ }
+
+ if (newCapacity != m_Capacity)
+ {
+ T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+ if (m_Count != 0)
+ {
+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = newCapacity;
+ m_pArray = newArray;
+ }
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::resize(size_t newCount)
+{
+ size_t newCapacity = m_Capacity;
+ if (newCount > m_Capacity)
+ {
+ newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+ }
+
+ if (newCapacity != m_Capacity)
+ {
+ T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+ const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+ if (elementsToCopy != 0)
+ {
+ memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = newCapacity;
+ m_pArray = newArray;
+ }
+
+ m_Count = newCount;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::shrink_to_fit()
+{
+ if (m_Capacity > m_Count)
+ {
+ T* newArray = VMA_NULL;
+ if (m_Count > 0)
+ {
+ newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
+ }
+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+ m_Capacity = m_Count;
+ m_pArray = newArray;
+ }
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
+{
+ VMA_HEAVY_ASSERT(index <= m_Count);
+ const size_t oldCount = size();
+ resize(oldCount + 1);
+ if (index < oldCount)
+ {
+ memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+ }
+ m_pArray[index] = src;
+}
+
+template<typename T, typename AllocatorT>
+void VmaVector<T, AllocatorT>::remove(size_t index)
+{
+ VMA_HEAVY_ASSERT(index < m_Count);
+ const size_t oldCount = size();
+ if (index < oldCount - 1)
+ {
+ memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+ }
+ resize(oldCount - 1);
+}
+#endif // _VMA_VECTOR_FUNCTIONS
+
+template<typename T, typename allocatorT>
+static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+{
+ vec.insert(index, item);
+}
+
+template<typename T, typename allocatorT>
+static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+{
+ vec.remove(index);
+}
+#endif // _VMA_VECTOR
+
+#ifndef _VMA_SMALL_VECTOR
+/*
+This is a vector (a variable-sized array), optimized for the case when the array is small.
+
+It contains some number of elements in-place, which allows it to avoid heap allocation
+when the actual number of elements is below that threshold. This allows normal "small"
+cases to be fast without losing generality for large inputs.
+*/
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector
+{
+public:
+ typedef T value_type;
+ typedef T* iterator;
+
+ VmaSmallVector(const AllocatorT& allocator);
+ VmaSmallVector(size_t count, const AllocatorT& allocator);
+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+ VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+ VmaSmallVector& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+ ~VmaSmallVector() = default;
+
+ bool empty() const { return m_Count == 0; }
+ size_t size() const { return m_Count; }
+ T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
+ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
+ const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
+ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
+
+ iterator begin() { return data(); }
+ iterator end() { return data() + m_Count; }
+
+ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
+ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
+ void push_front(const T& src) { insert(0, src); }
+
+ void push_back(const T& src);
+ void resize(size_t newCount, bool freeMemory = false);
+ void clear(bool freeMemory = false);
+ void insert(size_t index, const T& src);
+ void remove(size_t index);
+
+ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
+ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
+
+private:
+ size_t m_Count;
+ T m_StaticArray[N]; // Used when m_Count <= N
+ VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
+};
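+
+/*
+Illustrative sketch (pAllocationCallbacks is a hypothetical VkAllocationCallbacks
+pointer): with N = 4, the first four elements live in m_StaticArray; pushing a
+fifth migrates all of them into the heap-backed m_DynamicArray:
+
+    VmaStlAllocator<uint32_t> alloc(pAllocationCallbacks);
+    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv(alloc);
+    for (uint32_t i = 0; i < 5; ++i)
+        sv.push_back(i); // heap allocation happens only on the last iteration
+*/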
+
+#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
+template<typename T, typename AllocatorT, size_t N>
+VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
+ : m_Count(0),
+ m_DynamicArray(allocator) {}
+
+template<typename T, typename AllocatorT, size_t N>
+VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
+ : m_Count(count),
+ m_DynamicArray(count > N ? count : 0, allocator) {}
+
+template<typename T, typename AllocatorT, size_t N>
+void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
+{
+ const size_t newIndex = size();
+ resize(newIndex + 1);
+ data()[newIndex] = src;
+}
+
+template<typename T, typename AllocatorT, size_t N>
+void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
+{
+ if (newCount > N && m_Count > N)
+ {
+ // Any direction, staying in m_DynamicArray
+ m_DynamicArray.resize(newCount);
+ if (freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ }
+ else if (newCount > N && m_Count <= N)
+ {
+ // Growing, moving from m_StaticArray to m_DynamicArray
+ m_DynamicArray.resize(newCount);
+ if (m_Count > 0)
+ {
+ memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
+ }
+ }
+ else if (newCount <= N && m_Count > N)
+ {
+ // Shrinking, moving from m_DynamicArray to m_StaticArray
+ if (newCount > 0)
+ {
+ memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
+ }
+ m_DynamicArray.resize(0);
+ if (freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ }
+ else
+ {
+ // Any direction, staying in m_StaticArray - nothing to do here
+ }
+ m_Count = newCount;
+}
+
+template<typename T, typename AllocatorT, size_t N>
+void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
+{
+ m_DynamicArray.clear();
+ if (freeMemory)
+ {
+ m_DynamicArray.shrink_to_fit();
+ }
+ m_Count = 0;
+}
+
+template<typename T, typename AllocatorT, size_t N>
+void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
+{
+ VMA_HEAVY_ASSERT(index <= m_Count);
+ const size_t oldCount = size();
+ resize(oldCount + 1);
+ T* const dataPtr = data();
+ if (index < oldCount)
+ {
+ // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
+ memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
+ }
+ dataPtr[index] = src;
+}
+
+template<typename T, typename AllocatorT, size_t N>
+void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
+{
+ VMA_HEAVY_ASSERT(index < m_Count);
+ const size_t oldCount = size();
+ if (index < oldCount - 1)
+ {
+ // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
+ T* const dataPtr = data();
+ memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
+ }
+ resize(oldCount - 1);
+}
+#endif // _VMA_SMALL_VECTOR_FUNCTIONS
+#endif // _VMA_SMALL_VECTOR
+
+#ifndef _VMA_POOL_ALLOCATOR
+/*
+Allocator for objects of type T using a list of arrays (pools) to speed up
+allocation. Number of elements that can be allocated is not bounded because
+allocator can create multiple blocks.
+*/
+template<typename T>
+class VmaPoolAllocator
+{
+ VMA_CLASS_NO_COPY(VmaPoolAllocator)
+public:
+ VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
+ ~VmaPoolAllocator();
+ template<typename... Types> T* Alloc(Types&&... args);
+ void Free(T* ptr);
+
+private:
+ union Item
+ {
+ uint32_t NextFreeIndex;
+ alignas(T) char Value[sizeof(T)];
+ };
+ struct ItemBlock
+ {
+ Item* pItems;
+ uint32_t Capacity;
+ uint32_t FirstFreeIndex;
+ };
+
+ const VkAllocationCallbacks* m_pAllocationCallbacks;
+ const uint32_t m_FirstBlockCapacity;
+ VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
+
+ ItemBlock& CreateNewBlock();
+};
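+
+/*
+Illustrative sketch (MyItem is a hypothetical class type): objects are constructed
+in pooled blocks, and Free() returns a slot to its block's free list:
+
+    VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 32); // first block holds 32 items
+    MyItem* item = pool.Alloc(); // Alloc() forwards constructor arguments, if any
+    pool.Free(item);
+*/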
+
+#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
+template<typename T>
+VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
+ : m_pAllocationCallbacks(pAllocationCallbacks),
+ m_FirstBlockCapacity(firstBlockCapacity),
+ m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
+{
+ VMA_ASSERT(m_FirstBlockCapacity > 1);
+}
+
+template<typename T>
+VmaPoolAllocator<T>::~VmaPoolAllocator()
+{
+ for (size_t i = m_ItemBlocks.size(); i--;)
+ vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+ m_ItemBlocks.clear();
+}
+
+template<typename T>
+template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
+{
+ for (size_t i = m_ItemBlocks.size(); i--; )
+ {
+ ItemBlock& block = m_ItemBlocks[i];
+ // This block has some free items: use the first one.
+ if (block.FirstFreeIndex != UINT32_MAX)
+ {
+ Item* const pItem = &block.pItems[block.FirstFreeIndex];
+ block.FirstFreeIndex = pItem->NextFreeIndex;
+ T* result = (T*)&pItem->Value;
+ new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
+ return result;
+ }
+ }
+
+ // No block has a free item: create a new one and use it.
+ ItemBlock& newBlock = CreateNewBlock();
+ Item* const pItem = &newBlock.pItems[0];
+ newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+ T* result = (T*)&pItem->Value;
+ new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
+ return result;
+}
+
+template<typename T>
+void VmaPoolAllocator<T>::Free(T* ptr)
+{
+ // Search all memory blocks to find ptr.
+ for (size_t i = m_ItemBlocks.size(); i--; )
+ {
+ ItemBlock& block = m_ItemBlocks[i];
+
+ // Cast back to union Item* (memcpy avoids a type-punning cast).
+ Item* pItemPtr;
+ memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+
+ // Check if pItemPtr is in address range of this block.
+ if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
+ {
+ ptr->~T(); // Explicit destructor call.
+ const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+ pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+ block.FirstFreeIndex = index;
+ return;
+ }
+ }
+ VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+}
+
+template<typename T>
+typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+{
+ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
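+ // Growing each block by 1.5x keeps the number of blocks logarithmic in the
+ // total number of items ever allocated.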
+
+ const ItemBlock newBlock =
+ {
+ vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+ newBlockCapacity,
+ 0
+ };
+
+ m_ItemBlocks.push_back(newBlock);
+
+ // Set up a singly-linked list of all free items in this block.
+ for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
+ newBlock.pItems[i].NextFreeIndex = i + 1;
+ newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
+ return m_ItemBlocks.back();
+}
+#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
+#endif // _VMA_POOL_ALLOCATOR
+
+#ifndef _VMA_RAW_LIST
+template<typename T>
+struct VmaListItem
+{
+ VmaListItem<T>* pPrev;
+ VmaListItem<T>* pNext;
+ T Value;
+};
+
+// Doubly linked list whose nodes are allocated from an internal VmaPoolAllocator.
+template<typename T>
+class VmaRawList
+{
+ VMA_CLASS_NO_COPY(VmaRawList)
+public:
+ typedef VmaListItem<T> ItemType;
+
+ VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
+ // Intentionally not calling Clear, because that would spend unnecessary
+ // computation returning every item to m_ItemAllocator as free.
+ ~VmaRawList() = default;
+
+ size_t GetCount() const { return m_Count; }
+ bool IsEmpty() const { return m_Count == 0; }
+
+ ItemType* Front() { return m_pFront; }
+ ItemType* Back() { return m_pBack; }
+ const ItemType* Front() const { return m_pFront; }
+ const ItemType* Back() const { return m_pBack; }
+
+ ItemType* PushFront();
+ ItemType* PushBack();
+ ItemType* PushFront(const T& value);
+ ItemType* PushBack(const T& value);
+ void PopFront();
+ void PopBack();
+
+ // pItem can be null, meaning PushBack.
+ ItemType* InsertBefore(ItemType* pItem);
+ // pItem can be null, meaning PushFront.
+ ItemType* InsertAfter(ItemType* pItem);
+ ItemType* InsertBefore(ItemType* pItem, const T& value);
+ ItemType* InsertAfter(ItemType* pItem, const T& value);
+
+ void Clear();
+ void Remove(ItemType* pItem);
+
+private:
+ const VkAllocationCallbacks* const m_pAllocationCallbacks;
+ VmaPoolAllocator<ItemType> m_ItemAllocator;
+ ItemType* m_pFront;
+ ItemType* m_pBack;
+ size_t m_Count;
+};
+
+#ifndef _VMA_RAW_LIST_FUNCTIONS
+template<typename T>
+VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
+ : m_pAllocationCallbacks(pAllocationCallbacks),
+ m_ItemAllocator(pAllocationCallbacks, 128),
+ m_pFront(VMA_NULL),
+ m_pBack(VMA_NULL),
+ m_Count(0) {}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushFront()
+{
+ ItemType* const pNewItem = m_ItemAllocator.Alloc();
+ pNewItem->pPrev = VMA_NULL;
+ if (IsEmpty())
+ {
+ pNewItem->pNext = VMA_NULL;
+ m_pFront = pNewItem;
+ m_pBack = pNewItem;
+ m_Count = 1;
+ }
+ else
+ {
+ pNewItem->pNext = m_pFront;
+ m_pFront->pPrev = pNewItem;
+ m_pFront = pNewItem;
+ ++m_Count;
+ }
+ return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushBack()
+{
+ ItemType* const pNewItem = m_ItemAllocator.Alloc();
+ pNewItem->pNext = VMA_NULL;
+ if(IsEmpty())
+ {
+ pNewItem->pPrev = VMA_NULL;
+ m_pFront = pNewItem;
+ m_pBack = pNewItem;
+ m_Count = 1;
+ }
+ else
+ {
+ pNewItem->pPrev = m_pBack;
+ m_pBack->pNext = pNewItem;
+ m_pBack = pNewItem;
+ ++m_Count;
+ }
+ return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
+{
+ ItemType* const pNewItem = PushFront();
+ pNewItem->Value = value;
+ return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
+{
+ ItemType* const pNewItem = PushBack();
+ pNewItem->Value = value;
+ return pNewItem;
+}
+
+template<typename T>
+void VmaRawList<T>::PopFront()
+{
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const pFrontItem = m_pFront;
+ ItemType* const pNextItem = pFrontItem->pNext;
+ if (pNextItem != VMA_NULL)
+ {
+ pNextItem->pPrev = VMA_NULL;
+ }
+ m_pFront = pNextItem;
+ m_ItemAllocator.Free(pFrontItem);
+ --m_Count;
+}
+
+template<typename T>
+void VmaRawList<T>::PopBack()
+{
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const pBackItem = m_pBack;
+ ItemType* const pPrevItem = pBackItem->pPrev;
+ if(pPrevItem != VMA_NULL)
+ {
+ pPrevItem->pNext = VMA_NULL;
+ }
+ m_pBack = pPrevItem;
+ m_ItemAllocator.Free(pBackItem);
+ --m_Count;
+}
+
+template<typename T>
+void VmaRawList<T>::Clear()
+{
+ if (IsEmpty() == false)
+ {
+ ItemType* pItem = m_pBack;
+ while (pItem != VMA_NULL)
+ {
+ ItemType* const pPrevItem = pItem->pPrev;
+ m_ItemAllocator.Free(pItem);
+ pItem = pPrevItem;
+ }
+ m_pFront = VMA_NULL;
+ m_pBack = VMA_NULL;
+ m_Count = 0;
+ }
+}
+
+template<typename T>
+void VmaRawList<T>::Remove(ItemType* pItem)
+{
+ VMA_HEAVY_ASSERT(pItem != VMA_NULL);
+ VMA_HEAVY_ASSERT(m_Count > 0);
+
+ if(pItem->pPrev != VMA_NULL)
+ {
+ pItem->pPrev->pNext = pItem->pNext;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_pFront == pItem);
+ m_pFront = pItem->pNext;
+ }
+
+ if(pItem->pNext != VMA_NULL)
+ {
+ pItem->pNext->pPrev = pItem->pPrev;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_pBack == pItem);
+ m_pBack = pItem->pPrev;
+ }
+
+ m_ItemAllocator.Free(pItem);
+ --m_Count;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
+{
+ if(pItem != VMA_NULL)
+ {
+ ItemType* const prevItem = pItem->pPrev;
+ ItemType* const newItem = m_ItemAllocator.Alloc();
+ newItem->pPrev = prevItem;
+ newItem->pNext = pItem;
+ pItem->pPrev = newItem;
+ if(prevItem != VMA_NULL)
+ {
+ prevItem->pNext = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_pFront == pItem);
+ m_pFront = newItem;
+ }
+ ++m_Count;
+ return newItem;
+ }
+ else
+ return PushBack();
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
+{
+ if(pItem != VMA_NULL)
+ {
+ ItemType* const nextItem = pItem->pNext;
+ ItemType* const newItem = m_ItemAllocator.Alloc();
+ newItem->pNext = nextItem;
+ newItem->pPrev = pItem;
+ pItem->pNext = newItem;
+ if(nextItem != VMA_NULL)
+ {
+ nextItem->pPrev = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_pBack == pItem);
+ m_pBack = newItem;
+ }
+ ++m_Count;
+ return newItem;
+ }
+ else
+ return PushFront();
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
+{
+ ItemType* const newItem = InsertBefore(pItem);
+ newItem->Value = value;
+ return newItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
+{
+ ItemType* const newItem = InsertAfter(pItem);
+ newItem->Value = value;
+ return newItem;
+}
+#endif // _VMA_RAW_LIST_FUNCTIONS
+#endif // _VMA_RAW_LIST
+
+#ifndef _VMA_LIST
+template<typename T, typename AllocatorT>
+class VmaList
+{
+ VMA_CLASS_NO_COPY(VmaList)
+public:
+ class reverse_iterator;
+ class const_iterator;
+ class const_reverse_iterator;
+
+ class iterator
+ {
+ friend class const_iterator;
+ friend class VmaList<T, AllocatorT>;
+ public:
+ iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+ iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+ bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+ bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+ iterator operator++(int) { iterator result = *this; ++*this; return result; }
+ iterator operator--(int) { iterator result = *this; --*this; return result; }
+
+ iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+ iterator& operator--();
+
+ private:
+ VmaRawList<T>* m_pList;
+ VmaListItem<T>* m_pItem;
+
+ iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+ };
+ class reverse_iterator
+ {
+ friend class const_reverse_iterator;
+ friend class VmaList<T, AllocatorT>;
+ public:
+ reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+ reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+ bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+ bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+ reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
+ reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }
+
+ reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+ reverse_iterator& operator--();
+
+ private:
+ VmaRawList<T>* m_pList;
+ VmaListItem<T>* m_pItem;
+
+ reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+ };
+ class const_iterator
+ {
+ friend class VmaList<T, AllocatorT>;
+ public:
+ const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+ const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+ const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+ iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+
+ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+ bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+ bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+ const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
+ const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }
+
+ const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+ const_iterator& operator--();
+
+ private:
+ const VmaRawList<T>* m_pList;
+ const VmaListItem<T>* m_pItem;
+
+ const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+ };
+ class const_reverse_iterator
+ {
+ friend class VmaList<T, AllocatorT>;
+ public:
+ const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+ const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+ const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+ reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+
+ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+ bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+ bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+ const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
+ const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }
+
+ const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+ const_reverse_iterator& operator--();
+
+ private:
+ const VmaRawList<T>* m_pList;
+ const VmaListItem<T>* m_pItem;
+
+ const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+ };
+
+ VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
+
+ bool empty() const { return m_RawList.IsEmpty(); }
+ size_t size() const { return m_RawList.GetCount(); }
+
+ iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
+ iterator end() { return iterator(&m_RawList, VMA_NULL); }
+
+ const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
+ const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+
+ const_iterator begin() const { return cbegin(); }
+ const_iterator end() const { return cend(); }
+
+ reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
+ reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
+
+ const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
+ const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
+
+ const_reverse_iterator rbegin() const { return crbegin(); }
+ const_reverse_iterator rend() const { return crend(); }
+
+ void push_back(const T& value) { m_RawList.PushBack(value); }
+ iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
+
+ void clear() { m_RawList.Clear(); }
+ void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
+
+private:
+ VmaRawList<T> m_RawList;
+};
+
+#ifndef _VMA_LIST_FUNCTIONS
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
+{
+ if (m_pItem != VMA_NULL)
+ {
+ m_pItem = m_pItem->pPrev;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Back();
+ }
+ return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
+{
+ if (m_pItem != VMA_NULL)
+ {
+ m_pItem = m_pItem->pNext;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Front();
+ }
+ return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
+{
+ if (m_pItem != VMA_NULL)
+ {
+ m_pItem = m_pItem->pPrev;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Back();
+ }
+ return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
+{
+ if (m_pItem != VMA_NULL)
+ {
+ m_pItem = m_pItem->pNext;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+ m_pItem = m_pList->Front();
+ }
+ return *this;
+}
+#endif // _VMA_LIST_FUNCTIONS
+#endif // _VMA_LIST
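+
+// Illustrative usage (a sketch, not from the upstream sources; `callbacks` is
+// an assumed valid VkAllocationCallbacks*). AllocatorT only needs to expose an
+// m_pCallbacks member, as VmaStlAllocator does:
+//
+//     VmaStlAllocator<float> alloc(callbacks);
+//     VmaList<float, VmaStlAllocator<float>> list(alloc);
+//     list.push_back(1.0f);
+//     for (auto it = list.begin(); it != list.end(); ++it) { /* use *it */ }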
+
+#ifndef _VMA_INTRUSIVE_LINKED_LIST
+/*
+Expected interface of ItemTypeTraits:
+struct MyItemTypeTraits
+{
+ typedef MyItem ItemType;
+ static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
+ static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
+ static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
+ static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
+};
+*/
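+/*
+Illustrative (a sketch, not from the upstream sources): a compatible item type
+simply embeds the prev/next pointers. The list never allocates or frees items;
+it only links and unlinks them:
+
+struct MyItem
+{
+    MyItem* myPrevPtr = VMA_NULL;
+    MyItem* myNextPtr = VMA_NULL;
+};
+*/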
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList
+{
+public:
+ typedef typename ItemTypeTraits::ItemType ItemType;
+ static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
+ static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
+
+ // Movable, not copyable.
+ VmaIntrusiveLinkedList() = default;
+ VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src);
+ VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
+ VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
+ VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
+ ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
+
+ size_t GetCount() const { return m_Count; }
+ bool IsEmpty() const { return m_Count == 0; }
+ ItemType* Front() { return m_Front; }
+ ItemType* Back() { return m_Back; }
+ const ItemType* Front() const { return m_Front; }
+ const ItemType* Back() const { return m_Back; }
+
+ void PushBack(ItemType* item);
+ void PushFront(ItemType* item);
+ ItemType* PopBack();
+ ItemType* PopFront();
+
+ // existingItem can be null, meaning PushBack.
+ void InsertBefore(ItemType* existingItem, ItemType* newItem);
+ // existingItem can be null, meaning PushFront.
+ void InsertAfter(ItemType* existingItem, ItemType* newItem);
+ void Remove(ItemType* item);
+ void RemoveAll();
+
+private:
+ ItemType* m_Front = VMA_NULL;
+ ItemType* m_Back = VMA_NULL;
+ size_t m_Count = 0;
+};
+
+#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+template<typename ItemTypeTraits>
+VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
+ : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
+{
+ src.m_Front = src.m_Back = VMA_NULL;
+ src.m_Count = 0;
+}
+
+template<typename ItemTypeTraits>
+VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
+{
+ if (&src != this)
+ {
+ VMA_HEAVY_ASSERT(IsEmpty());
+ m_Front = src.m_Front;
+ m_Back = src.m_Back;
+ m_Count = src.m_Count;
+ src.m_Front = src.m_Back = VMA_NULL;
+ src.m_Count = 0;
+ }
+ return *this;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
+{
+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+ if (IsEmpty())
+ {
+ m_Front = item;
+ m_Back = item;
+ m_Count = 1;
+ }
+ else
+ {
+ ItemTypeTraits::AccessPrev(item) = m_Back;
+ ItemTypeTraits::AccessNext(m_Back) = item;
+ m_Back = item;
+ ++m_Count;
+ }
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
+{
+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+ if (IsEmpty())
+ {
+ m_Front = item;
+ m_Back = item;
+ m_Count = 1;
+ }
+ else
+ {
+ ItemTypeTraits::AccessNext(item) = m_Front;
+ ItemTypeTraits::AccessPrev(m_Front) = item;
+ m_Front = item;
+ ++m_Count;
+ }
+}
+
+template<typename ItemTypeTraits>
+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
+{
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const backItem = m_Back;
+ ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
+ if (prevItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
+ }
+ m_Back = prevItem;
+ --m_Count;
+ ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
+ ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
+ return backItem;
+}
+
+template<typename ItemTypeTraits>
+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
+{
+ VMA_HEAVY_ASSERT(m_Count > 0);
+ ItemType* const frontItem = m_Front;
+ ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
+ if (nextItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
+ }
+ m_Front = nextItem;
+ --m_Count;
+ ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
+ ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
+ return frontItem;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
+{
+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+ if (existingItem != VMA_NULL)
+ {
+ ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
+ ItemTypeTraits::AccessPrev(newItem) = prevItem;
+ ItemTypeTraits::AccessNext(newItem) = existingItem;
+ ItemTypeTraits::AccessPrev(existingItem) = newItem;
+ if (prevItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(prevItem) = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Front == existingItem);
+ m_Front = newItem;
+ }
+ ++m_Count;
+ }
+ else
+ PushBack(newItem);
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
+{
+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+ if (existingItem != VMA_NULL)
+ {
+ ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
+ ItemTypeTraits::AccessNext(newItem) = nextItem;
+ ItemTypeTraits::AccessPrev(newItem) = existingItem;
+ ItemTypeTraits::AccessNext(existingItem) = newItem;
+ if (nextItem != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(nextItem) = newItem;
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Back == existingItem);
+ m_Back = newItem;
+ }
+ ++m_Count;
+ }
+ else
+ PushFront(newItem);
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
+{
+ VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
+ if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
+ {
+ ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Front == item);
+ m_Front = ItemTypeTraits::GetNext(item);
+ }
+
+ if (ItemTypeTraits::GetNext(item) != VMA_NULL)
+ {
+ ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
+ }
+ else
+ {
+ VMA_HEAVY_ASSERT(m_Back == item);
+ m_Back = ItemTypeTraits::GetPrev(item);
+ }
+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
+ --m_Count;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
+{
+ if (!IsEmpty())
+ {
+ ItemType* item = m_Back;
+ while (item != VMA_NULL)
+ {
+ ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
+ item = prevItem;
+ }
+ m_Front = VMA_NULL;
+ m_Back = VMA_NULL;
+ m_Count = 0;
+ }
+}
+#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+#endif // _VMA_INTRUSIVE_LINKED_LIST
+
+// Unused in this version.
+#if 0
+
+#ifndef _VMA_PAIR
+template<typename T1, typename T2>
+struct VmaPair
+{
+ T1 first;
+ T2 second;
+
+ VmaPair() : first(), second() {}
+ VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
+};
+
+template<typename FirstT, typename SecondT>
+struct VmaPairFirstLess
+{
+ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair