From 3e6fed9492d0a66c426f06a7e583625346e195c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20S=C3=BC=C3=9Fenbach?= Date: Tue, 16 Jan 2024 13:48:53 +0100 Subject: [PATCH] Introduce implicit cast operators for vk::UniqueHandles, vk::SharedHandles and vk::raii::Handles (#1771) --- .../04_InitCommandBuffer.cpp | 2 +- .../05_InitSwapchain/05_InitSwapchain.cpp | 12 +- .../06_InitDepthBuffer/06_InitDepthBuffer.cpp | 4 +- .../07_InitUniformBuffer.cpp | 2 +- .../09_InitDescriptorSet.cpp | 6 +- .../10_InitRenderPass/10_InitRenderPass.cpp | 2 +- .../12_InitFrameBuffers.cpp | 6 +- .../13_InitVertexBuffer.cpp | 8 +- .../14_InitPipeline/14_InitPipeline.cpp | 10 +- RAII_Samples/15_DrawCube/15_DrawCube.cpp | 16 +- RAII_Samples/CopyBlitImage/CopyBlitImage.cpp | 22 +- .../DebugUtilsObjectName.cpp | 3 +- .../DrawTexturedCube/DrawTexturedCube.cpp | 18 +- .../DynamicUniform/DynamicUniform.cpp | 46 +-- RAII_Samples/Events/Events.cpp | 22 +- .../ImmutableSampler/ImmutableSampler.cpp | 26 +- RAII_Samples/InitTexture/InitTexture.cpp | 14 +- .../InputAttachment/InputAttachment.cpp | 22 +- RAII_Samples/MultipleSets/MultipleSets.cpp | 30 +- .../OcclusionQuery/OcclusionQuery.cpp | 34 +- RAII_Samples/PipelineCache/PipelineCache.cpp | 21 +- .../PipelineDerivative/PipelineDerivative.cpp | 30 +- RAII_Samples/PushConstants/PushConstants.cpp | 26 +- .../PushDescriptors/PushDescriptors.cpp | 24 +- .../SecondaryCommandBuffer.cpp | 24 +- .../SeparateImageSampler.cpp | 32 +- .../SurfaceCapabilities.cpp | 13 +- .../SurfaceFormats/SurfaceFormats.cpp | 2 +- RAII_Samples/Template/Template.cpp | 23 +- RAII_Samples/TexelBuffer/TexelBuffer.cpp | 18 +- RAII_Samples/utils/utils.hpp | 69 ++-- README.md | 4 + VulkanHppGenerator.cpp | 7 +- XMLHelper.hpp | 2 +- samples/SharedHandles/SharedHandles.cpp | 12 +- samples/utils/utils.cpp | 4 +- snippets/SharedHandle.hpp | 12 +- snippets/UniqueHandle.hpp | 218 +++++------ tests/NoExceptionsRAII/NoExceptionsRAII.cpp | 9 +- tests/UniqueHandle/UniqueHandle.cpp | 124 +++++-- vulkan/vulkan.hpp | 7 + vulkan/vulkan_raii.hpp | 340 +++++++++++++++--- vulkan/vulkan_shared.hpp | 7 + vulkan/vulkansc.hpp | 7 + vulkan/vulkansc_raii.hpp | 221 ++++++++++-- vulkan/vulkansc_shared.hpp | 7 + 46 files changed, 1062 insertions(+), 506 deletions(-) diff --git a/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp b/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp index f8ea9c6..a73e4a6 100644 --- a/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp +++ b/RAII_Samples/04_InitCommandBuffer/04_InitCommandBuffer.cpp @@ -43,7 +43,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::CommandPool commandPool( device, commandPoolCreateInfo ); // allocate a CommandBuffer from the CommandPool - vk::CommandBufferAllocateInfo commandBufferAllocateInfo( *commandPool, vk::CommandBufferLevel::ePrimary, 1 ); + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ); vk::raii::CommandBuffer commandBuffer = std::move( vk::raii::CommandBuffers( device, commandBufferAllocateInfo ).front() ); /* VULKAN_HPP_KEY_END */ diff --git a/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp b/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp index 51e0e3a..98ed47d 100644 --- a/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp +++ b/RAII_Samples/05_InitSwapchain/05_InitSwapchain.cpp @@ -47,7 +47,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // determine a queueFamilyIndex that suports present // first check if the graphicsQueueFamiliyIndex is good enough - uint32_t 
presentQueueFamilyIndex = physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, *surface ) + uint32_t presentQueueFamilyIndex = physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, surface ) ? graphicsQueueFamilyIndex : vk::su::checked_cast( queueFamilyProperties.size() ); if ( presentQueueFamilyIndex == queueFamilyProperties.size() ) @@ -57,7 +57,7 @@ int main( int /*argc*/, char ** /*argv*/ ) for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && - physicalDevice.getSurfaceSupportKHR( vk::su::checked_cast( i ), *surface ) ) + physicalDevice.getSurfaceSupportKHR( vk::su::checked_cast( i ), surface ) ) { graphicsQueueFamilyIndex = vk::su::checked_cast( i ); presentQueueFamilyIndex = graphicsQueueFamilyIndex; @@ -70,7 +70,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // family index that supports present for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { - if ( physicalDevice.getSurfaceSupportKHR( vk::su::checked_cast( i ), *surface ) ) + if ( physicalDevice.getSurfaceSupportKHR( vk::su::checked_cast( i ), surface ) ) { presentQueueFamilyIndex = vk::su::checked_cast( i ); break; @@ -87,11 +87,11 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsQueueFamilyIndex, vk::su::getDeviceExtensions() ); // get the supported VkFormats - std::vector formats = physicalDevice.getSurfaceFormatsKHR( *surface ); + std::vector formats = physicalDevice.getSurfaceFormatsKHR( surface ); assert( !formats.empty() ); vk::Format format = ( formats[0].format == vk::Format::eUndefined ) ? vk::Format::eB8G8R8A8Unorm : formats[0].format; - vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( *surface ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface ); vk::Extent2D swapchainExtent; if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) { @@ -119,7 +119,7 @@ int main( int /*argc*/, char ** /*argv*/ ) : vk::CompositeAlphaFlagBitsKHR::eOpaque; vk::SwapchainCreateInfoKHR swapChainCreateInfo( vk::SwapchainCreateFlagsKHR(), - *surface, + surface, vk::su::clamp( 3u, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount ), format, vk::ColorSpaceKHR::eSrgbNonlinear, diff --git a/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp b/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp index 77b2fa5..5b0d40c 100644 --- a/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp +++ b/RAII_Samples/06_InitDepthBuffer/06_InitDepthBuffer.cpp @@ -87,9 +87,9 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, typeIndex ); vk::raii::DeviceMemory depthMemory( device, memoryAllocateInfo ); - depthImage.bindMemory( *depthMemory, 0 ); + depthImage.bindMemory( depthMemory, 0 ); - vk::ImageViewCreateInfo imageViewCreateInfo( {}, *depthImage, vk::ImageViewType::e2D, depthFormat, {}, { vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 } ); + vk::ImageViewCreateInfo imageViewCreateInfo( {}, depthImage, vk::ImageViewType::e2D, depthFormat, {}, { vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 } ); vk::raii::ImageView depthView( device, imageViewCreateInfo ); // while all vk::raii objects are automatically destroyed on scope leave, the Image should to be destroyed before the bound DeviceMemory diff --git a/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp 
b/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp index 099a228..632e9ec 100644 --- a/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp +++ b/RAII_Samples/07_InitUniformBuffer/07_InitUniformBuffer.cpp @@ -76,7 +76,7 @@ int main( int /*argc*/, char ** /*argv*/ ) memcpy( pData, &mvpc, sizeof( mvpc ) ); uniformDataMemory.unmapMemory(); - uniformDataBuffer.bindMemory( *uniformDataMemory, 0 ); + uniformDataBuffer.bindMemory( uniformDataMemory, 0 ); // while all vk::raii objects are automatically destroyed on scope leave, the Buffer should to be destroyed before the bound DeviceMemory // but the standard destruction order would destroy the DeviceMemory before the Buffer, so destroy the Buffer here diff --git a/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp b/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp index 7bb1d91..da7705e 100644 --- a/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp +++ b/RAII_Samples/09_InitDescriptorSet/09_InitDescriptorSet.cpp @@ -64,11 +64,11 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); // allocate a descriptor set - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, *descriptorSetLayout ); vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ); - vk::DescriptorBufferInfo descriptorBufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::WriteDescriptorSet writeDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, descriptorBufferInfo ); + vk::DescriptorBufferInfo descriptorBufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, descriptorBufferInfo ); device.updateDescriptorSets( writeDescriptorSet, nullptr ); /* VULKAN_HPP_KEY_END */ diff --git a/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp b/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp index 3e68a24..7a1e5ce 100644 --- a/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp +++ b/RAII_Samples/10_InitRenderPass/10_InitRenderPass.cpp @@ -51,7 +51,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::Format depthFormat = vk::Format::eD16Unorm; /* VULKAN_HPP_KEY_START */ diff --git a/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp b/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp index 497705c..5de43e0 100644 --- a/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp +++ b/RAII_Samples/12_InitFrameBuffers/12_InitFrameBuffers.cpp @@ -55,14 +55,14 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ std::array attachments; - attachments[1] = *depthBufferData.imageView; + attachments[1] = depthBufferData.imageView; std::vector framebuffers; framebuffers.reserve( swapChainData.imageViews.size() ); for ( auto const & view : 
swapChainData.imageViews ) { - attachments[0] = *view; - vk::FramebufferCreateInfo framebufferCreateInfo( {}, *renderPass, attachments, surfaceData.extent.width, surfaceData.extent.height, 1 ); + attachments[0] = view; + vk::FramebufferCreateInfo framebufferCreateInfo( {}, renderPass, attachments, surfaceData.extent.width, surfaceData.extent.height, 1 ); framebuffers.push_back( vk::raii::Framebuffer( device, framebufferCreateInfo ) ); } diff --git a/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp b/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp index 4e752e3..1e955a0 100644 --- a/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp +++ b/RAII_Samples/13_InitVertexBuffer/13_InitVertexBuffer.cpp @@ -94,13 +94,13 @@ int main( int /*argc*/, char ** /*argv*/ ) deviceMemory.unmapMemory(); // and bind the device memory to the vertex buffer - vertexBuffer.bindMemory( *deviceMemory, 0 ); + vertexBuffer.bindMemory( deviceMemory, 0 ); vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -110,10 +110,10 @@ int main( int /*argc*/, char ** /*argv*/ ) commandBuffer.begin( {} ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindVertexBuffers( 0, { *vertexBuffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBuffer }, { 0 } ); commandBuffer.endRenderPass(); commandBuffer.end(); diff --git a/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp b/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp index 0e3fb93..5572e82 100644 --- a/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp +++ b/RAII_Samples/14_InitPipeline/14_InitPipeline.cpp @@ -53,7 +53,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, vk::Format::eD16Unorm ); vk::raii::DescriptorSetLayout descriptorSetLayout = @@ -68,8 +68,8 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ std::array pipelineShaderStageCreateInfos = { - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, *vertexShaderModule, "main" ), - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule, "main" ) + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main" ), + vk::PipelineShaderStageCreateInfo( {}, 
vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main" ) }; vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( coloredCubeData[0] ) ); @@ -147,8 +147,8 @@ int main( int /*argc*/, char ** /*argv*/ ) &pipelineDepthStencilStateCreateInfo, // pDepthStencilState &pipelineColorBlendStateCreateInfo, // pColorBlendState &pipelineDynamicStateCreateInfo, // pDynamicState - *pipelineLayout, // layout - *renderPass // renderPass + pipelineLayout, // layout + renderPass // renderPass ); vk::raii::Pipeline pipeline( device, nullptr, graphicsPipelineCreateInfo ); diff --git a/RAII_Samples/15_DrawCube/15_DrawCube.cpp b/RAII_Samples/15_DrawCube/15_DrawCube.cpp index 91c2acd..aceca22 100644 --- a/RAII_Samples/15_DrawCube/15_DrawCube.cpp +++ b/RAII_Samples/15_DrawCube/15_DrawCube.cpp @@ -79,7 +79,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::makeDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -94,7 +94,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::copyToDevice( vertexBufferData.deviceMemory, coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, {} ); @@ -120,7 +120,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -129,12 +129,12 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( 
vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -149,7 +149,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp b/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp index 25a08b1..2816e71 100644 --- a/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp +++ b/RAII_Samples/CopyBlitImage/CopyBlitImage.cpp @@ -35,7 +35,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::SurfaceData surfaceData( instance, AppName, vk::Extent2D( 640, 640 ) ); - vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( *surfaceData.surface ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surfaceData.surface ); if ( !( surfaceCapabilities.supportedUsageFlags & vk::ImageUsageFlagBits::eTransferDst ) ) { std::cout << "Surface cannot be destination of blit - abort \n"; @@ -73,7 +73,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Get the index of the next available swapchain image: vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -105,9 +105,9 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); deviceMemory = vk::raii::DeviceMemory( device, memoryAllocateInfo ); - blitSourceImage.bindMemory( *deviceMemory, 0 ); + blitSourceImage.bindMemory( deviceMemory, 0 ); - vk::raii::su::setImageLayout( commandBuffer, *blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eGeneral ); + vk::raii::su::setImageLayout( commandBuffer, blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eGeneral ); commandBuffer.end(); @@ -118,7 +118,7 @@ int main( int /*argc*/, char ** /*argv*/ ) graphicsQueue.submit( submitInfo, *commandFence ); /* Make sure command buffer is finished before mapping */ - while ( device.waitForFences( { *commandFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + while ( device.waitForFences( { commandFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) ; unsigned char * pImageMemory = static_cast( deviceMemory.mapMemory( 0, memoryRequirements.size ) ); @@ -138,7 +138,7 @@ int main( int /*argc*/, char ** /*argv*/ ) } // Flush the 
mapped memory and then unmap it. Assume it isn't coherent since we didn't really confirm - vk::MappedMemoryRange mappedMemoryRange( *deviceMemory, 0, memoryRequirements.size ); + vk::MappedMemoryRange mappedMemoryRange( deviceMemory, 0, memoryRequirements.size ); device.flushMappedMemoryRanges( mappedMemoryRange ); deviceMemory.unmapMemory(); @@ -148,7 +148,7 @@ int main( int /*argc*/, char ** /*argv*/ ) commandBuffer.begin( vk::CommandBufferBeginInfo() ); // Intend to blit from this image, set the layout accordingly - vk::raii::su::setImageLayout( commandBuffer, *blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eGeneral, vk::ImageLayout::eTransferSrcOptimal ); + vk::raii::su::setImageLayout( commandBuffer, blitSourceImage, swapChainData.colorFormat, vk::ImageLayout::eGeneral, vk::ImageLayout::eTransferSrcOptimal ); vk::Image blitDestinationImage = static_cast( swapChainData.images[imageIndex] ); @@ -159,7 +159,7 @@ int main( int /*argc*/, char ** /*argv*/ ) imageSubresourceLayers, { { vk::Offset3D( 0, 0, 0 ), vk::Offset3D( surfaceData.extent.width, surfaceData.extent.height, 1 ) } } ); commandBuffer.blitImage( - *blitSourceImage, vk::ImageLayout::eTransferSrcOptimal, blitDestinationImage, vk::ImageLayout::eTransferDstOptimal, imageBlit, vk::Filter::eLinear ); + blitSourceImage, vk::ImageLayout::eTransferSrcOptimal, blitDestinationImage, vk::ImageLayout::eTransferDstOptimal, imageBlit, vk::Filter::eLinear ); // Use a barrier to make sure the blit is finished before the copy starts // Note: for a layout of vk::ImageLayout::eTransferDstOptimal, the access mask is supposed to be vk::AccessFlagBits::eTransferWrite @@ -175,7 +175,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Do a image copy to part of the dst image - checks should stay small vk::ImageCopy imageCopy( imageSubresourceLayers, vk::Offset3D(), imageSubresourceLayers, vk::Offset3D( 256, 256, 0 ), vk::Extent3D( 128, 128, 1 ) ); - commandBuffer.copyImage( *blitSourceImage, vk::ImageLayout::eTransferSrcOptimal, blitDestinationImage, vk::ImageLayout::eTransferDstOptimal, imageCopy ); + commandBuffer.copyImage( blitSourceImage, vk::ImageLayout::eTransferSrcOptimal, blitDestinationImage, vk::ImageLayout::eTransferDstOptimal, imageCopy ); // Note: for a layout of vk::ImageLayout::ePresentSrcKHR, the access mask is supposed to be empty vk::ImageMemoryBarrier prePresentBarrier( vk::AccessFlagBits::eTransferWrite, @@ -191,11 +191,11 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Fence drawFence( device, vk::FenceCreateInfo() ); submitInfo = vk::SubmitInfo( {}, {}, *commandBuffer ); - graphicsQueue.submit( submitInfo, *drawFence ); + graphicsQueue.submit( submitInfo, drawFence ); graphicsQueue.waitIdle(); /* Make sure command buffer is finished before presenting */ - while ( device.waitForFences( { *drawFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) + while ( device.waitForFences( { drawFence }, true, vk::su::FenceTimeout ) == vk::Result::eTimeout ) ; /* Now present the image in the window */ diff --git a/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp b/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp index 23a9516..76c9d69 100644 --- a/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp +++ b/RAII_Samples/DebugUtilsObjectName/DebugUtilsObjectName.cpp @@ -47,7 +47,8 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ - vk::DebugUtilsObjectNameInfoEXT debugUtilsObjectNameInfo( vk::ObjectType::eImage, NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( VkImage, *image ), 
"Image name" ); + vk::DebugUtilsObjectNameInfoEXT debugUtilsObjectNameInfo( + vk::ObjectType::eImage, NON_DISPATCHABLE_HANDLE_TO_UINT64_CAST( VkImage, static_cast( image ) ), "Image name" ); device.setDebugUtilsObjectNameEXT( debugUtilsObjectNameInfo ); /* VULKAN_KEY_END */ diff --git a/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp b/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp index 1676521..6bca822 100644 --- a/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp +++ b/RAII_Samples/DrawTexturedCube/DrawTexturedCube.cpp @@ -76,7 +76,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -92,7 +92,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, { textureData } ); @@ -105,7 +105,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -117,7 +117,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -126,13 +126,13 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + 
commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -147,7 +147,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/DynamicUniform/DynamicUniform.cpp b/RAII_Samples/DynamicUniform/DynamicUniform.cpp index 6142954..201e098 100644 --- a/RAII_Samples/DynamicUniform/DynamicUniform.cpp +++ b/RAII_Samples/DynamicUniform/DynamicUniform.cpp @@ -65,7 +65,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::DepthBufferData depthBufferData( physicalDevice, device, vk::Format::eD16Unorm, surfaceData.extent ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -120,29 +120,31 @@ int main( int /*argc*/, char ** /*argv*/ ) // create a DescriptorPool with vk::DescriptorType::eUniformBufferDynamic vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBufferDynamic, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); - vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBufferDynamic, uniformBufferData.buffer, bufferSize, nullptr } }, {} ); + vk::raii::su::updateDescriptorSets( + device, descriptorSet, { { vk::DescriptorType::eUniformBufferDynamic, uniformBufferData.buffer, bufferSize, nullptr } }, {} ); vk::raii::PipelineCache pipelineCache( device, vk::PipelineCacheCreateInfo() ); - vk::raii::Pipeline graphicsPipeline = vk::raii::su::makeGraphicsPipeline( device, - pipelineCache, - vertexShaderModule, - nullptr, - fragmentShaderModule, - nullptr, - sizeof( coloredCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, - vk::FrontFace::eClockwise, - true, - pipelineLayout, - renderPass ); + vk::raii::Pipeline graphicsPipeline = + vk::raii::su::makeGraphicsPipeline( device, + pipelineCache, + vertexShaderModule, + nullptr, + fragmentShaderModule, + nullptr, + sizeof( coloredCubeData[0] ), + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32B32A32Sfloat, 16 } }, + 
vk::FrontFace::eClockwise, + true, + pipelineLayout, + renderPass ); // Get the index of the next available swapchain image: vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -151,9 +153,9 @@ int main( int /*argc*/, char ** /*argv*/ ) std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); @@ -161,14 +163,14 @@ int main( int /*argc*/, char ** /*argv*/ ) /* The first draw should use the first matrix in the buffer */ uint32_t dynamicOffset = 0; - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, dynamicOffset ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, dynamicOffset ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.draw( 12 * 3, 1, 0, 0 ); // the second draw should use the second matrix in the buffer; dynamicOffset = (uint32_t)bufferSize; - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, dynamicOffset ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, dynamicOffset ); commandBuffer.draw( 12 * 3, 1, 0, 0 ); commandBuffer.endRenderPass(); @@ -180,7 +182,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/Events/Events.cpp b/RAII_Samples/Events/Events.cpp index c556e31..fbb153a 100644 --- a/RAII_Samples/Events/Events.cpp +++ b/RAII_Samples/Events/Events.cpp @@ -51,14 +51,14 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Fence fence( device, vk::FenceCreateInfo() ); vk::SubmitInfo submitInfo( {}, {}, *commandBuffer ); - graphicsQueue.submit( submitInfo, *fence ); + graphicsQueue.submit( submitInfo, fence ); // Make sure timeout is long enough for a 
simple command buffer without waiting for an event vk::Result result; int timeouts = -1; do { - result = device.waitForFences( { *fence }, true, vk::su::FenceTimeout ); + result = device.waitForFences( { fence }, true, vk::su::FenceTimeout ); timeouts++; } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); @@ -75,16 +75,16 @@ int main( int /*argc*/, char ** /*argv*/ ) commandPool.reset(); commandBuffer.begin( vk::CommandBufferBeginInfo() ); - commandBuffer.waitEvents( { *event }, vk::PipelineStageFlagBits::eHost, vk::PipelineStageFlagBits::eBottomOfPipe, nullptr, nullptr, nullptr ); + commandBuffer.waitEvents( { event }, vk::PipelineStageFlagBits::eHost, vk::PipelineStageFlagBits::eBottomOfPipe, nullptr, nullptr, nullptr ); commandBuffer.end(); - device.resetFences( { *fence } ); + device.resetFences( { fence } ); // Note that stepping through this code in the debugger is a bad idea because the GPU can TDR waiting for the event. // Execute the code from vk::Queue::submit() through vk::Device::setEvent() without breakpoints - graphicsQueue.submit( submitInfo, *fence ); + graphicsQueue.submit( submitInfo, fence ); // We should timeout waiting for the fence because the GPU should be waiting on the event - result = device.waitForFences( { *fence }, true, vk::su::FenceTimeout ); + result = device.waitForFences( { fence }, true, vk::su::FenceTimeout ); if ( result != vk::Result::eTimeout ) { std::cout << "Didn't get expected timeout in vk::Device::waitForFences, exiting\n"; @@ -96,11 +96,11 @@ int main( int /*argc*/, char ** /*argv*/ ) event.set(); do { - result = device.waitForFences( { *fence }, true, vk::su::FenceTimeout ); + result = device.waitForFences( { fence }, true, vk::su::FenceTimeout ); } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); - device.resetFences( { *fence } ); + device.resetFences( { fence } ); event.reset(); // reset the command buffer by resetting the complete command pool @@ -108,7 +108,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Now set the event from the GPU and wait on the CPU commandBuffer.begin( vk::CommandBufferBeginInfo() ); - commandBuffer.setEvent( *event, vk::PipelineStageFlagBits::eBottomOfPipe ); + commandBuffer.setEvent( event, vk::PipelineStageFlagBits::eBottomOfPipe ); commandBuffer.end(); // Look for the event on the CPU. It should be vk::Result::eEventReset since we haven't sent the command buffer yet. 
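Note on the pattern exercised throughout these sample diffs: every change of the form *handle -> handle relies on the new implicit conversion operators this patch adds to vk::UniqueHandle, vk::SharedHandle, and the vk::raii handle wrappers, which let a wrapper be passed directly wherever the plain vk::Handle type is expected. A minimal self-contained sketch of that shape, with an invented wrapper name and member (the generated classes in vulkan_raii.hpp, UniqueHandle.hpp, and SharedHandle.hpp differ in detail):

#include <vulkan/vulkan.hpp>

// Illustrative RAII-style wrapper; class and member names are invented for this sketch.
class CommandPoolWrapper
{
public:
  explicit CommandPoolWrapper( vk::CommandPool pool ) noexcept : m_commandPool( pool ) {}

  // pre-existing explicit access to the underlying handle
  vk::CommandPool const & operator*() const noexcept
  {
    return m_commandPool;
  }

  // new: implicit conversion to the plain handle type
  operator vk::CommandPool() const noexcept
  {
    return m_commandPool;
  }

private:
  vk::CommandPool m_commandPool;
};

// With the conversion operator in place, a wrapper can be passed directly, e.g.
//   vk::CommandBufferAllocateInfo allocateInfo( wrapper, vk::CommandBufferLevel::ePrimary, 1 );
// instead of the previous explicit dereference
//   vk::CommandBufferAllocateInfo allocateInfo( *wrapper, vk::CommandBufferLevel::ePrimary, 1 );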
@@ -116,7 +116,7 @@ int main( int /*argc*/, char ** /*argv*/ ) assert( result == vk::Result::eEventReset ); // Send the command buffer and loop waiting for the event - graphicsQueue.submit( submitInfo, *fence ); + graphicsQueue.submit( submitInfo, fence ); int polls = 0; do @@ -128,7 +128,7 @@ int main( int /*argc*/, char ** /*argv*/ ) do { - result = device.waitForFences( { *fence }, true, vk::su::FenceTimeout ); + result = device.waitForFences( { fence }, true, vk::su::FenceTimeout ); } while ( result == vk::Result::eTimeout ); assert( result == vk::Result::eSuccess ); diff --git a/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp b/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp index 032e400..2682b3e 100644 --- a/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp +++ b/RAII_Samples/ImmutableSampler/ImmutableSampler.cpp @@ -56,7 +56,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -77,7 +77,7 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); vk::raii::su::copyToDevice( uniformBufferData.deviceMemory, mvpcMatrix ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -116,14 +116,14 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); // Populate descriptor sets - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, *descriptorSetLayout ); vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ); - vk::DescriptorBufferInfo bufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( *textureData.sampler, *textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( textureData.sampler, textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); std::array writeDescriptorSets = { - vk::WriteDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), - vk::WriteDescriptorSet( *descriptorSet, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) + vk::WriteDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( descriptorSet, 1, 0, 
vk::DescriptorType::eCombinedImageSampler, imageInfo ) }; device.updateDescriptorSets( writeDescriptorSets, nullptr ); @@ -137,7 +137,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -146,19 +146,19 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); diff --git a/RAII_Samples/InitTexture/InitTexture.cpp b/RAII_Samples/InitTexture/InitTexture.cpp index b197b14..ac51f99 100644 --- a/RAII_Samples/InitTexture/InitTexture.cpp +++ b/RAII_Samples/InitTexture/InitTexture.cpp @@ -95,7 +95,7 @@ int main( int /*argc*/, char ** /*argv*/ ) imageMemory = vk::raii::DeviceMemory( device, memoryAllocateInfo ); // bind memory - image.bindMemory( *imageMemory, 0 ); + image.bindMemory( imageMemory, 0 ); vk::raii::Buffer textureBuffer = nullptr; vk::raii::DeviceMemory textureBufferMemory = nullptr; @@ -115,7 +115,7 @@ int main( int /*argc*/, char ** /*argv*/ ) textureBufferMemory = vk::raii::DeviceMemory( device, memoryAllocateInfo ); // bind memory - textureBuffer.bindMemory( *textureBufferMemory, 0 ); + textureBuffer.bindMemory( textureBufferMemory, 0 ); } else { @@ -146,21 +146,21 @@ int main( int /*argc*/, char ** /*argv*/ ) if ( needsStaging ) { // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal - vk::raii::su::setImageLayout( commandBuffer, *image, format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + vk::raii::su::setImageLayout( commandBuffer, image, format, 
vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); vk::BufferImageCopy copyRegion( 0, surfaceData.extent.width, surfaceData.extent.height, vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), vk::Offset3D( 0, 0, 0 ), vk::Extent3D( surfaceData.extent, 1 ) ); - commandBuffer.copyBufferToImage( *textureBuffer, *image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); + commandBuffer.copyBufferToImage( textureBuffer, image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); // Set the layout for the texture image from eTransferDstOptimal to SHADER_READ_ONLY - vk::raii::su::setImageLayout( commandBuffer, *image, format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::raii::su::setImageLayout( commandBuffer, image, format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); } else { // If we can use the linear tiled image as a texture, just do it - vk::raii::su::setImageLayout( commandBuffer, *image, format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::raii::su::setImageLayout( commandBuffer, image, format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); } commandBuffer.end(); @@ -183,7 +183,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::BorderColor::eFloatOpaqueWhite ); vk::raii::Sampler sampler( device, samplerCreateInfo ); - vk::ImageViewCreateInfo imageViewCreateInfo( {}, *image, vk::ImageViewType::e2D, format, {}, { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } ); + vk::ImageViewCreateInfo imageViewCreateInfo( {}, image, vk::ImageViewType::e2D, format, {}, { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } ); vk::raii::ImageView imageView( device, imageViewCreateInfo ); /* VULKAN_KEY_END */ diff --git a/RAII_Samples/InputAttachment/InputAttachment.cpp b/RAII_Samples/InputAttachment/InputAttachment.cpp index 8d3e5a0..0e50671 100644 --- a/RAII_Samples/InputAttachment/InputAttachment.cpp +++ b/RAII_Samples/InputAttachment/InputAttachment.cpp @@ -131,13 +131,13 @@ int main( int /*argc*/, char ** /*argv*/ ) uint32_t memoryTypeIndex = vk::su::findMemoryType( physicalDevice.getMemoryProperties(), memoryRequirements.memoryTypeBits, {} ); vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); inputMemory = vk::raii::DeviceMemory( device, memoryAllocateInfo ); - inputImage.bindMemory( *inputMemory, 0 ); + inputImage.bindMemory( inputMemory, 0 ); // Set the image layout to TRANSFER_DST_OPTIMAL to be ready for clear commandBuffer.begin( vk::CommandBufferBeginInfo() ); - vk::raii::su::setImageLayout( commandBuffer, *inputImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + vk::raii::su::setImageLayout( commandBuffer, inputImage, swapChainData.colorFormat, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); - commandBuffer.clearColorImage( *inputImage, + commandBuffer.clearColorImage( inputImage, vk::ImageLayout::eTransferDstOptimal, { std::array( { { 1.0f, 1.0f, 0.0f, 0.0f } } ) }, { { vk::ImageAspectFlagBits::eColor, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS } } ); @@ -146,7 +146,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // RenderPassCreateInfo below vk::ImageViewCreateInfo imageViewCreateInfo( - {}, *inputImage, vk::ImageViewType::e2D, swapChainData.colorFormat, {}, { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } ); + {}, inputImage, vk::ImageViewType::e2D, swapChainData.colorFormat, {}, { 
vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } ); vk::raii::ImageView inputAttachmentView( device, imageViewCreateInfo ); vk::DescriptorSetLayoutBinding layoutBinding( 0, vk::DescriptorType::eInputAttachment, 1, vk::ShaderStageFlagBits::eFragment ); @@ -208,11 +208,11 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize ); vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, *descriptorSetLayout ); vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ); - vk::DescriptorImageInfo inputImageInfo( nullptr, *inputAttachmentView, vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::WriteDescriptorSet writeDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eInputAttachment, inputImageInfo ); + vk::DescriptorImageInfo inputImageInfo( nullptr, inputAttachmentView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::WriteDescriptorSet writeDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eInputAttachment, inputImageInfo ); device.updateDescriptorSets( writeDescriptorSet, nullptr ); vk::raii::PipelineCache pipelineCache( device, vk::PipelineCacheCreateInfo() ); @@ -222,16 +222,16 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); vk::ClearValue clearValue; clearValue.color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); diff --git a/RAII_Samples/MultipleSets/MultipleSets.cpp b/RAII_Samples/MultipleSets/MultipleSets.cpp index 06689ce..17ace1d 100644 --- a/RAII_Samples/MultipleSets/MultipleSets.cpp +++ b/RAII_Samples/MultipleSets/MultipleSets.cpp @@ -113,7 +113,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, 
vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -139,7 +139,7 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); vk::raii::su::copyToDevice( uniformBufferData.deviceMemory, mvpcMatrix ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -166,7 +166,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorSetLayout samplerLayout( device, descriptorSetLayoutCreateInfo ); // Create pipeline layout with multiple descriptor sets - std::array descriptorSetLayouts = { *uniformLayout, *samplerLayout }; + std::array descriptorSetLayouts = { uniformLayout, samplerLayout }; vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo( {}, descriptorSetLayouts ); vk::raii::PipelineLayout pipelineLayout( device, pipelineLayoutCreateInfo ); @@ -177,15 +177,15 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); // Populate descriptor sets - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, descriptorSetLayouts ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, descriptorSetLayouts ); vk::raii::DescriptorSets descriptorSets( device, descriptorSetAllocateInfo ); // Populate with info about our uniform buffer - vk::DescriptorBufferInfo uniformBufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo textureImageInfo( *textureData.sampler, *textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorBufferInfo uniformBufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo textureImageInfo( textureData.sampler, textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); std::array writeDescriptorSets = { - { vk::WriteDescriptorSet( *descriptorSets[0], 0, 0, vk::DescriptorType::eUniformBuffer, {}, uniformBufferInfo ), - vk::WriteDescriptorSet( *descriptorSets[1], 0, 0, vk::DescriptorType::eCombinedImageSampler, textureImageInfo ) } + { vk::WriteDescriptorSet( descriptorSets[0], 0, 0, vk::DescriptorType::eUniformBuffer, {}, uniformBufferInfo ), + vk::WriteDescriptorSet( descriptorSets[1], 0, 0, vk::DescriptorType::eCombinedImageSampler, textureImageInfo ) } }; device.updateDescriptorSets( writeDescriptorSets, nullptr ); @@ -199,7 +199,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -209,19 +209,19 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore 
imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSets[0], *descriptorSets[1] }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSets[0], descriptorSets[1] }, nullptr ); - vk::Buffer buffer = *vertexBufferData.buffer; + vk::Buffer buffer = vertexBufferData.buffer; commandBuffer.bindVertexBuffers( 0, buffer, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); @@ -237,7 +237,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp b/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp index 3e10ddc..df5301a 100644 --- a/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp +++ b/RAII_Samples/OcclusionQuery/OcclusionQuery.cpp @@ -51,7 +51,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -76,7 +76,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::makeDescriptorSetLayout( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = 
vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -91,7 +91,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::copyToDevice( vertexBufferData.deviceMemory, coloredCubeData, sizeof( coloredCubeData ) / sizeof( coloredCubeData[0] ) ); vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, {} ); @@ -104,7 +104,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( coloredCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -117,7 +117,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Get the index of the next available swapchain image: vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -135,39 +135,39 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::MemoryAllocateInfo memoryAllocateInfo( memoryRequirements.size, memoryTypeIndex ); queryResultMemory = vk::raii::DeviceMemory( device, memoryAllocateInfo ); - queryResultBuffer.bindMemory( *queryResultMemory, 0 ); + queryResultBuffer.bindMemory( queryResultMemory, 0 ); vk::QueryPoolCreateInfo queryPoolCreateInfo( {}, vk::QueryType::eOcclusion, 2, {} ); vk::raii::QueryPool queryPool( device, queryPoolCreateInfo ); commandBuffer.begin( {} ); - commandBuffer.resetQueryPool( *queryPool, 0, 2 ); + commandBuffer.resetQueryPool( queryPool, 0, 2 ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); commandBuffer.beginRenderPass( - vk::RenderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::RenderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, {} ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, {} ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + 
commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); - commandBuffer.beginQuery( *queryPool, 0, vk::QueryControlFlags() ); - commandBuffer.endQuery( *queryPool, 0 ); + commandBuffer.beginQuery( queryPool, 0, vk::QueryControlFlags() ); + commandBuffer.endQuery( queryPool, 0 ); - commandBuffer.beginQuery( *queryPool, 1, vk::QueryControlFlags() ); + commandBuffer.beginQuery( queryPool, 1, vk::QueryControlFlags() ); commandBuffer.draw( 12 * 3, 1, 0, 0 ); commandBuffer.endRenderPass(); - commandBuffer.endQuery( *queryPool, 1 ); + commandBuffer.endQuery( queryPool, 1 ); commandBuffer.copyQueryPoolResults( - *queryPool, 0, 2, *queryResultBuffer, 0, sizeof( uint64_t ), vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); + queryPool, 0, 2, queryResultBuffer, 0, sizeof( uint64_t ), vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait ); commandBuffer.end(); vk::raii::Fence drawFence( device, vk::FenceCreateInfo() ); @@ -201,7 +201,7 @@ int main( int /*argc*/, char ** /*argv*/ ) queryResultMemory.unmapMemory(); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/PipelineCache/PipelineCache.cpp b/RAII_Samples/PipelineCache/PipelineCache.cpp index cdb6156..d96bc07 100644 --- a/RAII_Samples/PipelineCache/PipelineCache.cpp +++ b/RAII_Samples/PipelineCache/PipelineCache.cpp @@ -42,7 +42,8 @@ #endif typedef unsigned long long timestamp_t; -timestamp_t getMilliseconds() + +timestamp_t getMilliseconds() { #ifdef WIN32 LARGE_INTEGER frequency; @@ -85,7 +86,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -117,7 +118,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -133,7 +134,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { 
vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, { textureData } ); @@ -300,7 +301,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Get the index of the next available swapchain image: vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -309,12 +310,12 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); commandBuffer.beginRenderPass( - vk::RenderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::RenderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, {} ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, {} ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -329,7 +330,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp b/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp index b6c188d..f3bd017 100644 --- a/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp +++ b/RAII_Samples/PipelineDerivative/PipelineDerivative.cpp @@ -52,7 +52,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, 
graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -84,7 +84,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -100,7 +100,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, { textureData } ); @@ -115,8 +115,8 @@ int main( int /*argc*/, char ** /*argv*/ ) // Second pipeline has a modified fragment shader and sets the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag. std::array pipelineShaderStageCreateInfos = { - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, *vertexShaderModule, "main" ), - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule, "main" ) + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main" ), + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main" ) }; vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( texturedCubeData[0] ) ); @@ -166,8 +166,8 @@ int main( int /*argc*/, char ** /*argv*/ ) &pipelineDepthStencilStateCreateInfo, &pipelineColorBlendStateCreateInfo, &pipelineDynamicStateCreateInfo, - *pipelineLayout, - *renderPass ); + pipelineLayout, + renderPass ); vk::raii::Pipeline basePipeline( device, pipelineCache, graphicsPipelineCreateInfo ); switch ( basePipeline.getConstructorSuccessCode() ) @@ -200,9 +200,9 @@ void main() glslang::FinalizeProcess(); // Modify pipeline info to reflect derivation - pipelineShaderStageCreateInfos[1] = vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule2, "main" ); + pipelineShaderStageCreateInfos[1] = vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, fragmentShaderModule2, "main" ); graphicsPipelineCreateInfo.flags = vk::PipelineCreateFlagBits::eDerivative; - graphicsPipelineCreateInfo.basePipelineHandle = *basePipeline; + graphicsPipelineCreateInfo.basePipelineHandle = basePipeline; graphicsPipelineCreateInfo.basePipelineIndex = -1; // And create the derived pipeline @@ -223,7 +223,7 @@ void main() // Get the index of the next available swapchain image vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = 
swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -232,12 +232,12 @@ void main() clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); commandBuffer.beginRenderPass( - vk::RenderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), + vk::RenderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D(), surfaceData.extent ), clearValues ), vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *derivedPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, {} ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, derivedPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, {} ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -252,7 +252,7 @@ void main() vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/PushConstants/PushConstants.cpp b/RAII_Samples/PushConstants/PushConstants.cpp index a8826b1..d1f88ee 100644 --- a/RAII_Samples/PushConstants/PushConstants.cpp +++ b/RAII_Samples/PushConstants/PushConstants.cpp @@ -94,7 +94,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -115,7 +115,7 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); vk::raii::su::copyToDevice( uniformBufferData.deviceMemory, mvpcMatrix ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); 
glslang::InitializeProcess(); @@ -148,12 +148,12 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); // Populate descriptor sets - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, *descriptorSetLayout ); vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ); // Populate with info about our uniform buffer for MVP - vk::DescriptorBufferInfo bufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::WriteDescriptorSet writeDescriptorSet( *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::WriteDescriptorSet writeDescriptorSet( descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ); device.updateDescriptorSets( writeDescriptorSet, nullptr ); // Create our push constant data, which matches shader expectations @@ -162,7 +162,7 @@ int main( int /*argc*/, char ** /*argv*/ ) // Ensure we have enough room for push constant data assert( ( sizeof( pushConstants ) <= physicalDevice.getProperties().limits.maxPushConstantsSize ) && "Too many push constants" ); commandBuffer.begin( vk::CommandBufferBeginInfo() ); - commandBuffer.pushConstants( *pipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, pushConstants ); + commandBuffer.pushConstants( pipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, pushConstants ); /* VULKAN_KEY_END */ @@ -174,7 +174,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -183,7 +183,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -191,12 +191,12 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - 
commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -211,7 +211,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/PushDescriptors/PushDescriptors.cpp b/RAII_Samples/PushDescriptors/PushDescriptors.cpp index c5e0b0f..5e3c29c 100644 --- a/RAII_Samples/PushDescriptors/PushDescriptors.cpp +++ b/RAII_Samples/PushDescriptors/PushDescriptors.cpp @@ -77,7 +77,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, deviceExtensions ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -111,7 +111,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -133,7 +133,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -143,26 +143,26 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, 
*framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); - vk::DescriptorBufferInfo bufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( *textureData.sampler, *textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( textureData.sampler, textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); vk::WriteDescriptorSet writeDescriptorSets[2] = { vk::WriteDescriptorSet( {}, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), - vk::WriteDescriptorSet( {}, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) }; + vk::WriteDescriptorSet( {}, 1, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo ) }; // this call is from an extension and needs the dynamic dispatcher !! - commandBuffer.pushDescriptorSetKHR( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { 2, writeDescriptorSets } ); + commandBuffer.pushDescriptorSetKHR( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { 2, writeDescriptorSets } ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -177,7 +177,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp b/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp index b9be71a..fbc3059 100644 --- a/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp +++ b/RAII_Samples/SecondaryCommandBuffer/SecondaryCommandBuffer.cpp @@ -81,7 +81,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format, vk::AttachmentLoadOp::eClear, vk::ImageLayout::eColorAttachmentOptimal ); @@ -122,8 +122,8 @@ int main( int /*argc*/, char ** /*argv*/ ) 
vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformBuffer, 2 }, { vk::DescriptorType::eCombinedImageSampler, 2 } } ); - std::array layouts = { *descriptorSetLayout, *descriptorSetLayout }; - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, layouts ); + std::array layouts = { descriptorSetLayout, descriptorSetLayout }; + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, layouts ); vk::raii::DescriptorSets descriptorSets( device, descriptorSetAllocateInfo ); assert( descriptorSets.size() == 2 ); @@ -135,14 +135,14 @@ int main( int /*argc*/, char ** /*argv*/ ) /* VULKAN_KEY_START */ // create four secondary command buffers, for each quadrant of the screen - vk::CommandBufferAllocateInfo commandBufferAllocateInfo( *commandPool, vk::CommandBufferLevel::eSecondary, 4 ); + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::eSecondary, 4 ); vk::raii::CommandBuffers secondaryCommandBuffers( device, commandBufferAllocateInfo ); // Get the index of the next available swapchain image: vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -157,7 +157,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::Rect2D scissor( vk::Offset2D( 0, 0 ), vk::Extent2D( surfaceData.extent ) ); // now we record four separate command buffers, one for each quadrant of the screen - vk::CommandBufferInheritanceInfo commandBufferInheritanceInfo( *renderPass, 0, *framebuffers[imageIndex] ); + vk::CommandBufferInheritanceInfo commandBufferInheritanceInfo( renderPass, 0, framebuffers[imageIndex] ); vk::CommandBufferBeginInfo secondaryBeginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit | vk::CommandBufferUsageFlagBits::eRenderPassContinue, &commandBufferInheritanceInfo ); @@ -168,21 +168,21 @@ int main( int /*argc*/, char ** /*argv*/ ) viewport.y = 25.0f + 250.0f * ( i / 2 ); secondaryCommandBuffers[i].begin( secondaryBeginInfo ); - secondaryCommandBuffers[i].bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - secondaryCommandBuffers[i].bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSets[i == 0 || i == 3] }, nullptr ); - secondaryCommandBuffers[i].bindVertexBuffers( 0, { *vertexBufferData.buffer }, offset ); + secondaryCommandBuffers[i].bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + secondaryCommandBuffers[i].bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSets[i == 0 || i == 3] }, nullptr ); + secondaryCommandBuffers[i].bindVertexBuffers( 0, { vertexBufferData.buffer }, offset ); secondaryCommandBuffers[i].setViewport( 0, viewport ); secondaryCommandBuffers[i].setScissor( 0, scissor ); secondaryCommandBuffers[i].draw( 12 * 3, 1, 0, 0 ); secondaryCommandBuffers[i].end(); - executeCommandBuffers[i] = *secondaryCommandBuffers[i]; + executeCommandBuffers[i] = secondaryCommandBuffers[i]; } std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - 
vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); // specifying VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS means this render pass may ONLY call // vkCmdExecuteCommands @@ -209,7 +209,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; result = presentQueue.presentKHR( vk::PresentInfoKHR( {}, *swapChainData.swapChain, imageIndex, {} ) ); diff --git a/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp b/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp index 59fe18e..18806bd 100644 --- a/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp +++ b/RAII_Samples/SeparateImageSampler/SeparateImageSampler.cpp @@ -82,7 +82,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -103,7 +103,7 @@ int main( int /*argc*/, char ** /*argv*/ ) glm::mat4x4 mvpcMatrix = vk::su::createModelViewProjectionClipMatrix( surfaceData.extent ); vk::raii::su::copyToDevice( uniformBufferData.deviceMemory, mvpcMatrix ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -167,16 +167,16 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool( device, descriptorPoolCreateInfo ); // Populate descriptor sets - vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( *descriptorPool, *descriptorSetLayout ); + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo( descriptorPool, *descriptorSetLayout ); vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, descriptorSetAllocateInfo ).front() ); - vk::DescriptorBufferInfo bufferInfo( *uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); - vk::DescriptorImageInfo imageInfo( *textureData.sampler, *textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); - vk::DescriptorImageInfo samplerInfo( *sampler, {}, {} ); + vk::DescriptorBufferInfo bufferInfo( uniformBufferData.buffer, 0, sizeof( glm::mat4x4 ) ); + vk::DescriptorImageInfo imageInfo( textureData.sampler, textureData.imageData.imageView, 
vk::ImageLayout::eShaderReadOnlyOptimal ); + vk::DescriptorImageInfo samplerInfo( sampler, {}, {} ); std::array descriptorWrites = { { vk::WriteDescriptorSet( - *descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), - vk::WriteDescriptorSet( *descriptorSet, 1, 0, vk::DescriptorType::eSampledImage, imageInfo ), - vk::WriteDescriptorSet( *descriptorSet, 2, 0, vk::DescriptorType::eSampler, samplerInfo ) } }; + descriptorSet, 0, 0, vk::DescriptorType::eUniformBuffer, {}, bufferInfo ), + vk::WriteDescriptorSet( descriptorSet, 1, 0, vk::DescriptorType::eSampledImage, imageInfo ), + vk::WriteDescriptorSet( descriptorSet, 2, 0, vk::DescriptorType::eSampler, samplerInfo ) } }; device.updateDescriptorSets( descriptorWrites, nullptr ); /* VULKAN_KEY_END */ @@ -189,7 +189,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -199,7 +199,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -207,13 +207,13 @@ int main( int /*argc*/, char ** /*argv*/ ) clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -228,7 +228,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); 
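The sample changes above all follow the same pattern: wherever a plain vk::Handle is expected, the vk::raii handle is now passed directly instead of being dereferenced with `*`, because the raii handles gain an implicit cast operator to their underlying handle type. The snippet below is a minimal sketch of that pattern, modelled on the makeCommandBuffer helper changed further down in RAII_Samples/utils/utils.hpp; it is not copied verbatim from any one sample.

```cpp
#include <vulkan/vulkan_raii.hpp>

// Allocate one primary command buffer from a raii command pool.
vk::raii::CommandBuffer makePrimaryCommandBuffer( vk::raii::Device const & device, vk::raii::CommandPool const & commandPool )
{
  // before this patch: vk::CommandBufferAllocateInfo allocateInfo( *commandPool, ... );
  // the implicit cast on vk::raii::CommandPool now supplies the vk::CommandPool directly
  vk::CommandBufferAllocateInfo allocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 );
  return std::move( vk::raii::CommandBuffers( device, allocateInfo ).front() );
}
```

The explicit `*handle` spelling keeps working, so existing code does not have to change; the samples simply drop it where the conversion is now implicit.
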
diff --git a/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp b/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp index 78f00f8..86fa85b 100644 --- a/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp +++ b/RAII_Samples/SurfaceCapabilities/SurfaceCapabilities.cpp @@ -84,11 +84,12 @@ int main( int /*argc*/, char ** /*argv*/ ) std::cout << "PhysicalDevice " << i << "\n"; if ( supportsGetSurfaceCapabilities2 ) { - auto surfaceCapabilities2 = physicalDevices[i] - .getSurfaceCapabilities2KHR( { *surfaceData.surface } ); + auto surfaceCapabilities2 = + physicalDevices[i] + .getSurfaceCapabilities2KHR( { static_cast( surfaceData.surface ) } ); vk::SurfaceCapabilitiesKHR const & surfaceCapabilities = surfaceCapabilities2.get().surfaceCapabilities; cout( surfaceCapabilities ); @@ -121,7 +122,7 @@ int main( int /*argc*/, char ** /*argv*/ ) } else { - vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevices[i].getSurfaceCapabilitiesKHR( *surfaceData.surface ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevices[i].getSurfaceCapabilitiesKHR( surfaceData.surface ); cout( surfaceCapabilities ); } } diff --git a/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp b/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp index d3e71ad..5e3ddd8 100644 --- a/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp +++ b/RAII_Samples/SurfaceFormats/SurfaceFormats.cpp @@ -46,7 +46,7 @@ int main( int /*argc*/, char ** /*argv*/ ) for ( size_t i = 0; i < physicalDevices.size(); i++ ) { std::cout << "PhysicalDevice " << i << "\n"; - std::vector surfaceFormats = physicalDevices[i].getSurfaceFormatsKHR( *surfaceData.surface ); + std::vector surfaceFormats = physicalDevices[i].getSurfaceFormatsKHR( surfaceData.surface ); for ( size_t j = 0; j < surfaceFormats.size(); j++ ) { std::cout << std::string( "\t" ) << "Format " << j << "\n"; diff --git a/RAII_Samples/Template/Template.cpp b/RAII_Samples/Template/Template.cpp index c0d5cf6..d6859f5 100644 --- a/RAII_Samples/Template/Template.cpp +++ b/RAII_Samples/Template/Template.cpp @@ -44,7 +44,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -76,7 +76,7 @@ int main( int /*argc*/, char ** /*argv*/ ) { vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, depthBufferData.format ); glslang::InitializeProcess(); @@ -92,9 +92,10 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { 
vk::DescriptorType::eUniformBuffer, 1 }, { vk::DescriptorType::eCombinedImageSampler, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); - vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, { textureData } ); + vk::raii::su::updateDescriptorSets( + device, descriptorSet, { { vk::DescriptorType::eUniformBuffer, uniformBufferData.buffer, VK_WHOLE_SIZE, nullptr } }, { textureData } ); vk::raii::PipelineCache pipelineCache( device, vk::PipelineCacheCreateInfo() ); vk::raii::Pipeline graphicsPipeline = vk::raii::su::makeGraphicsPipeline( device, @@ -104,7 +105,7 @@ int main( int /*argc*/, char ** /*argv*/ ) fragmentShaderModule, nullptr, sizeof( texturedCubeData[0] ), - { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, + { { vk::Format::eR32G32B32A32Sfloat, 0 }, { vk::Format::eR32G32Sfloat, 16 } }, vk::FrontFace::eClockwise, true, pipelineLayout, @@ -114,20 +115,20 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); std::array clearValues; clearValues[0].color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); clearValues[1].depthStencil = vk::ClearDepthStencilValue( 1.0f, 0 ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValues ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); - commandBuffer.bindVertexBuffers( 0, { *vertexBufferData.buffer }, { 0 } ); + commandBuffer.bindVertexBuffers( 0, { vertexBufferData.buffer }, { 0 } ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); commandBuffer.setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ) ); @@ -142,7 +143,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; 
vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/TexelBuffer/TexelBuffer.cpp b/RAII_Samples/TexelBuffer/TexelBuffer.cpp index 32897d6..f57c93c 100644 --- a/RAII_Samples/TexelBuffer/TexelBuffer.cpp +++ b/RAII_Samples/TexelBuffer/TexelBuffer.cpp @@ -86,7 +86,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::findGraphicsAndPresentQueueFamilyIndex( physicalDevice, surfaceData.surface ); vk::raii::Device device = vk::raii::su::makeDevice( physicalDevice, graphicsAndPresentQueueFamilyIndex.first, vk::su::getDeviceExtensions() ); - vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); + vk::raii::CommandPool commandPool = vk::raii::CommandPool( device, { {}, graphicsAndPresentQueueFamilyIndex.first } ); vk::raii::CommandBuffer commandBuffer = vk::raii::su::makeCommandBuffer( device, commandPool ); vk::raii::Queue graphicsQueue( device, graphicsAndPresentQueueFamilyIndex.first, 0 ); @@ -104,14 +104,14 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::BufferData texelBufferData( physicalDevice, device, sizeof( texels ), vk::BufferUsageFlagBits::eUniformTexelBuffer ); texelBufferData.upload( texels ); - vk::BufferViewCreateInfo bufferViewCreateInfo( {}, *texelBufferData.buffer, texelFormat, 0, sizeof( texels ) ); + vk::BufferViewCreateInfo bufferViewCreateInfo( {}, texelBufferData.buffer, texelFormat, 0, sizeof( texels ) ); vk::raii::BufferView texelBufferView( device, bufferViewCreateInfo ); vk::raii::DescriptorSetLayout descriptorSetLayout = vk::raii::su::makeDescriptorSetLayout( device, { { vk::DescriptorType::eUniformTexelBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); vk::raii::PipelineLayout pipelineLayout( device, { {}, *descriptorSetLayout } ); - vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surfaceData.surface ) ).format; + vk::Format colorFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surfaceData.surface ) ).format; vk::raii::RenderPass renderPass = vk::raii::su::makeRenderPass( device, colorFormat, vk::Format::eUndefined ); glslang::InitializeProcess(); @@ -123,7 +123,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::su::makeFramebuffers( device, renderPass, swapChainData.imageViews, nullptr, surfaceData.extent ); vk::raii::DescriptorPool descriptorPool = vk::raii::su::makeDescriptorPool( device, { { vk::DescriptorType::eUniformTexelBuffer, 1 } } ); - vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { *descriptorPool, *descriptorSetLayout } ).front() ); + vk::raii::DescriptorSet descriptorSet = std::move( vk::raii::DescriptorSets( device, { descriptorPool, *descriptorSetLayout } ).front() ); vk::raii::su::updateDescriptorSets( device, descriptorSet, { { vk::DescriptorType::eUniformTexelBuffer, texelBufferData.buffer, VK_WHOLE_SIZE, &texelBufferView } }, {} ); @@ -138,7 +138,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::raii::Semaphore imageAcquiredSemaphore( device, vk::SemaphoreCreateInfo() ); vk::Result result; uint32_t imageIndex; - std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, *imageAcquiredSemaphore ); + std::tie( result, imageIndex ) = swapChainData.swapChain.acquireNextImage( vk::su::FenceTimeout, imageAcquiredSemaphore ); assert( result == vk::Result::eSuccess ); assert( imageIndex < swapChainData.images.size() ); @@ -146,11 +146,11 @@ int main( int /*argc*/, char 
** /*argv*/ ) vk::ClearValue clearValue; clearValue.color = vk::ClearColorValue( 0.2f, 0.2f, 0.2f, 0.2f ); - vk::RenderPassBeginInfo renderPassBeginInfo( *renderPass, *framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); + vk::RenderPassBeginInfo renderPassBeginInfo( renderPass, framebuffers[imageIndex], vk::Rect2D( vk::Offset2D( 0, 0 ), surfaceData.extent ), clearValue ); commandBuffer.beginRenderPass( renderPassBeginInfo, vk::SubpassContents::eInline ); - commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, *graphicsPipeline ); - commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, { *descriptorSet }, nullptr ); + commandBuffer.bindPipeline( vk::PipelineBindPoint::eGraphics, graphicsPipeline ); + commandBuffer.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, { descriptorSet }, nullptr ); commandBuffer.setViewport( 0, vk::Viewport( 0.0f, 0.0f, static_cast( surfaceData.extent.width ), static_cast( surfaceData.extent.height ), 0.0f, 1.0f ) ); @@ -166,7 +166,7 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::SubmitInfo submitInfo( *imageAcquiredSemaphore, waitDestinationStageMask, *commandBuffer ); graphicsQueue.submit( submitInfo, *drawFence ); - while ( vk::Result::eTimeout == device.waitForFences( { *drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) + while ( vk::Result::eTimeout == device.waitForFences( { drawFence }, VK_TRUE, vk::su::FenceTimeout ) ) ; vk::PresentInfoKHR presentInfoKHR( nullptr, *swapChainData.swapChain, imageIndex ); diff --git a/RAII_Samples/utils/utils.hpp b/RAII_Samples/utils/utils.hpp index 22f390e..21f2f41 100644 --- a/RAII_Samples/utils/utils.hpp +++ b/RAII_Samples/utils/utils.hpp @@ -186,7 +186,7 @@ namespace vk #endif { deviceMemory = vk::raii::su::allocateDeviceMemory( device, physicalDevice.getMemoryProperties(), buffer.getMemoryRequirements(), propertyFlags ); - buffer.bindMemory( *deviceMemory, 0 ); + buffer.bindMemory( deviceMemory, 0 ); } BufferData( std::nullptr_t ) {} @@ -279,8 +279,8 @@ namespace vk initialLayout } ) { deviceMemory = vk::raii::su::allocateDeviceMemory( device, physicalDevice.getMemoryProperties(), image.getMemoryRequirements(), memoryProperties ); - image.bindMemory( *deviceMemory, 0 ); - imageView = vk::raii::ImageView( device, vk::ImageViewCreateInfo( {}, *image, vk::ImageViewType::e2D, format, {}, { aspectMask, 0, 1, 0, 1 } ) ); + image.bindMemory( deviceMemory, 0 ); + imageView = vk::raii::ImageView( device, vk::ImageViewCreateInfo( {}, image, vk::ImageViewType::e2D, format, {}, { aspectMask, 0, 1, 0, 1 } ) ); } ImageData( std::nullptr_t ) {} @@ -337,10 +337,10 @@ namespace vk uint32_t graphicsQueueFamilyIndex, uint32_t presentQueueFamilyIndex ) { - vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( *surface ) ); + vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surface ) ); colorFormat = surfaceFormat.format; - vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( *surface ); + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface ); vk::Extent2D swapchainExtent; if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) { @@ -361,9 +361,9 @@ namespace vk : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) ? 
vk::CompositeAlphaFlagBitsKHR::ePostMultiplied : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) ? vk::CompositeAlphaFlagBitsKHR::eInherit : vk::CompositeAlphaFlagBitsKHR::eOpaque; - vk::PresentModeKHR presentMode = vk::su::pickPresentMode( physicalDevice.getSurfacePresentModesKHR( *surface ) ); + vk::PresentModeKHR presentMode = vk::su::pickPresentMode( physicalDevice.getSurfacePresentModesKHR( surface ) ); vk::SwapchainCreateInfoKHR swapChainCreateInfo( {}, - *surface, + surface, vk::su::clamp( 3u, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount ), colorFormat, surfaceFormat.colorSpace, @@ -477,24 +477,23 @@ namespace vk if ( needsStaging ) { // Since we're going to blit to the texture image, set its layout to eTransferDstOptimal - vk::raii::su::setImageLayout( - commandBuffer, *imageData.image, imageData.format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); + vk::raii::su::setImageLayout( commandBuffer, imageData.image, imageData.format, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal ); vk::BufferImageCopy copyRegion( 0, extent.width, extent.height, vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, 0, 0, 1 ), vk::Offset3D( 0, 0, 0 ), vk::Extent3D( extent, 1 ) ); - commandBuffer.copyBufferToImage( *stagingBufferData.buffer, *imageData.image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); + commandBuffer.copyBufferToImage( stagingBufferData.buffer, imageData.image, vk::ImageLayout::eTransferDstOptimal, copyRegion ); // Set the layout for the texture image from eTransferDstOptimal to eShaderReadOnlyOptimal vk::raii::su::setImageLayout( - commandBuffer, *imageData.image, imageData.format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); + commandBuffer, imageData.image, imageData.format, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal ); } else { // If we can use the linear tiled image as a texture, just do it vk::raii::su::setImageLayout( - commandBuffer, *imageData.image, imageData.format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); + commandBuffer, imageData.image, imageData.format, vk::ImageLayout::ePreinitialized, vk::ImageLayout::eShaderReadOnlyOptimal ); } } @@ -513,7 +512,7 @@ namespace vk assert( queueFamilyProperties.size() < std::numeric_limits::max() ); uint32_t graphicsQueueFamilyIndex = vk::su::findGraphicsQueueFamilyIndex( queueFamilyProperties ); - if ( physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, *surface ) ) + if ( physicalDevice.getSurfaceSupportKHR( graphicsQueueFamilyIndex, surface ) ) { return std::make_pair( graphicsQueueFamilyIndex, graphicsQueueFamilyIndex ); // the first graphicsQueueFamilyIndex does also support presents @@ -524,7 +523,7 @@ namespace vk for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && - physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) { return std::make_pair( static_cast( i ), static_cast( i ) ); } @@ -534,7 +533,7 @@ namespace vk // family index that supports present for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) { - if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), surface ) ) { return std::make_pair( graphicsQueueFamilyIndex, static_cast( i ) 
); } @@ -545,7 +544,7 @@ namespace vk vk::raii::CommandBuffer makeCommandBuffer( vk::raii::Device const & device, vk::raii::CommandPool const & commandPool ) { - vk::CommandBufferAllocateInfo commandBufferAllocateInfo( *commandPool, vk::CommandBufferLevel::ePrimary, 1 ); + vk::CommandBufferAllocateInfo commandBufferAllocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 ); return std::move( vk::raii::CommandBuffers( device, commandBufferAllocateInfo ).front() ); } @@ -607,15 +606,15 @@ namespace vk vk::Extent2D const & extent ) { vk::ImageView attachments[2]; - attachments[1] = pDepthImageView ? **pDepthImageView : vk::ImageView(); + attachments[1] = pDepthImageView ? *pDepthImageView : vk::ImageView(); vk::FramebufferCreateInfo framebufferCreateInfo( - vk::FramebufferCreateFlags(), *renderPass, pDepthImageView ? 2 : 1, attachments, extent.width, extent.height, 1 ); + vk::FramebufferCreateFlags(), renderPass, pDepthImageView ? 2 : 1, attachments, extent.width, extent.height, 1 ); std::vector framebuffers; framebuffers.reserve( imageViews.size() ); for ( auto const & imageView : imageViews ) { - attachments[0] = *imageView; + attachments[0] = imageView; framebuffers.push_back( vk::raii::Framebuffer( device, framebufferCreateInfo ) ); } @@ -636,8 +635,8 @@ namespace vk vk::raii::RenderPass const & renderPass ) { std::array pipelineShaderStageCreateInfos = { - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, *vertexShaderModule, "main", vertexShaderSpecializationInfo ), - vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule, "main", fragmentShaderSpecializationInfo ) + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main", vertexShaderSpecializationInfo ), + vk::PipelineShaderStageCreateInfo( {}, vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main", fragmentShaderSpecializationInfo ) }; std::vector vertexInputAttributeDescriptions; @@ -705,8 +704,8 @@ namespace vk &pipelineDepthStencilStateCreateInfo, &pipelineColorBlendStateCreateInfo, &pipelineDynamicStateCreateInfo, - *pipelineLayout, - *renderPass ); + pipelineLayout, + renderPass ); return vk::raii::Pipeline( device, pipelineCache, graphicsPipelineCreateInfo ); } @@ -814,8 +813,8 @@ namespace vk void submitAndWait( vk::raii::Device const & device, vk::raii::Queue const & queue, vk::raii::CommandBuffer const & commandBuffer ) { vk::raii::Fence fence( device, vk::FenceCreateInfo() ); - queue.submit( vk::SubmitInfo( nullptr, nullptr, *commandBuffer ), *fence ); - while ( vk::Result::eTimeout == device.waitForFences( { *fence }, VK_TRUE, vk::su::FenceTimeout ) ) + queue.submit( vk::SubmitInfo( nullptr, nullptr, *commandBuffer ), fence ); + while ( vk::Result::eTimeout == device.waitForFences( { fence }, VK_TRUE, vk::su::FenceTimeout ) ) ; } @@ -834,18 +833,18 @@ namespace vk uint32_t dstBinding = bindingOffset; for ( auto const & bd : bufferData ) { - bufferInfos.emplace_back( *std::get<1>( bd ), 0, std::get<2>( bd ) ); + bufferInfos.emplace_back( std::get<1>( bd ), 0, std::get<2>( bd ) ); vk::BufferView bufferView; if ( std::get<3>( bd ) ) { - bufferView = **std::get<3>( bd ); + bufferView = *std::get<3>( bd ); } writeDescriptorSets.emplace_back( - *descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), std::get<3>( bd ) ? &bufferView : nullptr ); + descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), std::get<3>( bd ) ? 
&bufferView : nullptr ); } - vk::DescriptorImageInfo imageInfo( *textureData.sampler, *textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); - writeDescriptorSets.emplace_back( *descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, nullptr, nullptr ); + vk::DescriptorImageInfo imageInfo( textureData.sampler, textureData.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + writeDescriptorSets.emplace_back( descriptorSet, dstBinding, 0, vk::DescriptorType::eCombinedImageSampler, imageInfo, nullptr, nullptr ); device.updateDescriptorSets( writeDescriptorSets, nullptr ); } @@ -865,14 +864,14 @@ namespace vk uint32_t dstBinding = bindingOffset; for ( auto const & bd : bufferData ) { - bufferInfos.emplace_back( *std::get<1>( bd ), 0, std::get<2>( bd ) ); + bufferInfos.emplace_back( std::get<1>( bd ), 0, std::get<2>( bd ) ); vk::BufferView bufferView; if ( std::get<3>( bd ) ) { - bufferView = **std::get<3>( bd ); + bufferView = *std::get<3>( bd ); } writeDescriptorSets.emplace_back( - *descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), std::get<3>( bd ) ? &bufferView : nullptr ); + descriptorSet, dstBinding++, 0, 1, std::get<0>( bd ), nullptr, &bufferInfos.back(), std::get<3>( bd ) ? &bufferView : nullptr ); } std::vector imageInfos; @@ -881,9 +880,9 @@ namespace vk imageInfos.reserve( textureData.size() ); for ( auto const & thd : textureData ) { - imageInfos.emplace_back( *thd.sampler, *thd.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); + imageInfos.emplace_back( thd.sampler, thd.imageData.imageView, vk::ImageLayout::eShaderReadOnlyOptimal ); } - writeDescriptorSets.emplace_back( *descriptorSet, + writeDescriptorSets.emplace_back( descriptorSet, dstBinding, 0, vk::su::checked_cast( imageInfos.size() ), diff --git a/README.md b/README.md index 8d34763..4449a58 100644 --- a/README.md +++ b/README.md @@ -934,6 +934,10 @@ With this define, you can disable these declarations, but you will have to decla If both, VULKAN_HPP_NO_EXCEPTIONS and VULKAN_HPP_EXPECTED are defined, the vk::raii-classes don't throw exceptions. That is, the actual constructors are not available, but the creation-functions must be used. For more details have a look at the vk_raii_ProgrammingGuide.md. +#### VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST + +Even though the ```vk::UniqueHandles``` and the ```vk::SharedHandles``` are semantically close to pointers, an implicit cast operator to the underlying ```vk::Handle``` might be handy. You can add that implicit cast operator by defining ```VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST```. + #### VULKAN_HPP_STORAGE_API With this define you can specify whether the ```DispatchLoaderDynamic``` is imported or exported (see ```VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE```). If ```VULKAN_HPP_STORAGE_API``` is not defined externally, and ```VULKAN_HPP_STORAGE_SHARED``` is defined, depending on the ```VULKAN_HPP_STORAGE_SHARED_EXPORT``` being defined, ```VULKAN_HPP_STORAGE_API``` is either set to ```__declspec( dllexport )``` (for MSVC) / ```__attribute__( ( visibility( "default" ) ) )``` (for gcc or clang) or ```__declspec( dllimport )``` (for MSVC), respectively. For other compilers, you might specify the corresponding storage by defining ```VULKAN_HPP_STORAGE_API``` on your own. 
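The new ```VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST``` section above only states that the implicit cast operator exists; the sketch below shows what it enables in practice. It is illustrative only and not part of this patch: it assumes a valid ```vk::Device``` named ```device``` and a suitable queue family index (instance and device creation are omitted), and the function name ```allocateAndRecord``` is purely hypothetical.

```cpp
// Illustrative sketch (not part of this patch): assumes a valid vk::Device `device`
// and a queue family index; instance and device creation are omitted.
#define VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST  // must be defined before including vulkan.hpp
#include <vulkan/vulkan.hpp>

void allocateAndRecord( vk::Device const & device, uint32_t queueFamilyIndex )
{
  vk::UniqueCommandPool commandPool = device.createCommandPoolUnique( vk::CommandPoolCreateInfo( {}, queueFamilyIndex ) );

  // commandPool converts implicitly to vk::CommandPool here;
  // without the define, *commandPool (or commandPool.get()) would be needed
  vk::CommandBufferAllocateInfo allocateInfo( commandPool, vk::CommandBufferLevel::ePrimary, 1 );
  vk::CommandBuffer commandBuffer = device.allocateCommandBuffers( allocateInfo ).front();

  commandBuffer.begin( vk::CommandBufferBeginInfo() );
  commandBuffer.end();
}  // the vk::UniqueCommandPool is destroyed here, freeing the command buffer with it
```

The ```vk::raii``` handles get the analogous cast operator unconditionally (see the ```vulkan_raii.hpp``` hunks below), which is why the RAII samples in this patch can drop their explicit ```*handle``` dereferences.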
diff --git a/VulkanHppGenerator.cpp b/VulkanHppGenerator.cpp index 1af2be3..735a487 100644 --- a/VulkanHppGenerator.cpp +++ b/VulkanHppGenerator.cpp @@ -8404,6 +8404,11 @@ ${moveAssignmentInstructions} return m_${handleName}; } + operator VULKAN_HPP_NAMESPACE::${handleType}() const VULKAN_HPP_NOEXCEPT + { + return m_${handleName}; + } + void clear() VULKAN_HPP_NOEXCEPT { ${clearMembers} @@ -9016,7 +9021,7 @@ std::string { if ( destructorParam.type.type == "Vk" + parentType ) { - initializationList += "m_" + parentName + "( *" + parentName + " ), "; + initializationList += "m_" + parentName + "( " + parentName + " ), "; } else if ( destructorParam.type.type == handle.first ) { diff --git a/XMLHelper.hpp b/XMLHelper.hpp index 4c2a8b3..9861864 100644 --- a/XMLHelper.hpp +++ b/XMLHelper.hpp @@ -274,7 +274,7 @@ inline std::vector getChildElements( ElementContai return childElements; } -inline bool isHexNumber(std::string const& name) +inline bool isHexNumber( std::string const & name ) { return name.starts_with( "0x" ) && ( name.find_first_not_of( "0123456789ABCDEF", 2 ) == std::string::npos ); } diff --git a/samples/SharedHandles/SharedHandles.cpp b/samples/SharedHandles/SharedHandles.cpp index 62fefee..6af090b 100644 --- a/samples/SharedHandles/SharedHandles.cpp +++ b/samples/SharedHandles/SharedHandles.cpp @@ -15,6 +15,8 @@ // VulkanHpp Samples : SharedHandles // Draw a textured cube using shared handles for resource management and correct order of destruction +#define VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST + #include "../utils/geometries.hpp" #include "../utils/math.hpp" #include "../utils/shaders.hpp" @@ -34,9 +36,16 @@ std::vector makeSharedFramebuffers( const vk::SharedDevic const vk::SharedImageView & depthImageView, const vk::Extent2D & extent ) { + // show the simplified usage with VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST defined +#if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + auto renderPassHandle = renderPass.get(); // lvalue reference is required for the capture below + std::vector sharedFramebuffers; + std::vector framebuffers = vk::su::createFramebuffers( device, renderPassHandle, imageViews, depthImageView, extent ); +#else auto renderPassHandle = renderPass.get(); // lvalue reference is required for the capture below std::vector sharedFramebuffers; std::vector framebuffers = vk::su::createFramebuffers( device.get(), renderPassHandle, imageViews, depthImageView.get(), extent ); +#endif sharedFramebuffers.reserve( framebuffers.size() ); for ( auto & framebuffer : framebuffers ) { @@ -74,7 +83,7 @@ public: void createDeviceAndSwapChain( const vk::su::WindowData & window ) { VkSurfaceKHR surface; - VkResult err = glfwCreateWindowSurface( static_cast( instance.get() ), window.handle, nullptr, &surface ); + VkResult err = glfwCreateWindowSurface( static_cast( instance.get() ), window.handle, nullptr, &surface ); if ( err != VK_SUCCESS ) throw std::runtime_error( "Failed to create window!" 
); vk::SharedSurfaceKHR sharedSurface{ surface, instance }; @@ -115,7 +124,6 @@ public: device }; graphicsQueue = vk::SharedQueue{ device->getQueue( graphicsAndPresentQueueFamilyIndex.first, 0 ), device }; - presentQueue = vk::SharedQueue{ device->getQueue( graphicsAndPresentQueueFamilyIndex.second, 0 ), device }; depthFormat = vk::Format::eD16Unorm; diff --git a/samples/utils/utils.cpp b/samples/utils/utils.cpp index 7b4e49d..64eb1bc 100644 --- a/samples/utils/utils.cpp +++ b/samples/utils/utils.cpp @@ -354,13 +354,13 @@ namespace vk void * /*pUserData*/ ) { #if !defined( NDEBUG ) - if ( static_cast(pCallbackData->messageIdNumber) == 0x822806fa ) + if ( static_cast( pCallbackData->messageIdNumber ) == 0x822806fa ) { // Validation Warning: vkCreateInstance(): to enable extension VK_EXT_debug_utils, but this extension is intended to support use by applications when // debugging and it is strongly recommended that it be otherwise avoided. return vk::False; } - else if ( static_cast(pCallbackData->messageIdNumber) == 0xe8d1a9fe ) + else if ( static_cast( pCallbackData->messageIdNumber ) == 0xe8d1a9fe ) { // Validation Performance Warning: Using debug builds of the validation layers *will* adversely affect performance. return vk::False; diff --git a/snippets/SharedHandle.hpp b/snippets/SharedHandle.hpp index 871831c..336d079 100644 --- a/snippets/SharedHandle.hpp +++ b/snippets/SharedHandle.hpp @@ -69,7 +69,10 @@ class ReferenceCounter { public: template - ReferenceCounter( Args &&... control_args ) : m_header( std::forward( control_args )... ){} + ReferenceCounter( Args &&... control_args ) : m_header( std::forward( control_args )... ) + { + } + ReferenceCounter( const ReferenceCounter & ) = delete; ReferenceCounter & operator=( const ReferenceCounter & ) = delete; @@ -163,6 +166,13 @@ public: return bool( m_handle ); } +#if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + operator HandleType() const VULKAN_HPP_NOEXCEPT + { + return m_handle; + } +#endif + const HandleType * operator->() const VULKAN_HPP_NOEXCEPT { return &m_handle; diff --git a/snippets/UniqueHandle.hpp b/snippets/UniqueHandle.hpp index c268ce5..6876521 100644 --- a/snippets/UniqueHandle.hpp +++ b/snippets/UniqueHandle.hpp @@ -1,127 +1,131 @@ #if !defined( VULKAN_HPP_NO_SMART_HANDLE ) - template - class UniqueHandleTraits; +template +class UniqueHandleTraits; - template - class UniqueHandle : public UniqueHandleTraits::deleter +template +class UniqueHandle : public UniqueHandleTraits::deleter +{ +private: + using Deleter = typename UniqueHandleTraits::deleter; + +public: + using element_type = Type; + + UniqueHandle() : Deleter(), m_value() {} + + explicit UniqueHandle( Type const & value, Deleter const & deleter = Deleter() ) VULKAN_HPP_NOEXCEPT + : Deleter( deleter ) + , m_value( value ) { - private: - using Deleter = typename UniqueHandleTraits::deleter; + } - public: - using element_type = Type; + UniqueHandle( UniqueHandle const & ) = delete; - UniqueHandle() - : Deleter() - , m_value() - {} + UniqueHandle( UniqueHandle && other ) VULKAN_HPP_NOEXCEPT + : Deleter( std::move( static_cast( other ) ) ) + , m_value( other.release() ) + { + } - explicit UniqueHandle( Type const & value, Deleter const & deleter = Deleter() ) VULKAN_HPP_NOEXCEPT - : Deleter( deleter ) - , m_value( value ) - {} + ~UniqueHandle() VULKAN_HPP_NOEXCEPT + { + if ( m_value ) + { + this->destroy( m_value ); + } + } - UniqueHandle( UniqueHandle const & ) = delete; + UniqueHandle & operator=( UniqueHandle const & ) = delete; - UniqueHandle( 
UniqueHandle && other ) VULKAN_HPP_NOEXCEPT - : Deleter( std::move( static_cast( other ) ) ) - , m_value( other.release() ) - {} + UniqueHandle & operator=( UniqueHandle && other ) VULKAN_HPP_NOEXCEPT + { + reset( other.release() ); + *static_cast( this ) = std::move( static_cast( other ) ); + return *this; + } - ~UniqueHandle() VULKAN_HPP_NOEXCEPT + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_value.operator bool(); + } + +# if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + operator Type() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } +# endif + + Type const * operator->() const VULKAN_HPP_NOEXCEPT + { + return &m_value; + } + + Type * operator->() VULKAN_HPP_NOEXCEPT + { + return &m_value; + } + + Type const & operator*() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + Type & operator*() VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + const Type & get() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + Type & get() VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + void reset( Type const & value = Type() ) VULKAN_HPP_NOEXCEPT + { + if ( m_value != value ) { if ( m_value ) { this->destroy( m_value ); } + m_value = value; } - - UniqueHandle & operator=( UniqueHandle const & ) = delete; - - UniqueHandle & operator=( UniqueHandle && other ) VULKAN_HPP_NOEXCEPT - { - reset( other.release() ); - *static_cast( this ) = std::move( static_cast( other ) ); - return *this; - } - - explicit operator bool() const VULKAN_HPP_NOEXCEPT - { - return m_value.operator bool(); - } - - Type const * operator->() const VULKAN_HPP_NOEXCEPT - { - return &m_value; - } - - Type * operator->() VULKAN_HPP_NOEXCEPT - { - return &m_value; - } - - Type const & operator*() const VULKAN_HPP_NOEXCEPT - { - return m_value; - } - - Type & operator*() VULKAN_HPP_NOEXCEPT - { - return m_value; - } - - const Type & get() const VULKAN_HPP_NOEXCEPT - { - return m_value; - } - - Type & get() VULKAN_HPP_NOEXCEPT - { - return m_value; - } - - void reset( Type const & value = Type() ) VULKAN_HPP_NOEXCEPT - { - if ( m_value != value ) - { - if ( m_value ) - { - this->destroy( m_value ); - } - m_value = value; - } - } - - Type release() VULKAN_HPP_NOEXCEPT - { - Type value = m_value; - m_value = nullptr; - return value; - } - - void swap( UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT - { - std::swap( m_value, rhs.m_value ); - std::swap( static_cast( *this ), static_cast( rhs ) ); - } - - private: - Type m_value; - }; - - template - VULKAN_HPP_INLINE std::vector - uniqueToRaw( std::vector const & handles ) - { - std::vector newBuffer( handles.size() ); - std::transform( handles.begin(), handles.end(), newBuffer.begin(), []( UniqueType const & handle ) { return handle.get(); } ); - return newBuffer; } - template - VULKAN_HPP_INLINE void swap( UniqueHandle & lhs, - UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT + Type release() VULKAN_HPP_NOEXCEPT { - lhs.swap( rhs ); + Type value = m_value; + m_value = nullptr; + return value; } + + void swap( UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT + { + std::swap( m_value, rhs.m_value ); + std::swap( static_cast( *this ), static_cast( rhs ) ); + } + +private: + Type m_value; +}; + +template +VULKAN_HPP_INLINE std::vector uniqueToRaw( std::vector const & handles ) +{ + std::vector newBuffer( handles.size() ); + std::transform( handles.begin(), handles.end(), newBuffer.begin(), []( UniqueType const & handle ) { return handle.get(); } ); + return newBuffer; +} + +template +VULKAN_HPP_INLINE void swap( UniqueHandle & lhs, UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT +{ + 
lhs.swap( rhs ); +} #endif \ No newline at end of file diff --git a/tests/NoExceptionsRAII/NoExceptionsRAII.cpp b/tests/NoExceptionsRAII/NoExceptionsRAII.cpp index 528f948..8f1ac0e 100644 --- a/tests/NoExceptionsRAII/NoExceptionsRAII.cpp +++ b/tests/NoExceptionsRAII/NoExceptionsRAII.cpp @@ -23,7 +23,7 @@ // only if VULKAN_HPP_RAII_NO_EXCEPTIONS really is defined, this test is meaningfull and needs to compile and run. #if defined( VULKAN_HPP_RAII_NO_EXCEPTIONS ) -#include +# include static char const * AppName = "NoExceptions"; static char const * EngineName = "Vulkan.hpp"; @@ -56,16 +56,15 @@ int main( int /*argc*/, char ** /*argv*/ ) // create a Device float queuePriority = 0.0f; vk::DeviceQueueCreateInfo deviceQueueCreateInfo( vk::DeviceQueueCreateFlags(), static_cast( graphicsQueueFamilyIndex ), 1, &queuePriority ); - auto device = physicalDevice.createDevice( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); + auto device = physicalDevice.createDevice( vk::DeviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ) ); assert( device.has_value() ); // create a CommandPool to allocate a CommandBuffer from - auto commandPool = - device->createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), deviceQueueCreateInfo.queueFamilyIndex ) ); + auto commandPool = device->createCommandPool( vk::CommandPoolCreateInfo( vk::CommandPoolCreateFlags(), deviceQueueCreateInfo.queueFamilyIndex ) ); assert( commandPool.has_value() ); // allocate a CommandBuffer from the CommandPool - auto commandBuffers = device->allocateCommandBuffers( vk::CommandBufferAllocateInfo( **commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ); + auto commandBuffers = device->allocateCommandBuffers( vk::CommandBufferAllocateInfo( *commandPool, vk::CommandBufferLevel::ePrimary, 1 ) ); assert( commandBuffers.has_value() ); auto commandBuffer = std::move( commandBuffers->front() ); diff --git a/tests/UniqueHandle/UniqueHandle.cpp b/tests/UniqueHandle/UniqueHandle.cpp index 381086a..2655256 100644 --- a/tests/UniqueHandle/UniqueHandle.cpp +++ b/tests/UniqueHandle/UniqueHandle.cpp @@ -14,6 +14,8 @@ // // VulkanHpp Test: Compile test for Unique handles +#define VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST + #include "../../samples/utils/geometries.hpp" #include "../../samples/utils/shaders.hpp" #include "../../samples/utils/utils.hpp" @@ -42,7 +44,7 @@ public: }; }; -vk::UniqueDescriptorSetLayout createDescriptorSetLayoutUnique( vk::Device const & device, +vk::UniqueDescriptorSetLayout createDescriptorSetLayoutUnique( vk::UniqueDevice const & device, std::vector> const & bindingData, vk::DescriptorSetLayoutCreateFlags flags = {} ) { @@ -52,7 +54,7 @@ vk::UniqueDescriptorSetLayout createDescriptorSetLayoutUnique( vk::Device const bindings[i] = vk::DescriptorSetLayoutBinding( vk::su::checked_cast( i ), std::get<0>( bindingData[i] ), std::get<1>( bindingData[i] ), std::get<2>( bindingData[i] ) ); } - return device.createDescriptorSetLayoutUnique( vk::DescriptorSetLayoutCreateInfo( flags, bindings ) ); + return device->createDescriptorSetLayoutUnique( vk::DescriptorSetLayoutCreateInfo( flags, bindings ) ); } vk::UniqueInstance createInstanceUnique( std::string const & appName, @@ -90,11 +92,36 @@ vk::UniqueInstance createInstanceUnique( std::string const & appNam return instance; } -vk::UniqueRenderPass createRenderPassUnique( vk::Device const & device, - vk::Format colorFormat, - vk::Format depthFormat, - vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, - vk::ImageLayout 
colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ) +vk::UniqueDevice createDeviceUnique( vk::PhysicalDevice const & physicalDevice, + uint32_t queueFamilyIndex, + std::vector const & extensions, + vk::PhysicalDeviceFeatures const * physicalDeviceFeatures = nullptr, + void const * pNext = nullptr ) +{ + std::vector enabledExtensions; + enabledExtensions.reserve( extensions.size() ); + for ( auto const & ext : extensions ) + { + enabledExtensions.push_back( ext.data() ); + } + + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo( {}, queueFamilyIndex, 1, &queuePriority ); + vk::DeviceCreateInfo deviceCreateInfo( {}, deviceQueueCreateInfo, {}, enabledExtensions, physicalDeviceFeatures, pNext ); + + vk::UniqueDevice device = physicalDevice.createDeviceUnique( deviceCreateInfo ); +#if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + // initialize function pointers for instance + VULKAN_HPP_DEFAULT_DISPATCHER.init( *device ); +#endif + return device; +} + +vk::UniqueRenderPass createRenderPassUnique( vk::UniqueDevice const & device, + vk::Format colorFormat, + vk::Format depthFormat, + vk::AttachmentLoadOp loadOp = vk::AttachmentLoadOp::eClear, + vk::ImageLayout colorFinalLayout = vk::ImageLayout::ePresentSrcKHR ) { std::vector attachmentDescriptions; assert( colorFormat != vk::Format::eUndefined ); @@ -123,14 +150,14 @@ vk::UniqueRenderPass createRenderPassUnique( vk::Device const & device, vk::AttachmentReference depthAttachment( 1, vk::ImageLayout::eDepthStencilAttachmentOptimal ); vk::SubpassDescription subpassDescription( vk::SubpassDescriptionFlags(), vk::PipelineBindPoint::eGraphics, - {}, + {}, colorAttachment, - {}, + {}, ( depthFormat != vk::Format::eUndefined ) ? &depthAttachment : nullptr ); - return device.createRenderPassUnique( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachmentDescriptions, subpassDescription ) ); + return device->createRenderPassUnique( vk::RenderPassCreateInfo( vk::RenderPassCreateFlags(), attachmentDescriptions, subpassDescription ) ); } -vk::UniqueShaderModule createShaderModuleUnique( vk::Device const & device, vk::ShaderStageFlagBits shaderStage, std::string const & shaderText ) +vk::UniqueShaderModule createShaderModuleUnique( vk::UniqueDevice const & device, vk::ShaderStageFlagBits shaderStage, std::string const & shaderText ) { std::vector shaderSPV; if ( !vk::su::GLSLtoSPV( shaderStage, shaderText, shaderSPV ) ) @@ -138,7 +165,49 @@ vk::UniqueShaderModule createShaderModuleUnique( vk::Device const & device, vk:: throw std::runtime_error( "Could not convert glsl shader to spir-v -> terminating" ); } - return device.createShaderModuleUnique( vk::ShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), shaderSPV ) ); + return device->createShaderModuleUnique( vk::ShaderModuleCreateInfo( vk::ShaderModuleCreateFlags(), shaderSPV ) ); +} + +vk::UniqueSwapchainKHR createSwapchainKHRUnique( vk::PhysicalDevice physicalDevice, vk::UniqueDevice const & device, vk::SurfaceKHR surface ) +{ + vk::SurfaceCapabilitiesKHR surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR( surface ); + vk::SurfaceFormatKHR surfaceFormat = vk::su::pickSurfaceFormat( physicalDevice.getSurfaceFormatsKHR( surface ) ); + vk::Extent2D swapchainExtent; + if ( surfaceCapabilities.currentExtent.width == std::numeric_limits::max() ) + { + // If the surface size is undefined, the size is set to the size of the images requested. 
+ swapchainExtent.width = vk::su::clamp( 64, surfaceCapabilities.minImageExtent.width, surfaceCapabilities.maxImageExtent.width ); + swapchainExtent.height = vk::su::clamp( 64, surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.height ); + } + else + { + // If the surface size is defined, the swap chain size must match + swapchainExtent = surfaceCapabilities.currentExtent; + } + vk::SurfaceTransformFlagBitsKHR preTransform = ( surfaceCapabilities.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity ) + ? vk::SurfaceTransformFlagBitsKHR::eIdentity + : surfaceCapabilities.currentTransform; + vk::CompositeAlphaFlagBitsKHR compositeAlpha = + ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePreMultiplied ) ? vk::CompositeAlphaFlagBitsKHR::ePreMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::ePostMultiplied ) ? vk::CompositeAlphaFlagBitsKHR::ePostMultiplied + : ( surfaceCapabilities.supportedCompositeAlpha & vk::CompositeAlphaFlagBitsKHR::eInherit ) ? vk::CompositeAlphaFlagBitsKHR::eInherit + : vk::CompositeAlphaFlagBitsKHR::eOpaque; + vk::SwapchainCreateInfoKHR swapChainCreateInfo( {}, + surface, + vk::su::clamp( 3u, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount ), + surfaceFormat.format, + surfaceFormat.colorSpace, + swapchainExtent, + 1, + vk::ImageUsageFlagBits::eColorAttachment, + vk::SharingMode::eExclusive, + {}, + preTransform, + compositeAlpha, + vk::PresentModeKHR::eFifo, + true, + nullptr ); + return device->createSwapchainKHRUnique( swapChainCreateInfo ); } int main( int /*argc*/, char ** /*argv*/ ) @@ -161,25 +230,30 @@ int main( int /*argc*/, char ** /*argv*/ ) assert( graphicsQueueFamilyIndex < queueFamilyProperties.size() ); // create a Device - float queuePriority = 0.0f; - vk::DeviceQueueCreateInfo deviceQueueCreateInfo( vk::DeviceQueueCreateFlags(), static_cast( graphicsQueueFamilyIndex ), 1, &queuePriority ); - vk::DeviceCreateInfo deviceCreateInfo( vk::DeviceCreateFlags(), deviceQueueCreateInfo ); - vk::UniqueDevice device = physicalDevices[0].createDeviceUnique( deviceCreateInfo ); + vk::UniqueDevice device = createDeviceUnique( physicalDevices[0], static_cast( graphicsQueueFamilyIndex ), vk::su::getDeviceExtensions() ); // create a PipelineCache vk::UniquePipelineCache pipelineCache = device->createPipelineCacheUnique( vk::PipelineCacheCreateInfo() ); // get some vk::ShaderModules glslang::InitializeProcess(); - vk::UniqueShaderModule vertexShaderModule = createShaderModuleUnique( *device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); - vk::UniqueShaderModule fragmentShaderModule = createShaderModuleUnique( *device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); + vk::UniqueShaderModule vertexShaderModule = createShaderModuleUnique( device, vk::ShaderStageFlagBits::eVertex, vertexShaderText_PC_C ); + vk::UniqueShaderModule fragmentShaderModule = createShaderModuleUnique( device, vk::ShaderStageFlagBits::eFragment, fragmentShaderText_C_C ); glslang::FinalizeProcess(); // initialize an array of vk::PipelineShaderStageCreateInfos + // showing the simplified usage when VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST is defined +#if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + std::array pipelineShaderStageCreateInfos = { + vk::PipelineShaderStageCreateInfo( vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, vertexShaderModule, "main" ), + vk::PipelineShaderStageCreateInfo( 
vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, fragmentShaderModule, "main" ) + }; +#else std::array pipelineShaderStageCreateInfos = { vk::PipelineShaderStageCreateInfo( vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eVertex, *vertexShaderModule, "main" ), vk::PipelineShaderStageCreateInfo( vk::PipelineShaderStageCreateFlags(), vk::ShaderStageFlagBits::eFragment, *fragmentShaderModule, "main" ) }; +#endif vk::VertexInputBindingDescription vertexInputBindingDescription( 0, sizeof( coloredCubeData[0] ) ); std::array vertexInputAttributeDescriptions = { @@ -247,14 +321,14 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::PipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo( vk::PipelineDynamicStateCreateFlags(), dynamicStates ); vk::UniqueDescriptorSetLayout descriptorSetLayout = - createDescriptorSetLayoutUnique( *device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); + createDescriptorSetLayoutUnique( device, { { vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex } } ); vk::UniquePipelineLayout pipelineLayout = device->createPipelineLayoutUnique( vk::PipelineLayoutCreateInfo( vk::PipelineLayoutCreateFlags(), *descriptorSetLayout ) ); vk::su::SurfaceData surfaceData( *instance, AppName, vk::Extent2D( 500, 500 ) ); vk::UniqueRenderPass renderPass = createRenderPassUnique( - *device, vk::su::pickSurfaceFormat( physicalDevices[0].getSurfaceFormatsKHR( surfaceData.surface ) ).format, vk::Format::eD16Unorm ); + device, vk::su::pickSurfaceFormat( physicalDevices[0].getSurfaceFormatsKHR( surfaceData.surface ) ).format, vk::Format::eD16Unorm ); // initialize the vk::GraphicsPipelineCreateInfo vk::GraphicsPipelineCreateInfo graphicsPipelineCreateInfo( vk::PipelineCreateFlags(), // flags @@ -289,11 +363,15 @@ int main( int /*argc*/, char ** /*argv*/ ) vk::UniquePipeline graphicsPipeline3 = std::move( device->createGraphicsPipelinesUnique( *pipelineCache, graphicsPipelineCreateInfo ).value[0] ); - std::vector descriptorSets = device->allocateDescriptorSetsUnique( {} ); + vk::DescriptorPoolSize poolSize( vk::DescriptorType::eUniformBuffer, 1 ); + vk::UniqueDescriptorPool descriptorPool = device->createDescriptorPoolUnique( { vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 1, poolSize } ); - vk::UniqueSwapchainKHR swapchain = device->createSharedSwapchainKHRUnique( {} ); + std::vector descriptorSets = device->allocateDescriptorSetsUnique( { *descriptorPool, *descriptorSetLayout } ); - // destroy the non-Unique surface used here + vk::UniqueSwapchainKHR swapchain = createSwapchainKHRUnique( physicalDevices[0], device, surfaceData.surface ); + + // destroy the non-Unique surface used here, but swapchain needs to be destroyed first + swapchain.reset(); instance->destroySurfaceKHR( surfaceData.surface ); } catch ( vk::SystemError & err ) diff --git a/vulkan/vulkan.hpp b/vulkan/vulkan.hpp index e0bc003..130c79b 100644 --- a/vulkan/vulkan.hpp +++ b/vulkan/vulkan.hpp @@ -919,6 +919,13 @@ namespace VULKAN_HPP_NAMESPACE return m_value.operator bool(); } +# if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + operator Type() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } +# endif + Type const * operator->() const VULKAN_HPP_NOEXCEPT { return &m_value; diff --git a/vulkan/vulkan_raii.hpp b/vulkan/vulkan_raii.hpp index 49af076..2cb68b8 100644 --- a/vulkan/vulkan_raii.hpp +++ b/vulkan/vulkan_raii.hpp @@ -2833,6 +2833,11 @@ namespace VULKAN_HPP_NAMESPACE return m_instance; } + operator 
VULKAN_HPP_NAMESPACE::Instance() const VULKAN_HPP_NOEXCEPT + { + return m_instance; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_instance ) @@ -3100,6 +3105,11 @@ namespace VULKAN_HPP_NAMESPACE return m_physicalDevice; } + operator VULKAN_HPP_NAMESPACE::PhysicalDevice() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice; + } + void clear() VULKAN_HPP_NOEXCEPT { m_physicalDevice = nullptr; @@ -3561,6 +3571,11 @@ namespace VULKAN_HPP_NAMESPACE return m_device; } + operator VULKAN_HPP_NAMESPACE::Device() const VULKAN_HPP_NOEXCEPT + { + return m_device; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_device ) @@ -4584,7 +4599,7 @@ namespace VULKAN_HPP_NAMESPACE AccelerationStructureKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkAccelerationStructureKHR accelerationStructure, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_accelerationStructure( accelerationStructure ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -4628,6 +4643,11 @@ namespace VULKAN_HPP_NAMESPACE return m_accelerationStructure; } + operator VULKAN_HPP_NAMESPACE::AccelerationStructureKHR() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructure; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_accelerationStructure ) @@ -4699,7 +4719,7 @@ namespace VULKAN_HPP_NAMESPACE AccelerationStructureNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkAccelerationStructureNV accelerationStructure, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_accelerationStructure( accelerationStructure ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -4743,6 +4763,11 @@ namespace VULKAN_HPP_NAMESPACE return m_accelerationStructure; } + operator VULKAN_HPP_NAMESPACE::AccelerationStructureNV() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructure; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_accelerationStructure ) @@ -4822,7 +4847,7 @@ namespace VULKAN_HPP_NAMESPACE Buffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkBuffer buffer, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_buffer( buffer ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -4866,6 +4891,11 @@ namespace VULKAN_HPP_NAMESPACE return m_buffer; } + operator VULKAN_HPP_NAMESPACE::Buffer() const VULKAN_HPP_NOEXCEPT + { + return m_buffer; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_buffer ) @@ -4943,7 +4973,7 @@ namespace VULKAN_HPP_NAMESPACE BufferCollectionFUCHSIA( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkBufferCollectionFUCHSIA collection, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_collection( collection ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -4987,6 +5017,11 @@ namespace VULKAN_HPP_NAMESPACE return m_collection; } + operator VULKAN_HPP_NAMESPACE::BufferCollectionFUCHSIA() const VULKAN_HPP_NOEXCEPT + { + return m_collection; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_collection ) @@ -5067,7 +5102,7 @@ namespace VULKAN_HPP_NAMESPACE BufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkBufferView bufferView, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , 
m_bufferView( bufferView ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -5111,6 +5146,11 @@ namespace VULKAN_HPP_NAMESPACE return m_bufferView; } + operator VULKAN_HPP_NAMESPACE::BufferView() const VULKAN_HPP_NOEXCEPT + { + return m_bufferView; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_bufferView ) @@ -5181,7 +5221,7 @@ namespace VULKAN_HPP_NAMESPACE CommandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCommandPool commandPool, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_commandPool( commandPool ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -5225,6 +5265,11 @@ namespace VULKAN_HPP_NAMESPACE return m_commandPool; } + operator VULKAN_HPP_NAMESPACE::CommandPool() const VULKAN_HPP_NOEXCEPT + { + return m_commandPool; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_commandPool ) @@ -5296,7 +5341,7 @@ namespace VULKAN_HPP_NAMESPACE public: CommandBuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCommandBuffer commandBuffer, VkCommandPool commandPool ) - : m_device( *device ), m_commandPool( commandPool ), m_commandBuffer( commandBuffer ), m_dispatcher( device.getDispatcher() ) + : m_device( device ), m_commandPool( commandPool ), m_commandBuffer( commandBuffer ), m_dispatcher( device.getDispatcher() ) { } @@ -5337,6 +5382,11 @@ namespace VULKAN_HPP_NAMESPACE return m_commandBuffer; } + operator VULKAN_HPP_NAMESPACE::CommandBuffer() const VULKAN_HPP_NOEXCEPT + { + return m_commandBuffer; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_commandBuffer ) @@ -6362,7 +6412,7 @@ namespace VULKAN_HPP_NAMESPACE CuFunctionNVX( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCuFunctionNVX function, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_function( function ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -6406,6 +6456,11 @@ namespace VULKAN_HPP_NAMESPACE return m_function; } + operator VULKAN_HPP_NAMESPACE::CuFunctionNVX() const VULKAN_HPP_NOEXCEPT + { + return m_function; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_function ) @@ -6476,7 +6531,7 @@ namespace VULKAN_HPP_NAMESPACE CuModuleNVX( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCuModuleNVX module, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_module( module ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -6520,6 +6575,11 @@ namespace VULKAN_HPP_NAMESPACE return m_module; } + operator VULKAN_HPP_NAMESPACE::CuModuleNVX() const VULKAN_HPP_NOEXCEPT + { + return m_module; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_module ) @@ -6591,7 +6651,7 @@ namespace VULKAN_HPP_NAMESPACE CudaFunctionNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCudaFunctionNV function, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_function( function ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -6635,6 +6695,11 @@ namespace VULKAN_HPP_NAMESPACE return m_function; } + operator VULKAN_HPP_NAMESPACE::CudaFunctionNV() const VULKAN_HPP_NOEXCEPT + { + return m_function; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_function ) @@ -6707,7 +6772,7 @@ namespace VULKAN_HPP_NAMESPACE 
CudaModuleNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCudaModuleNV module, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_module( module ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -6751,6 +6816,11 @@ namespace VULKAN_HPP_NAMESPACE return m_module; } + operator VULKAN_HPP_NAMESPACE::CudaModuleNV() const VULKAN_HPP_NOEXCEPT + { + return m_module; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_module ) @@ -6826,7 +6896,7 @@ namespace VULKAN_HPP_NAMESPACE DebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, VkDebugReportCallbackEXT callback, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_instance( *instance ) + : m_instance( instance ) , m_callback( callback ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( instance.getDispatcher() ) @@ -6870,6 +6940,11 @@ namespace VULKAN_HPP_NAMESPACE return m_callback; } + operator VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT() const VULKAN_HPP_NOEXCEPT + { + return m_callback; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_callback ) @@ -6941,7 +7016,7 @@ namespace VULKAN_HPP_NAMESPACE DebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, VkDebugUtilsMessengerEXT messenger, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_instance( *instance ) + : m_instance( instance ) , m_messenger( messenger ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( instance.getDispatcher() ) @@ -6985,6 +7060,11 @@ namespace VULKAN_HPP_NAMESPACE return m_messenger; } + operator VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT() const VULKAN_HPP_NOEXCEPT + { + return m_messenger; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_messenger ) @@ -7055,7 +7135,7 @@ namespace VULKAN_HPP_NAMESPACE DeferredOperationKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDeferredOperationKHR operation, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_operation( operation ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -7099,6 +7179,11 @@ namespace VULKAN_HPP_NAMESPACE return m_operation; } + operator VULKAN_HPP_NAMESPACE::DeferredOperationKHR() const VULKAN_HPP_NOEXCEPT + { + return m_operation; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_operation ) @@ -7178,7 +7263,7 @@ namespace VULKAN_HPP_NAMESPACE DescriptorPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_descriptorPool( descriptorPool ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -7222,6 +7307,11 @@ namespace VULKAN_HPP_NAMESPACE return m_descriptorPool; } + operator VULKAN_HPP_NAMESPACE::DescriptorPool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_descriptorPool ) @@ -7286,7 +7376,7 @@ namespace VULKAN_HPP_NAMESPACE public: DescriptorSet( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDescriptorSet descriptorSet, VkDescriptorPool descriptorPool ) - : m_device( *device ), m_descriptorPool( descriptorPool ), m_descriptorSet( descriptorSet ), m_dispatcher( device.getDispatcher() ) + : m_device( device ), m_descriptorPool( descriptorPool ), m_descriptorSet( 
descriptorSet ), m_dispatcher( device.getDispatcher() ) { } @@ -7327,6 +7417,11 @@ namespace VULKAN_HPP_NAMESPACE return m_descriptorSet; } + operator VULKAN_HPP_NAMESPACE::DescriptorSet() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_descriptorSet ) @@ -7439,7 +7534,7 @@ namespace VULKAN_HPP_NAMESPACE DescriptorSetLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDescriptorSetLayout descriptorSetLayout, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_descriptorSetLayout( descriptorSetLayout ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -7483,6 +7578,11 @@ namespace VULKAN_HPP_NAMESPACE return m_descriptorSetLayout; } + operator VULKAN_HPP_NAMESPACE::DescriptorSetLayout() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_descriptorSetLayout ) @@ -7560,7 +7660,7 @@ namespace VULKAN_HPP_NAMESPACE DescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_descriptorUpdateTemplate( descriptorUpdateTemplate ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -7604,6 +7704,11 @@ namespace VULKAN_HPP_NAMESPACE return m_descriptorUpdateTemplate; } + operator VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_descriptorUpdateTemplate ) @@ -7675,7 +7780,7 @@ namespace VULKAN_HPP_NAMESPACE DeviceMemory( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDeviceMemory memory, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_memory( memory ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -7719,6 +7824,11 @@ namespace VULKAN_HPP_NAMESPACE return m_memory; } + operator VULKAN_HPP_NAMESPACE::DeviceMemory() const VULKAN_HPP_NOEXCEPT + { + return m_memory; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_memory ) @@ -7823,7 +7933,7 @@ namespace VULKAN_HPP_NAMESPACE # endif DisplayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, VkDisplayKHR display ) - : m_physicalDevice( *physicalDevice ), m_display( display ), m_dispatcher( physicalDevice.getDispatcher() ) + : m_physicalDevice( physicalDevice ), m_display( display ), m_dispatcher( physicalDevice.getDispatcher() ) { } @@ -7862,6 +7972,11 @@ namespace VULKAN_HPP_NAMESPACE return m_display; } + operator VULKAN_HPP_NAMESPACE::DisplayKHR() const VULKAN_HPP_NOEXCEPT + { + return m_display; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_display ) @@ -8014,6 +8129,11 @@ namespace VULKAN_HPP_NAMESPACE return m_displayModeKHR; } + operator VULKAN_HPP_NAMESPACE::DisplayModeKHR() const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR; + } + void clear() VULKAN_HPP_NOEXCEPT { m_physicalDevice = nullptr; @@ -8074,7 +8194,7 @@ namespace VULKAN_HPP_NAMESPACE Event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkEvent event, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_event( event ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( 
device.getDispatcher() ) @@ -8118,6 +8238,11 @@ namespace VULKAN_HPP_NAMESPACE return m_event; } + operator VULKAN_HPP_NAMESPACE::Event() const VULKAN_HPP_NOEXCEPT + { + return m_event; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_event ) @@ -8215,7 +8340,7 @@ namespace VULKAN_HPP_NAMESPACE Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkFence fence, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_fence( fence ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8259,6 +8384,11 @@ namespace VULKAN_HPP_NAMESPACE return m_fence; } + operator VULKAN_HPP_NAMESPACE::Fence() const VULKAN_HPP_NOEXCEPT + { + return m_fence; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_fence ) @@ -8333,7 +8463,7 @@ namespace VULKAN_HPP_NAMESPACE Framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkFramebuffer framebuffer, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_framebuffer( framebuffer ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8377,6 +8507,11 @@ namespace VULKAN_HPP_NAMESPACE return m_framebuffer; } + operator VULKAN_HPP_NAMESPACE::Framebuffer() const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_framebuffer ) @@ -8451,7 +8586,7 @@ namespace VULKAN_HPP_NAMESPACE Image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkImage image, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_image( image ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8495,6 +8630,11 @@ namespace VULKAN_HPP_NAMESPACE return m_image; } + operator VULKAN_HPP_NAMESPACE::Image() const VULKAN_HPP_NOEXCEPT + { + return m_image; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_image ) @@ -8598,7 +8738,7 @@ namespace VULKAN_HPP_NAMESPACE ImageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkImageView imageView, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_imageView( imageView ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8642,6 +8782,11 @@ namespace VULKAN_HPP_NAMESPACE return m_imageView; } + operator VULKAN_HPP_NAMESPACE::ImageView() const VULKAN_HPP_NOEXCEPT + { + return m_imageView; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_imageView ) @@ -8716,7 +8861,7 @@ namespace VULKAN_HPP_NAMESPACE IndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkIndirectCommandsLayoutNV indirectCommandsLayout, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_indirectCommandsLayout( indirectCommandsLayout ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8760,6 +8905,11 @@ namespace VULKAN_HPP_NAMESPACE return m_indirectCommandsLayout; } + operator VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV() const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayout; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_indirectCommandsLayout ) @@ -8831,7 +8981,7 @@ namespace VULKAN_HPP_NAMESPACE MicromapEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkMicromapEXT micromap, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( 
*device ) + : m_device( device ) , m_micromap( micromap ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8875,6 +9025,11 @@ namespace VULKAN_HPP_NAMESPACE return m_micromap; } + operator VULKAN_HPP_NAMESPACE::MicromapEXT() const VULKAN_HPP_NOEXCEPT + { + return m_micromap; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_micromap ) @@ -8945,7 +9100,7 @@ namespace VULKAN_HPP_NAMESPACE OpticalFlowSessionNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkOpticalFlowSessionNV session, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_session( session ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -8989,6 +9144,11 @@ namespace VULKAN_HPP_NAMESPACE return m_session; } + operator VULKAN_HPP_NAMESPACE::OpticalFlowSessionNV() const VULKAN_HPP_NOEXCEPT + { + return m_session; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_session ) @@ -9063,7 +9223,7 @@ namespace VULKAN_HPP_NAMESPACE # endif PerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkPerformanceConfigurationINTEL configuration ) - : m_device( *device ), m_configuration( configuration ), m_dispatcher( device.getDispatcher() ) + : m_device( device ), m_configuration( configuration ), m_dispatcher( device.getDispatcher() ) { } @@ -9102,6 +9262,11 @@ namespace VULKAN_HPP_NAMESPACE return m_configuration; } + operator VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL() const VULKAN_HPP_NOEXCEPT + { + return m_configuration; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_configuration ) @@ -9168,7 +9333,7 @@ namespace VULKAN_HPP_NAMESPACE PipelineCache( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkPipelineCache pipelineCache, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_pipelineCache( pipelineCache ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -9212,6 +9377,11 @@ namespace VULKAN_HPP_NAMESPACE return m_pipelineCache; } + operator VULKAN_HPP_NAMESPACE::PipelineCache() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_pipelineCache ) @@ -9334,7 +9504,7 @@ namespace VULKAN_HPP_NAMESPACE VkPipeline pipeline, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr, VULKAN_HPP_NAMESPACE::Result successCode = VULKAN_HPP_NAMESPACE::Result::eSuccess ) - : m_device( *device ) + : m_device( device ) , m_pipeline( pipeline ) , m_allocator( static_cast( allocator ) ) , m_constructorSuccessCode( successCode ) @@ -9381,6 +9551,11 @@ namespace VULKAN_HPP_NAMESPACE return m_pipeline; } + operator VULKAN_HPP_NAMESPACE::Pipeline() const VULKAN_HPP_NOEXCEPT + { + return m_pipeline; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_pipeline ) @@ -9572,7 +9747,7 @@ namespace VULKAN_HPP_NAMESPACE PipelineLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkPipelineLayout pipelineLayout, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_pipelineLayout( pipelineLayout ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -9616,6 +9791,11 @@ namespace VULKAN_HPP_NAMESPACE return m_pipelineLayout; } + operator VULKAN_HPP_NAMESPACE::PipelineLayout() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_pipelineLayout ) @@ 
-9687,7 +9867,7 @@ namespace VULKAN_HPP_NAMESPACE PrivateDataSlot( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkPrivateDataSlot privateDataSlot, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_privateDataSlot( privateDataSlot ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -9731,6 +9911,11 @@ namespace VULKAN_HPP_NAMESPACE return m_privateDataSlot; } + operator VULKAN_HPP_NAMESPACE::PrivateDataSlot() const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlot; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_privateDataSlot ) @@ -9802,7 +9987,7 @@ namespace VULKAN_HPP_NAMESPACE QueryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkQueryPool queryPool, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_queryPool( queryPool ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -9846,6 +10031,11 @@ namespace VULKAN_HPP_NAMESPACE return m_queryPool; } + operator VULKAN_HPP_NAMESPACE::QueryPool() const VULKAN_HPP_NOEXCEPT + { + return m_queryPool; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_queryPool ) @@ -9986,6 +10176,11 @@ namespace VULKAN_HPP_NAMESPACE return m_queue; } + operator VULKAN_HPP_NAMESPACE::Queue() const VULKAN_HPP_NOEXCEPT + { + return m_queue; + } + void clear() VULKAN_HPP_NOEXCEPT { m_queue = nullptr; @@ -10093,7 +10288,7 @@ namespace VULKAN_HPP_NAMESPACE RenderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkRenderPass renderPass, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_renderPass( renderPass ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10137,6 +10332,11 @@ namespace VULKAN_HPP_NAMESPACE return m_renderPass; } + operator VULKAN_HPP_NAMESPACE::RenderPass() const VULKAN_HPP_NOEXCEPT + { + return m_renderPass; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_renderPass ) @@ -10215,7 +10415,7 @@ namespace VULKAN_HPP_NAMESPACE Sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkSampler sampler, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_sampler( sampler ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10259,6 +10459,11 @@ namespace VULKAN_HPP_NAMESPACE return m_sampler; } + operator VULKAN_HPP_NAMESPACE::Sampler() const VULKAN_HPP_NOEXCEPT + { + return m_sampler; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_sampler ) @@ -10329,7 +10534,7 @@ namespace VULKAN_HPP_NAMESPACE SamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkSamplerYcbcrConversion ycbcrConversion, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_ycbcrConversion( ycbcrConversion ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10373,6 +10578,11 @@ namespace VULKAN_HPP_NAMESPACE return m_ycbcrConversion; } + operator VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion() const VULKAN_HPP_NOEXCEPT + { + return m_ycbcrConversion; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_ycbcrConversion ) @@ -10444,7 +10654,7 @@ namespace VULKAN_HPP_NAMESPACE Semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkSemaphore semaphore, VULKAN_HPP_NAMESPACE::Optional 
allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_semaphore( semaphore ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10488,6 +10698,11 @@ namespace VULKAN_HPP_NAMESPACE return m_semaphore; } + operator VULKAN_HPP_NAMESPACE::Semaphore() const VULKAN_HPP_NOEXCEPT + { + return m_semaphore; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_semaphore ) @@ -10566,7 +10781,7 @@ namespace VULKAN_HPP_NAMESPACE ShaderEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkShaderEXT shader, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_shader( shader ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10610,6 +10825,11 @@ namespace VULKAN_HPP_NAMESPACE return m_shader; } + operator VULKAN_HPP_NAMESPACE::ShaderEXT() const VULKAN_HPP_NOEXCEPT + { + return m_shader; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_shader ) @@ -10711,7 +10931,7 @@ namespace VULKAN_HPP_NAMESPACE ShaderModule( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkShaderModule shaderModule, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_shaderModule( shaderModule ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -10755,6 +10975,11 @@ namespace VULKAN_HPP_NAMESPACE return m_shaderModule; } + operator VULKAN_HPP_NAMESPACE::ShaderModule() const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_shaderModule ) @@ -10981,7 +11206,7 @@ namespace VULKAN_HPP_NAMESPACE SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance, VkSurfaceKHR surface, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_instance( *instance ) + : m_instance( instance ) , m_surface( surface ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( instance.getDispatcher() ) @@ -11025,6 +11250,11 @@ namespace VULKAN_HPP_NAMESPACE return m_surface; } + operator VULKAN_HPP_NAMESPACE::SurfaceKHR() const VULKAN_HPP_NOEXCEPT + { + return m_surface; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_surface ) @@ -11095,7 +11325,7 @@ namespace VULKAN_HPP_NAMESPACE SwapchainKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkSwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_swapchain( swapchain ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -11139,6 +11369,11 @@ namespace VULKAN_HPP_NAMESPACE return m_swapchain; } + operator VULKAN_HPP_NAMESPACE::SwapchainKHR() const VULKAN_HPP_NOEXCEPT + { + return m_swapchain; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_swapchain ) @@ -11285,7 +11520,7 @@ namespace VULKAN_HPP_NAMESPACE ValidationCacheEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkValidationCacheEXT validationCache, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_validationCache( validationCache ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -11329,6 +11564,11 @@ namespace VULKAN_HPP_NAMESPACE return m_validationCache; } + operator VULKAN_HPP_NAMESPACE::ValidationCacheEXT() const VULKAN_HPP_NOEXCEPT + { + return m_validationCache; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_validationCache ) @@ -11406,7 +11646,7 @@ 
namespace VULKAN_HPP_NAMESPACE VideoSessionKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkVideoSessionKHR videoSession, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_videoSession( videoSession ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -11450,6 +11690,11 @@ namespace VULKAN_HPP_NAMESPACE return m_videoSession; } + operator VULKAN_HPP_NAMESPACE::VideoSessionKHR() const VULKAN_HPP_NOEXCEPT + { + return m_videoSession; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_videoSession ) @@ -11527,7 +11772,7 @@ namespace VULKAN_HPP_NAMESPACE VideoSessionParametersKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkVideoSessionParametersKHR videoSessionParameters, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_videoSessionParameters( videoSessionParameters ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -11571,6 +11816,11 @@ namespace VULKAN_HPP_NAMESPACE return m_videoSessionParameters; } + operator VULKAN_HPP_NAMESPACE::VideoSessionParametersKHR() const VULKAN_HPP_NOEXCEPT + { + return m_videoSessionParameters; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_videoSessionParameters ) diff --git a/vulkan/vulkan_shared.hpp b/vulkan/vulkan_shared.hpp index 0843fe2..8b2697a 100644 --- a/vulkan/vulkan_shared.hpp +++ b/vulkan/vulkan_shared.hpp @@ -182,6 +182,13 @@ namespace VULKAN_HPP_NAMESPACE return bool( m_handle ); } +# if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + operator HandleType() const VULKAN_HPP_NOEXCEPT + { + return m_handle; + } +# endif + const HandleType * operator->() const VULKAN_HPP_NOEXCEPT { return &m_handle; diff --git a/vulkan/vulkansc.hpp b/vulkan/vulkansc.hpp index 619f761..85465aa 100644 --- a/vulkan/vulkansc.hpp +++ b/vulkan/vulkansc.hpp @@ -919,6 +919,13 @@ namespace VULKAN_HPP_NAMESPACE return m_value.operator bool(); } +# if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST ) + operator Type() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } +# endif + Type const * operator->() const VULKAN_HPP_NOEXCEPT { return &m_value; diff --git a/vulkan/vulkansc_raii.hpp b/vulkan/vulkansc_raii.hpp index 7ea388a..61806f1 100644 --- a/vulkan/vulkansc_raii.hpp +++ b/vulkan/vulkansc_raii.hpp @@ -1256,6 +1256,11 @@ namespace VULKAN_HPP_NAMESPACE return m_instance; } + operator VULKAN_HPP_NAMESPACE::Instance() const VULKAN_HPP_NOEXCEPT + { + return m_instance; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_instance ) @@ -1386,6 +1391,11 @@ namespace VULKAN_HPP_NAMESPACE return m_physicalDevice; } + operator VULKAN_HPP_NAMESPACE::PhysicalDevice() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice; + } + void clear() VULKAN_HPP_NOEXCEPT { m_physicalDevice = nullptr; @@ -1676,6 +1686,11 @@ namespace VULKAN_HPP_NAMESPACE return m_device; } + operator VULKAN_HPP_NAMESPACE::Device() const VULKAN_HPP_NOEXCEPT + { + return m_device; + } + void clear() VULKAN_HPP_NOEXCEPT { if ( m_device ) @@ -2091,7 +2106,7 @@ namespace VULKAN_HPP_NAMESPACE Buffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkBuffer buffer, VULKAN_HPP_NAMESPACE::Optional allocator = nullptr ) - : m_device( *device ) + : m_device( device ) , m_buffer( buffer ) , m_allocator( static_cast( allocator ) ) , m_dispatcher( device.getDispatcher() ) @@ -2135,6 +2150,11 @@ namespace VULKAN_HPP_NAMESPACE return m_buffer; } + operator 
@@ -2135,6 +2150,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_buffer;
    }

+    operator VULKAN_HPP_NAMESPACE::Buffer() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_buffer;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_buffer )
@@ -2211,7 +2231,7 @@ namespace VULKAN_HPP_NAMESPACE
    BufferView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                VkBufferView bufferView,
                VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_bufferView( bufferView )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -2255,6 +2275,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_bufferView;
    }

+    operator VULKAN_HPP_NAMESPACE::BufferView() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_bufferView;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_bufferView )
@@ -2325,7 +2350,7 @@ namespace VULKAN_HPP_NAMESPACE
    CommandPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                 VkCommandPool commandPool,
                 VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_commandPool( commandPool )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -2369,6 +2394,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_commandPool;
    }

+    operator VULKAN_HPP_NAMESPACE::CommandPool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_commandPool;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_commandPool )
@@ -2437,7 +2467,7 @@ namespace VULKAN_HPP_NAMESPACE
  public:
    CommandBuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkCommandBuffer commandBuffer, VkCommandPool commandPool )
-      : m_device( *device ), m_commandPool( commandPool ), m_commandBuffer( commandBuffer ), m_dispatcher( device.getDispatcher() )
+      : m_device( device ), m_commandPool( commandPool ), m_commandBuffer( commandBuffer ), m_dispatcher( device.getDispatcher() )
    {
    }
@@ -2478,6 +2508,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_commandBuffer;
    }

+    operator VULKAN_HPP_NAMESPACE::CommandBuffer() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_commandBuffer;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_commandBuffer )
@@ -2976,7 +3011,7 @@ namespace VULKAN_HPP_NAMESPACE
    DebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance,
                            VkDebugUtilsMessengerEXT messenger,
                            VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_instance( *instance )
+      : m_instance( instance )
      , m_messenger( messenger )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( instance.getDispatcher() )
@@ -3020,6 +3055,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_messenger;
    }

+    operator VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_messenger;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_messenger )
@@ -3091,7 +3131,7 @@ namespace VULKAN_HPP_NAMESPACE
    DescriptorPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                    VkDescriptorPool descriptorPool,
                    VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_descriptorPool( descriptorPool )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -3135,6 +3175,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_descriptorPool;
    }

+    operator VULKAN_HPP_NAMESPACE::DescriptorPool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_descriptorPool;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_descriptorPool )
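The same pattern applies when filling Vulkan-Hpp structs: a raii handle converts implicitly where a struct constructor expects the plain handle. A minimal sketch with a hypothetical helper, assuming the vk::raii::Buffer was created elsewhere:

#include <vulkan/vulkan_raii.hpp>

vk::DescriptorBufferInfo describeWholeBuffer( vk::raii::Buffer const & buffer )
{
  // buffer converts to vk::Buffer in the constructor call; previously this read *buffer.
  return vk::DescriptorBufferInfo( buffer, 0, VK_WHOLE_SIZE );
}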
@@ -3199,7 +3244,7 @@ namespace VULKAN_HPP_NAMESPACE
  public:
    DescriptorSet( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device, VkDescriptorSet descriptorSet, VkDescriptorPool descriptorPool )
-      : m_device( *device ), m_descriptorPool( descriptorPool ), m_descriptorSet( descriptorSet ), m_dispatcher( device.getDispatcher() )
+      : m_device( device ), m_descriptorPool( descriptorPool ), m_descriptorSet( descriptorSet ), m_dispatcher( device.getDispatcher() )
    {
    }
@@ -3240,6 +3285,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_descriptorSet;
    }

+    operator VULKAN_HPP_NAMESPACE::DescriptorSet() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_descriptorSet;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_descriptorSet )
@@ -3338,7 +3388,7 @@ namespace VULKAN_HPP_NAMESPACE
    DescriptorSetLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                         VkDescriptorSetLayout descriptorSetLayout,
                         VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_descriptorSetLayout( descriptorSetLayout )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -3382,6 +3432,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_descriptorSetLayout;
    }

+    operator VULKAN_HPP_NAMESPACE::DescriptorSetLayout() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_descriptorSetLayout;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_descriptorSetLayout )
@@ -3453,7 +3508,7 @@ namespace VULKAN_HPP_NAMESPACE
    DeviceMemory( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                  VkDeviceMemory memory,
                  VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_memory( memory )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -3497,6 +3552,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_memory;
    }

+    operator VULKAN_HPP_NAMESPACE::DeviceMemory() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_memory;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_memory )
@@ -3566,7 +3626,7 @@ namespace VULKAN_HPP_NAMESPACE
  public:
    DisplayKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::PhysicalDevice const & physicalDevice, VkDisplayKHR display )
-      : m_physicalDevice( *physicalDevice ), m_display( display ), m_dispatcher( physicalDevice.getDispatcher() )
+      : m_physicalDevice( physicalDevice ), m_display( display ), m_dispatcher( physicalDevice.getDispatcher() )
    {
    }
@@ -3605,6 +3665,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_display;
    }

+    operator VULKAN_HPP_NAMESPACE::DisplayKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_display;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_display )
@@ -3751,6 +3816,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_displayModeKHR;
    }

+    operator VULKAN_HPP_NAMESPACE::DisplayModeKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayModeKHR;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      m_physicalDevice = nullptr;
@@ -3811,7 +3881,7 @@ namespace VULKAN_HPP_NAMESPACE
    Event( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
           VkEvent event,
           VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_event( event )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -3855,6 +3925,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_event;
    }

+    operator VULKAN_HPP_NAMESPACE::Event() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_event;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_event )
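The raii member functions that take plain handle parameters also benefit: another raii handle can be passed without dereferencing it first. A minimal sketch with hypothetical names, assuming the buffer and memory were created elsewhere and are compatible:

#include <vulkan/vulkan_raii.hpp>

void bindAtOffsetZero( vk::raii::Buffer const & buffer, vk::raii::DeviceMemory const & memory )
{
  buffer.bindMemory( memory, 0 );  // memory converts to vk::DeviceMemory; previously: *memory
}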
@@ -3952,7 +4027,7 @@ namespace VULKAN_HPP_NAMESPACE
    Fence( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
           VkFence fence,
           VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_fence( fence )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -3996,6 +4071,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_fence;
    }

+    operator VULKAN_HPP_NAMESPACE::Fence() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_fence;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_fence )
@@ -4070,7 +4150,7 @@ namespace VULKAN_HPP_NAMESPACE
    Framebuffer( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                 VkFramebuffer framebuffer,
                 VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_framebuffer( framebuffer )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4114,6 +4194,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_framebuffer;
    }

+    operator VULKAN_HPP_NAMESPACE::Framebuffer() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_framebuffer;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_framebuffer )
@@ -4184,7 +4269,7 @@ namespace VULKAN_HPP_NAMESPACE
    Image( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
           VkImage image,
           VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_image( image )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4228,6 +4313,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_image;
    }

+    operator VULKAN_HPP_NAMESPACE::Image() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_image;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_image )
@@ -4311,7 +4401,7 @@ namespace VULKAN_HPP_NAMESPACE
    ImageView( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
               VkImageView imageView,
               VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_imageView( imageView )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4355,6 +4445,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_imageView;
    }

+    operator VULKAN_HPP_NAMESPACE::ImageView() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_imageView;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_imageView )
@@ -4425,7 +4520,7 @@ namespace VULKAN_HPP_NAMESPACE
    PipelineCache( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                   VkPipelineCache pipelineCache,
                   VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_pipelineCache( pipelineCache )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4469,6 +4564,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_pipelineCache;
    }

+    operator VULKAN_HPP_NAMESPACE::PipelineCache() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_pipelineCache;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_pipelineCache )
@@ -4574,7 +4674,7 @@ namespace VULKAN_HPP_NAMESPACE
              VkPipeline pipeline,
              VULKAN_HPP_NAMESPACE::Optional allocator = nullptr,
              VULKAN_HPP_NAMESPACE::Result successCode = VULKAN_HPP_NAMESPACE::Result::eSuccess )
-      : m_device( *device )
+      : m_device( device )
      , m_pipeline( pipeline )
      , m_allocator( static_cast( allocator ) )
      , m_constructorSuccessCode( successCode )
      , m_dispatcher( device.getDispatcher() )
@@ -4621,6 +4721,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_pipeline;
    }

+    operator VULKAN_HPP_NAMESPACE::Pipeline() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_pipeline;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_pipeline )
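Create-info structs that reference other objects read the same way. A minimal sketch with a hypothetical helper, assuming the vk::raii::Image was created elsewhere with a color-capable format:

#include <vulkan/vulkan_raii.hpp>

vk::ImageViewCreateInfo makeColorViewInfo( vk::raii::Image const & image, vk::Format format )
{
  return vk::ImageViewCreateInfo( {},
                                  image,  // implicit cast to vk::Image; previously: *image
                                  vk::ImageViewType::e2D,
                                  format,
                                  {},
                                  { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } );
}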
@@ -4738,7 +4843,7 @@ namespace VULKAN_HPP_NAMESPACE
    PipelineLayout( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                    VkPipelineLayout pipelineLayout,
                    VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_pipelineLayout( pipelineLayout )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4782,6 +4887,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_pipelineLayout;
    }

+    operator VULKAN_HPP_NAMESPACE::PipelineLayout() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_pipelineLayout;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_pipelineLayout )
@@ -4853,7 +4963,7 @@ namespace VULKAN_HPP_NAMESPACE
    PrivateDataSlot( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                     VkPrivateDataSlot privateDataSlot,
                     VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_privateDataSlot( privateDataSlot )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -4897,6 +5007,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_privateDataSlot;
    }

+    operator VULKAN_HPP_NAMESPACE::PrivateDataSlot() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_privateDataSlot;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_privateDataSlot )
@@ -4968,7 +5083,7 @@ namespace VULKAN_HPP_NAMESPACE
    QueryPool( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
               VkQueryPool queryPool,
               VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_queryPool( queryPool )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5012,6 +5127,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_queryPool;
    }

+    operator VULKAN_HPP_NAMESPACE::QueryPool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_queryPool;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_queryPool )
@@ -5148,6 +5268,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_queue;
    }

+    operator VULKAN_HPP_NAMESPACE::Queue() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_queue;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      m_queue = nullptr;
@@ -5240,7 +5365,7 @@ namespace VULKAN_HPP_NAMESPACE
    RenderPass( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                VkRenderPass renderPass,
                VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_renderPass( renderPass )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5284,6 +5409,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_renderPass;
    }

+    operator VULKAN_HPP_NAMESPACE::RenderPass() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_renderPass;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_renderPass )
@@ -5358,7 +5488,7 @@ namespace VULKAN_HPP_NAMESPACE
    Sampler( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
             VkSampler sampler,
             VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_sampler( sampler )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5402,6 +5532,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_sampler;
    }

+    operator VULKAN_HPP_NAMESPACE::Sampler() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_sampler;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_sampler )
@@ -5472,7 +5607,7 @@ namespace VULKAN_HPP_NAMESPACE
    SamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                            VkSamplerYcbcrConversion ycbcrConversion,
                            VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_ycbcrConversion( ycbcrConversion )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
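Descriptor updates are another common place where raii handles meet a struct that stores plain handles. A minimal sketch with hypothetical names, assuming the sampler and image view exist and the image is in the expected layout:

#include <vulkan/vulkan_raii.hpp>

vk::DescriptorImageInfo describeCombinedImageSampler( vk::raii::Sampler const & sampler, vk::raii::ImageView const & imageView )
{
  // Both raii handles convert implicitly; previously this read *sampler and *imageView.
  return vk::DescriptorImageInfo( sampler, imageView, vk::ImageLayout::eShaderReadOnlyOptimal );
}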
@@ -5516,6 +5651,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_ycbcrConversion;
    }

+    operator VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_ycbcrConversion;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_ycbcrConversion )
@@ -5587,7 +5727,7 @@ namespace VULKAN_HPP_NAMESPACE
    Semaphore( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
               VkSemaphore semaphore,
               VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_semaphore( semaphore )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5631,6 +5771,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_semaphore;
    }

+    operator VULKAN_HPP_NAMESPACE::Semaphore() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_semaphore;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_semaphore )
@@ -5706,7 +5851,7 @@ namespace VULKAN_HPP_NAMESPACE
    SemaphoreSciSyncPoolNV( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                            VkSemaphoreSciSyncPoolNV semaphorePool,
                            VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_semaphorePool( semaphorePool )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5750,6 +5895,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_semaphorePool;
    }

+    operator VULKAN_HPP_NAMESPACE::SemaphoreSciSyncPoolNV() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_semaphorePool;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_semaphorePool )
@@ -5822,7 +5972,7 @@ namespace VULKAN_HPP_NAMESPACE
    ShaderModule( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                  VkShaderModule shaderModule,
                  VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_shaderModule( shaderModule )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -5866,6 +6016,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_shaderModule;
    }

+    operator VULKAN_HPP_NAMESPACE::ShaderModule() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_shaderModule;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_shaderModule )
@@ -5945,7 +6100,7 @@ namespace VULKAN_HPP_NAMESPACE
    SurfaceKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Instance const & instance,
                VkSurfaceKHR surface,
                VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_instance( *instance )
+      : m_instance( instance )
      , m_surface( surface )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( instance.getDispatcher() )
@@ -5989,6 +6144,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_surface;
    }

+    operator VULKAN_HPP_NAMESPACE::SurfaceKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_surface;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_surface )
@@ -6059,7 +6219,7 @@ namespace VULKAN_HPP_NAMESPACE
    SwapchainKHR( VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::Device const & device,
                  VkSwapchainKHR swapchain,
                  VULKAN_HPP_NAMESPACE::Optional allocator = nullptr )
-      : m_device( *device )
+      : m_device( device )
      , m_swapchain( swapchain )
      , m_allocator( static_cast( allocator ) )
      , m_dispatcher( device.getDispatcher() )
@@ -6103,6 +6263,11 @@ namespace VULKAN_HPP_NAMESPACE
      return m_swapchain;
    }

+    operator VULKAN_HPP_NAMESPACE::SwapchainKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_swapchain;
+    }
+
    void clear() VULKAN_HPP_NOEXCEPT
    {
      if ( m_swapchain )
diff --git a/vulkan/vulkansc_shared.hpp b/vulkan/vulkansc_shared.hpp
index fd740a9..7a8bc56 100644
--- a/vulkan/vulkansc_shared.hpp
+++ b/vulkan/vulkansc_shared.hpp
@@ -182,6 +182,13 @@ namespace VULKAN_HPP_NAMESPACE
      return bool( m_handle );
    }

+# if defined( VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST )
+    operator HandleType() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_handle;
+    }
+# endif
+
    const HandleType * operator->() const VULKAN_HPP_NOEXCEPT
    {
      return &m_handle;
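One caveat worth keeping in mind: the conversion returns a non-owning copy of the underlying handle; ownership and lifetime stay with the smart handle. A minimal sketch, assuming VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST is defined as above and using a hypothetical helper:

#define VULKAN_HPP_SMART_HANDLE_IMPLICIT_CAST
#include <vulkan/vulkan_shared.hpp>

vk::Fence copyHandle( vk::SharedHandle<vk::Fence> const & fence )
{
  vk::Fence raw = fence;  // valid while some SharedHandle still keeps the VkFence alive
  return raw;             // the caller must not use 'raw' after the last shared reference is gone
}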