From a4590dc8fbaf99ee901d2f7026337aa9632016ac Mon Sep 17 00:00:00 2001 From: ink-soul Date: Fri, 26 May 2023 14:12:02 +0800 Subject: [PATCH] Revert "commit for revert" This reverts commit 3c7ad199a01534bedf96a1e230733849f1d6914c. --- CMakeLists.txt | 2 +- {littleRender => homework}/CMakeLists.txt | 22 +- homework/homework0/homework0.cpp | 1260 +++++++++++++++++ .../homework1/homework1.cpp | 496 ++++++- .../homework1/homework1.h | 205 ++- littleRender/glTFModel.cpp | 490 ------- littleRender/render.h | 202 --- 7 files changed, 1960 insertions(+), 717 deletions(-) rename {littleRender => homework}/CMakeLists.txt (83%) create mode 100644 homework/homework0/homework0.cpp rename littleRender/render.cpp => homework/homework1/homework1.cpp (81%) rename littleRender/glTFModel.h => homework/homework1/homework1.h (54%) delete mode 100644 littleRender/glTFModel.cpp delete mode 100644 littleRender/render.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 17a597f..751a347 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,6 +148,6 @@ ENDIF(WIN32) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin/") add_subdirectory(base) -add_subdirectory(littleRender) +add_subdirectory(homework) # add_subdirectory(examples) diff --git a/littleRender/CMakeLists.txt b/homework/CMakeLists.txt similarity index 83% rename from littleRender/CMakeLists.txt rename to homework/CMakeLists.txt index 9d15d02..8103e34 100644 --- a/littleRender/CMakeLists.txt +++ b/homework/CMakeLists.txt @@ -1,37 +1,36 @@ # Function for building single homework -# Function for building single homework function(buildHomework HOMEWORK_NAME) - SET(HOMEWORK_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}) + SET(HOMEWORK_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}/${HOMEWORK_NAME}) message(STATUS "Generating project file for homework in ${HOMEWORK_FOLDER}") # Main file(GLOB SOURCE *.cpp ${BASE_HEADERS} ${HOMEWORK_FOLDER}/*.cpp) - SET(MAIN_CPP ${HOMEWORK_FOLDER}/${HOMEWORK_NAME}.cpp) + SET(MAIN_CPP ${HOMEWORK_FOLDER}/${HOMEWORK_NAME}.cpp "homework1/homework1.h") if(EXISTS ${HOMEWORK_FOLDER}/main.cpp) - SET(MAIN_CPP ${HOMEWORK_FOLDER}/main.cpp) + SET(MAIN_CPP ${HOMEWORK_FOLDER}/main.cpp "homework1/homework1.h") ENDIF() if(EXISTS ${HOMEWORK_FOLDER}/${HOMEWORK_NAME}.h) - SET(MAIN_HEADER ${HOMEWORK_FOLDER}/${HOMEWORK_NAME}.h) + SET(MAIN_HEADER ${HOMEWORK_FOLDER}/${HOMEWORK_NAME}.h "homework1/homework1.h") ENDIF() # imgui homework requires additional source files IF(${HOMEWORK_NAME} STREQUAL "imgui") file(GLOB ADD_SOURCE "../external/imgui/*.cpp") - SET(SOURCE ${SOURCE} ${ADD_SOURCE}) + SET(SOURCE ${SOURCE} ${ADD_SOURCE} "homework1/homework1.h") ENDIF() # wayland requires additional source files IF(USE_WAYLAND_WSI) - SET(SOURCE ${SOURCE} ${CMAKE_BINARY_DIR}/xdg-shell-client-protocol.h ${CMAKE_BINARY_DIR}/xdg-shell-protocol.c) + SET(SOURCE ${SOURCE} ${CMAKE_BINARY_DIR}/xdg-shell-client-protocol.h ${CMAKE_BINARY_DIR}/xdg-shell-protocol.c "homework1/homework1.h") ENDIF() # Add shaders - set(SHADER_DIR_GLSL "../data/buster_drone/shaders/glsl") + set(SHADER_DIR_GLSL "../data/homework/shaders/glsl/${HOMEWORK_NAME}") file(GLOB SHADERS_GLSL "${SHADER_DIR_GLSL}/*.vert" "${SHADER_DIR_GLSL}/*.frag" "${SHADER_DIR_GLSL}/*.comp" "${SHADER_DIR_GLSL}/*.geom" "${SHADER_DIR_GLSL}/*.tesc" "${SHADER_DIR_GLSL}/*.tese" "${SHADER_DIR_GLSL}/*.mesh" "${SHADER_DIR_GLSL}/*.task" "${SHADER_DIR_GLSL}/*.rgen" "${SHADER_DIR_GLSL}/*.rchit" "${SHADER_DIR_GLSL}/*.rmiss" "${SHADER_DIR_GLSL}/*.rcall") - set(SHADER_DIR_HLSL "../data/buster_drone/shaders/glsl") + set(SHADER_DIR_HLSL 
"../data/homework/shaders/hlsl/${HOMEWORK_NAME}") file(GLOB SHADERS_HLSL "${SHADER_DIR_HLSL}/*.vert" "${SHADER_DIR_HLSL}/*.frag" "${SHADER_DIR_HLSL}/*.comp" "${SHADER_DIR_HLSL}/*.geom" "${SHADER_DIR_HLSL}/*.tesc" "${SHADER_DIR_HLSL}/*.tese" "${SHADER_DIR_HLSL}/*.mesh" "${SHADER_DIR_HLSL}/*.task" "${SHADER_DIR_HLSL}/*.rgen" "${SHADER_DIR_HLSL}/*.rchit" "${SHADER_DIR_HLSL}/*.rmiss" "${SHADER_DIR_HLSL}/*.rcall") source_group("Shaders\\GLSL" FILES ${SHADERS_GLSL}) source_group("Shaders\\HLSL" FILES ${SHADERS_HLSL}) # Add optional readme / tutorial file(GLOB README_FILES "${HOMEWORK_FOLDER}/*.md") if(WIN32) - add_executable(${HOMEWORK_NAME} WIN32 ${MAIN_CPP} ${SOURCE} ${MAIN_HEADER} ${SHADERS_GLSL} ${SHADERS_HLSL} ${README_FILES}) + add_executable(${HOMEWORK_NAME} WIN32 ${MAIN_CPP} ${SOURCE} ${MAIN_HEADER} ${SHADERS_GLSL} ${SHADERS_HLSL} ${README_FILES} "homework1/homework1.h") target_link_libraries(${HOMEWORK_NAME} base ${Vulkan_LIBRARY} ${WINLIBS}) else(WIN32) add_executable(${HOMEWORK_NAME} ${MAIN_CPP} ${SOURCE} ${MAIN_HEADER} ${SHADERS_GLSL} ${SHADERS_HLSL} ${README_FILES}) @@ -79,7 +78,8 @@ function(buildHomeworks) endfunction(buildHomeworks) set(HOMEWORKS - render + homework0 + homework1 ) buildHomeworks() diff --git a/homework/homework0/homework0.cpp b/homework/homework0/homework0.cpp new file mode 100644 index 0000000..bfaa972 --- /dev/null +++ b/homework/homework0/homework0.cpp @@ -0,0 +1,1260 @@ +/* +* Vulkan Example - Basic indexed triangle rendering +* +* Note: +* This is a "pedal to the metal" example to show off how to get Vulkan up and displaying something +* Contrary to the other examples, this one won't make use of helper functions or initializers +* Except in a few cases (swap chain setup e.g.) +* +* Copyright (C) 2016-2017 by Sascha Willems - www.saschawillems.de +* +* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT) +*/ + +#include +#include +#include +#include +#include +#include +#include + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#include +#include + +#include +#include "vulkanexamplebase.h" + +// Set to "true" to enable Vulkan's validation layers (see vulkandebug.cpp for details) +#define ENABLE_VALIDATION false +// Set to "true" to use staging buffers for uploading vertex and index data to device local memory +// See "prepareVertices" for details on what's staging and on why to use it +#define USE_STAGING true + +class VulkanExample : public VulkanExampleBase +{ +public: + // Vertex layout used in this example + struct Vertex { + float position[3]; + float color[3]; + }; + + // Vertex buffer and attributes + struct { + VkDeviceMemory memory; // Handle to the device memory for this buffer + VkBuffer buffer; // Handle to the Vulkan buffer object that the memory is bound to + } vertices; + + // Index buffer + struct { + VkDeviceMemory memory; + VkBuffer buffer; + uint32_t count; + } indices; + + // Uniform buffer block object + struct { + VkDeviceMemory memory; + VkBuffer buffer; + VkDescriptorBufferInfo descriptor; + } uniformBufferVS; + + // For simplicity we use the same uniform block layout as in the shader: + // + // layout(set = 0, binding = 0) uniform UBO + // { + // mat4 projectionMatrix; + // mat4 modelMatrix; + // mat4 viewMatrix; + // } ubo; + // + // This way we can just memcopy the ubo data to the ubo + // Note: You should use data types that align with the GPU in order to avoid manual padding (vec4, mat4) + struct { + glm::mat4 projectionMatrix; + glm::mat4 modelMatrix; + glm::mat4 viewMatrix; + } 
uboVS; + + // The pipeline layout is used by a pipeline to access the descriptor sets + // It defines interface (without binding any actual data) between the shader stages used by the pipeline and the shader resources + // A pipeline layout can be shared among multiple pipelines as long as their interfaces match + VkPipelineLayout pipelineLayout; + + // Pipelines (often called "pipeline state objects") are used to bake all states that affect a pipeline + // While in OpenGL every state can be changed at (almost) any time, Vulkan requires to layout the graphics (and compute) pipeline states upfront + // So for each combination of non-dynamic pipeline states you need a new pipeline (there are a few exceptions to this not discussed here) + // Even though this adds a new dimension of planning ahead, it's a great opportunity for performance optimizations by the driver + VkPipeline pipeline; + + // The descriptor set layout describes the shader binding layout (without actually referencing descriptor) + // Like the pipeline layout it's pretty much a blueprint and can be used with different descriptor sets as long as their layout matches + VkDescriptorSetLayout descriptorSetLayout; + + // The descriptor set stores the resources bound to the binding points in a shader + // It connects the binding points of the different shaders with the buffers and images used for those bindings + VkDescriptorSet descriptorSet; + + + // Synchronization primitives + // Synchronization is an important concept of Vulkan that OpenGL mostly hid away. Getting this right is crucial to using Vulkan. + + // Semaphores + // Used to coordinate operations within the graphics queue and ensure correct command ordering + VkSemaphore presentCompleteSemaphore; + VkSemaphore renderCompleteSemaphore; + + // Fences + // Used to check the completion of queue operations (e.g. command buffer execution) + std::vector queueCompleteFences; + + VulkanExample() : VulkanExampleBase(ENABLE_VALIDATION) + { + title = "games 106 - homework0"; + // To keep things simple, we don't use the UI overlay + settings.overlay = false; + // Setup a default look-at camera + camera.type = Camera::CameraType::lookat; + camera.setPosition(glm::vec3(0.0f, 0.0f, -2.5f)); + camera.setRotation(glm::vec3(0.0f)); + camera.setPerspective(60.0f, (float)width / (float)height, 1.0f, 256.0f); + // Values not set here are initialized in the base class constructor + } + + ~VulkanExample() + { + // Clean up used Vulkan resources + // Note: Inherited destructor cleans up resources stored in base class + vkDestroyPipeline(device, pipeline, nullptr); + + vkDestroyPipelineLayout(device, pipelineLayout, nullptr); + vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr); + + vkDestroyBuffer(device, vertices.buffer, nullptr); + vkFreeMemory(device, vertices.memory, nullptr); + + vkDestroyBuffer(device, indices.buffer, nullptr); + vkFreeMemory(device, indices.memory, nullptr); + + vkDestroyBuffer(device, uniformBufferVS.buffer, nullptr); + vkFreeMemory(device, uniformBufferVS.memory, nullptr); + + vkDestroySemaphore(device, presentCompleteSemaphore, nullptr); + vkDestroySemaphore(device, renderCompleteSemaphore, nullptr); + + for (auto& fence : queueCompleteFences) + { + vkDestroyFence(device, fence, nullptr); + } + } + + // This function is used to request a device memory type that supports all the property flags we request (e.g. 
device local, host visible) + // Upon success it will return the index of the memory type that fits our requested memory properties + // This is necessary as implementations can offer an arbitrary number of memory types with different + // memory properties. + // You can check http://vulkan.gpuinfo.org/ for details on different memory configurations + uint32_t getMemoryTypeIndex(uint32_t typeBits, VkMemoryPropertyFlags properties) + { + // Iterate over all memory types available for the device used in this example + for (uint32_t i = 0; i < deviceMemoryProperties.memoryTypeCount; i++) + { + if ((typeBits & 1) == 1) + { + if ((deviceMemoryProperties.memoryTypes[i].propertyFlags & properties) == properties) + { + return i; + } + } + typeBits >>= 1; + } + + throw "Could not find a suitable memory type!"; + } + + // Create the Vulkan synchronization primitives used in this example + void prepareSynchronizationPrimitives() + { + // Semaphores (Used for correct command ordering) + VkSemaphoreCreateInfo semaphoreCreateInfo = {}; + semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; + semaphoreCreateInfo.pNext = nullptr; + + // Semaphore used to ensure that image presentation is complete before starting to submit again + VK_CHECK_RESULT(vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &presentCompleteSemaphore)); + + // Semaphore used to ensure that all commands submitted have been finished before submitting the image to the queue + VK_CHECK_RESULT(vkCreateSemaphore(device, &semaphoreCreateInfo, nullptr, &renderCompleteSemaphore)); + + // Fences (Used to check draw command buffer completion) + VkFenceCreateInfo fenceCreateInfo = {}; + fenceCreateInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + // Create in signaled state so we don't wait on first render of each command buffer + fenceCreateInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; + queueCompleteFences.resize(drawCmdBuffers.size()); + for (auto& fence : queueCompleteFences) + { + VK_CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &fence)); + } + } + + // Get a new command buffer from the command pool + // If begin is true, the command buffer is also started so we can start adding commands + VkCommandBuffer getCommandBuffer(bool begin) + { + VkCommandBuffer cmdBuffer; + + VkCommandBufferAllocateInfo cmdBufAllocateInfo = {}; + cmdBufAllocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + cmdBufAllocateInfo.commandPool = cmdPool; + cmdBufAllocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; + cmdBufAllocateInfo.commandBufferCount = 1; + + VK_CHECK_RESULT(vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &cmdBuffer)); + + // If requested, also start the new command buffer + if (begin) + { + VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo(); + VK_CHECK_RESULT(vkBeginCommandBuffer(cmdBuffer, &cmdBufInfo)); + } + + return cmdBuffer; + } + + // End the command buffer and submit it to the queue + // Uses a fence to ensure command buffer has finished executing before deleting it + void flushCommandBuffer(VkCommandBuffer commandBuffer) + { + assert(commandBuffer != VK_NULL_HANDLE); + + VK_CHECK_RESULT(vkEndCommandBuffer(commandBuffer)); + + VkSubmitInfo submitInfo = {}; + submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &commandBuffer; + + // Create fence to ensure that the command buffer has finished executing + VkFenceCreateInfo fenceCreateInfo = {}; + fenceCreateInfo.sType = 
VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + fenceCreateInfo.flags = 0; + VkFence fence; + VK_CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &fence)); + + // Submit to the queue + VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, fence)); + // Wait for the fence to signal that command buffer has finished executing + VK_CHECK_RESULT(vkWaitForFences(device, 1, &fence, VK_TRUE, DEFAULT_FENCE_TIMEOUT)); + + vkDestroyFence(device, fence, nullptr); + vkFreeCommandBuffers(device, cmdPool, 1, &commandBuffer); + } + + // Build separate command buffers for every framebuffer image + // Unlike in OpenGL all rendering commands are recorded once into command buffers that are then resubmitted to the queue + // This allows to generate work upfront and from multiple threads, one of the biggest advantages of Vulkan + void buildCommandBuffers() + { + VkCommandBufferBeginInfo cmdBufInfo = {}; + cmdBufInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + cmdBufInfo.pNext = nullptr; + + // Set clear values for all framebuffer attachments with loadOp set to clear + // We use two attachments (color and depth) that are cleared at the start of the subpass and as such we need to set clear values for both + VkClearValue clearValues[2]; + clearValues[0].color = { { 0.0f, 0.0f, 0.2f, 1.0f } }; + clearValues[1].depthStencil = { 1.0f, 0 }; + + VkRenderPassBeginInfo renderPassBeginInfo = {}; + renderPassBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + renderPassBeginInfo.pNext = nullptr; + renderPassBeginInfo.renderPass = renderPass; + renderPassBeginInfo.renderArea.offset.x = 0; + renderPassBeginInfo.renderArea.offset.y = 0; + renderPassBeginInfo.renderArea.extent.width = width; + renderPassBeginInfo.renderArea.extent.height = height; + renderPassBeginInfo.clearValueCount = 2; + renderPassBeginInfo.pClearValues = clearValues; + + for (int32_t i = 0; i < drawCmdBuffers.size(); ++i) + { + // Set target frame buffer + renderPassBeginInfo.framebuffer = frameBuffers[i]; + + VK_CHECK_RESULT(vkBeginCommandBuffer(drawCmdBuffers[i], &cmdBufInfo)); + + // Start the first sub pass specified in our default render pass setup by the base class + // This will clear the color and depth attachment + vkCmdBeginRenderPass(drawCmdBuffers[i], &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); + + // Update dynamic viewport state + VkViewport viewport = {}; + viewport.height = (float)height; + viewport.width = (float)width; + viewport.minDepth = (float) 0.0f; + viewport.maxDepth = (float) 1.0f; + vkCmdSetViewport(drawCmdBuffers[i], 0, 1, &viewport); + + // Update dynamic scissor state + VkRect2D scissor = {}; + scissor.extent.width = width; + scissor.extent.height = height; + scissor.offset.x = 0; + scissor.offset.y = 0; + vkCmdSetScissor(drawCmdBuffers[i], 0, 1, &scissor); + + // Bind descriptor sets describing shader binding points + vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &descriptorSet, 0, nullptr); + + // Bind the rendering pipeline + // The pipeline (state object) contains all states of the rendering pipeline, binding it will set all the states specified at pipeline creation time + vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); + + // Bind triangle vertex buffer (contains position and colors) + VkDeviceSize offsets[1] = { 0 }; + vkCmdBindVertexBuffers(drawCmdBuffers[i], 0, 1, &vertices.buffer, offsets); + + // Bind triangle index buffer + vkCmdBindIndexBuffer(drawCmdBuffers[i], indices.buffer, 0, 
VK_INDEX_TYPE_UINT32); + + // Draw indexed triangle + vkCmdDrawIndexed(drawCmdBuffers[i], indices.count, 1, 0, 0, 1); + + vkCmdEndRenderPass(drawCmdBuffers[i]); + + // Ending the render pass will add an implicit barrier transitioning the frame buffer color attachment to + // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR for presenting it to the windowing system + + VK_CHECK_RESULT(vkEndCommandBuffer(drawCmdBuffers[i])); + } + } + + void draw() + { +#if defined(VK_USE_PLATFORM_MACOS_MVK) + // SRS - on macOS use swapchain helper function with common semaphores/fences for proper resize handling + // Get next image in the swap chain (back/front buffer) + prepareFrame(); + + // Use a fence to wait until the command buffer has finished execution before using it again + VK_CHECK_RESULT(vkWaitForFences(device, 1, &waitFences[currentBuffer], VK_TRUE, UINT64_MAX)); + VK_CHECK_RESULT(vkResetFences(device, 1, &waitFences[currentBuffer])); +#else + // SRS - on other platforms use original bare code with local semaphores/fences for illustrative purposes + // Get next image in the swap chain (back/front buffer) + VkResult acquire = swapChain.acquireNextImage(presentCompleteSemaphore, ¤tBuffer); + if (!((acquire == VK_SUCCESS) || (acquire == VK_SUBOPTIMAL_KHR))) { + VK_CHECK_RESULT(acquire); + } + + // Use a fence to wait until the command buffer has finished execution before using it again + VK_CHECK_RESULT(vkWaitForFences(device, 1, &queueCompleteFences[currentBuffer], VK_TRUE, UINT64_MAX)); + VK_CHECK_RESULT(vkResetFences(device, 1, &queueCompleteFences[currentBuffer])); +#endif + + // Pipeline stage at which the queue submission will wait (via pWaitSemaphores) + VkPipelineStageFlags waitStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + // The submit info structure specifies a command buffer queue submission batch + VkSubmitInfo submitInfo = {}; + submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submitInfo.pWaitDstStageMask = &waitStageMask; // Pointer to the list of pipeline stages that the semaphore waits will occur at + submitInfo.waitSemaphoreCount = 1; // One wait semaphore + submitInfo.signalSemaphoreCount = 1; // One signal semaphore + submitInfo.pCommandBuffers = &drawCmdBuffers[currentBuffer]; // Command buffers(s) to execute in this batch (submission) + submitInfo.commandBufferCount = 1; // One command buffer + +#if defined(VK_USE_PLATFORM_MACOS_MVK) + // SRS - on macOS use swapchain helper function with common semaphores/fences for proper resize handling + submitInfo.pWaitSemaphores = &semaphores.presentComplete; // Semaphore(s) to wait upon before the submitted command buffer starts executing + submitInfo.pSignalSemaphores = &semaphores.renderComplete; // Semaphore(s) to be signaled when command buffers have completed + + // Submit to the graphics queue passing a wait fence + VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, waitFences[currentBuffer])); + + // Present the current buffer to the swap chain + submitFrame(); +#else + // SRS - on other platforms use original bare code with local semaphores/fences for illustrative purposes + submitInfo.pWaitSemaphores = &presentCompleteSemaphore; // Semaphore(s) to wait upon before the submitted command buffer starts executing + submitInfo.pSignalSemaphores = &renderCompleteSemaphore; // Semaphore(s) to be signaled when command buffers have completed + + // Submit to the graphics queue passing a wait fence + VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, queueCompleteFences[currentBuffer])); + + // Present the current buffer to the swap 
chain + // Pass the semaphore signaled by the command buffer submission from the submit info as the wait semaphore for swap chain presentation + // This ensures that the image is not presented to the windowing system until all commands have been submitted + VkResult present = swapChain.queuePresent(queue, currentBuffer, renderCompleteSemaphore); + if (!((present == VK_SUCCESS) || (present == VK_SUBOPTIMAL_KHR))) { + VK_CHECK_RESULT(present); + } +#endif + } + + // Prepare vertex and index buffers for an indexed triangle + // Also uploads them to device local memory using staging and initializes vertex input and attribute binding to match the vertex shader + void prepareVertices(bool useStagingBuffers) + { + // A note on memory management in Vulkan in general: + // This is a very complex topic and while it's fine for an example application to small individual memory allocations that is not + // what should be done a real-world application, where you should allocate large chunks of memory at once instead. + + // Setup vertices + std::vector vertexBuffer = + { + { { 1.0f, 1.0f, 0.0f }, { 1.0f, 0.0f, 0.0f } }, + { { -1.0f, 1.0f, 0.0f }, { 0.0f, 1.0f, 0.0f } }, + { { 0.0f, -1.0f, 0.0f }, { 0.0f, 0.0f, 1.0f } } + }; + uint32_t vertexBufferSize = static_cast(vertexBuffer.size()) * sizeof(Vertex); + + // Setup indices + std::vector indexBuffer = { 0, 1, 2 }; + indices.count = static_cast(indexBuffer.size()); + uint32_t indexBufferSize = indices.count * sizeof(uint32_t); + + VkMemoryAllocateInfo memAlloc = {}; + memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + VkMemoryRequirements memReqs; + + void *data; + + if (useStagingBuffers) + { + // Static data like vertex and index buffer should be stored on the device memory + // for optimal (and fastest) access by the GPU + // + // To achieve this we use so-called "staging buffers" : + // - Create a buffer that's visible to the host (and can be mapped) + // - Copy the data to this buffer + // - Create another buffer that's local on the device (VRAM) with the same size + // - Copy the data from the host to the device using a command buffer + // - Delete the host visible (staging) buffer + // - Use the device local buffers for rendering + + struct StagingBuffer { + VkDeviceMemory memory; + VkBuffer buffer; + }; + + struct { + StagingBuffer vertices; + StagingBuffer indices; + } stagingBuffers; + + // Vertex buffer + VkBufferCreateInfo vertexBufferInfo = {}; + vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + vertexBufferInfo.size = vertexBufferSize; + // Buffer is used as the copy source + vertexBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + // Create a host-visible buffer to copy the vertex data to (staging buffer) + VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &stagingBuffers.vertices.buffer)); + vkGetBufferMemoryRequirements(device, stagingBuffers.vertices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + // Request a host visible memory type that can be used to copy our data do + // Also request it to be coherent, so that writes are visible to the GPU right after unmapping the buffer + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.vertices.memory)); + // Map and copy + VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.vertices.memory, 0, memAlloc.allocationSize, 0, &data)); + memcpy(data, 
vertexBuffer.data(), vertexBufferSize); + vkUnmapMemory(device, stagingBuffers.vertices.memory); + VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.vertices.buffer, stagingBuffers.vertices.memory, 0)); + + // Create a device local buffer to which the (host local) vertex data will be copied and which will be used for rendering + vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &vertices.buffer)); + vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); + VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); + + // Index buffer + VkBufferCreateInfo indexbufferInfo = {}; + indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + indexbufferInfo.size = indexBufferSize; + indexbufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + // Copy index data to a buffer visible to the host (staging buffer) + VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &stagingBuffers.indices.buffer)); + vkGetBufferMemoryRequirements(device, stagingBuffers.indices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &stagingBuffers.indices.memory)); + VK_CHECK_RESULT(vkMapMemory(device, stagingBuffers.indices.memory, 0, indexBufferSize, 0, &data)); + memcpy(data, indexBuffer.data(), indexBufferSize); + vkUnmapMemory(device, stagingBuffers.indices.memory); + VK_CHECK_RESULT(vkBindBufferMemory(device, stagingBuffers.indices.buffer, stagingBuffers.indices.memory, 0)); + + // Create destination buffer with device only visibility + indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); + vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); + VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); + + // Buffer copies have to be submitted to a queue, so we need a command buffer for them + // Note: Some devices offer a dedicated transfer queue (with only the transfer bit set) that may be faster when doing lots of copies + VkCommandBuffer copyCmd = getCommandBuffer(true); + + // Put buffer region copies into command buffer + VkBufferCopy copyRegion = {}; + + // Vertex buffer + copyRegion.size = vertexBufferSize; + vkCmdCopyBuffer(copyCmd, stagingBuffers.vertices.buffer, vertices.buffer, 1, ©Region); + // Index buffer + copyRegion.size = indexBufferSize; + vkCmdCopyBuffer(copyCmd, stagingBuffers.indices.buffer, indices.buffer, 1, ©Region); + + // Flushing the command buffer will also submit it to the queue and uses a fence to ensure that all commands have been executed before returning + flushCommandBuffer(copyCmd); + + // Destroy staging buffers + // Note: Staging buffer must not be deleted before the 
copies have been submitted and executed + vkDestroyBuffer(device, stagingBuffers.vertices.buffer, nullptr); + vkFreeMemory(device, stagingBuffers.vertices.memory, nullptr); + vkDestroyBuffer(device, stagingBuffers.indices.buffer, nullptr); + vkFreeMemory(device, stagingBuffers.indices.memory, nullptr); + } + else + { + // Don't use staging + // Create host-visible buffers only and use these for rendering. This is not advised and will usually result in lower rendering performance + + // Vertex buffer + VkBufferCreateInfo vertexBufferInfo = {}; + vertexBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + vertexBufferInfo.size = vertexBufferSize; + vertexBufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; + + // Copy vertex data to a buffer visible to the host + VK_CHECK_RESULT(vkCreateBuffer(device, &vertexBufferInfo, nullptr, &vertices.buffer)); + vkGetBufferMemoryRequirements(device, vertices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT is host visible memory, and VK_MEMORY_PROPERTY_HOST_COHERENT_BIT makes sure writes are directly visible + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &vertices.memory)); + VK_CHECK_RESULT(vkMapMemory(device, vertices.memory, 0, memAlloc.allocationSize, 0, &data)); + memcpy(data, vertexBuffer.data(), vertexBufferSize); + vkUnmapMemory(device, vertices.memory); + VK_CHECK_RESULT(vkBindBufferMemory(device, vertices.buffer, vertices.memory, 0)); + + // Index buffer + VkBufferCreateInfo indexbufferInfo = {}; + indexbufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + indexbufferInfo.size = indexBufferSize; + indexbufferInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; + + // Copy index data to a buffer visible to the host + VK_CHECK_RESULT(vkCreateBuffer(device, &indexbufferInfo, nullptr, &indices.buffer)); + vkGetBufferMemoryRequirements(device, indices.buffer, &memReqs); + memAlloc.allocationSize = memReqs.size; + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &indices.memory)); + VK_CHECK_RESULT(vkMapMemory(device, indices.memory, 0, indexBufferSize, 0, &data)); + memcpy(data, indexBuffer.data(), indexBufferSize); + vkUnmapMemory(device, indices.memory); + VK_CHECK_RESULT(vkBindBufferMemory(device, indices.buffer, indices.memory, 0)); + } + } + + void setupDescriptorPool() + { + // We need to tell the API the number of max. requested descriptors per type + VkDescriptorPoolSize typeCounts[1]; + // This example only uses one descriptor type (uniform buffer) and only requests one descriptor of this type + typeCounts[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + typeCounts[0].descriptorCount = 1; + // For additional types you need to add new entries in the type count list + // E.g. 
for two combined image samplers : + // typeCounts[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + // typeCounts[1].descriptorCount = 2; + + // Create the global descriptor pool + // All descriptors used in this example are allocated from this pool + VkDescriptorPoolCreateInfo descriptorPoolInfo = {}; + descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + descriptorPoolInfo.pNext = nullptr; + descriptorPoolInfo.poolSizeCount = 1; + descriptorPoolInfo.pPoolSizes = typeCounts; + // Set the max. number of descriptor sets that can be requested from this pool (requesting beyond this limit will result in an error) + descriptorPoolInfo.maxSets = 1; + + VK_CHECK_RESULT(vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &descriptorPool)); + } + + void setupDescriptorSetLayout() + { + // Setup layout of descriptors used in this example + // Basically connects the different shader stages to descriptors for binding uniform buffers, image samplers, etc. + // So every shader binding should map to one descriptor set layout binding + + // Binding 0: Uniform buffer (Vertex shader) + VkDescriptorSetLayoutBinding layoutBinding = {}; + layoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + layoutBinding.descriptorCount = 1; + layoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; + layoutBinding.pImmutableSamplers = nullptr; + + VkDescriptorSetLayoutCreateInfo descriptorLayout = {}; + descriptorLayout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + descriptorLayout.pNext = nullptr; + descriptorLayout.bindingCount = 1; + descriptorLayout.pBindings = &layoutBinding; + + VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorLayout, nullptr, &descriptorSetLayout)); + + // Create the pipeline layout that is used to generate the rendering pipelines that are based on this descriptor set layout + // In a more complex scenario you would have different pipeline layouts for different descriptor set layouts that could be reused + VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {}; + pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pPipelineLayoutCreateInfo.pNext = nullptr; + pPipelineLayoutCreateInfo.setLayoutCount = 1; + pPipelineLayoutCreateInfo.pSetLayouts = &descriptorSetLayout; + + VK_CHECK_RESULT(vkCreatePipelineLayout(device, &pPipelineLayoutCreateInfo, nullptr, &pipelineLayout)); + } + + void setupDescriptorSet() + { + // Allocate a new descriptor set from the global descriptor pool + VkDescriptorSetAllocateInfo allocInfo = {}; + allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + allocInfo.descriptorPool = descriptorPool; + allocInfo.descriptorSetCount = 1; + allocInfo.pSetLayouts = &descriptorSetLayout; + + VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet)); + + // Update the descriptor set determining the shader binding points + // For every binding point used in a shader there needs to be one + // descriptor set matching that binding point + + VkWriteDescriptorSet writeDescriptorSet = {}; + + // Binding 0 : Uniform buffer + writeDescriptorSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + writeDescriptorSet.dstSet = descriptorSet; + writeDescriptorSet.descriptorCount = 1; + writeDescriptorSet.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + writeDescriptorSet.pBufferInfo = &uniformBufferVS.descriptor; + // Binds this uniform buffer to binding point 0 + writeDescriptorSet.dstBinding = 0; + + vkUpdateDescriptorSets(device, 1, &writeDescriptorSet, 
0, nullptr); + } + + // Create the depth (and stencil) buffer attachments used by our framebuffers + // Note: Override of virtual function in the base class and called from within VulkanExampleBase::prepare + void setupDepthStencil() + { + // Create an optimal image used as the depth stencil attachment + VkImageCreateInfo image = {}; + image.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image.imageType = VK_IMAGE_TYPE_2D; + image.format = depthFormat; + // Use example's height and width + image.extent = { width, height, 1 }; + image.mipLevels = 1; + image.arrayLayers = 1; + image.samples = VK_SAMPLE_COUNT_1_BIT; + image.tiling = VK_IMAGE_TILING_OPTIMAL; + image.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; + image.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + VK_CHECK_RESULT(vkCreateImage(device, &image, nullptr, &depthStencil.image)); + + // Allocate memory for the image (device local) and bind it to our image + VkMemoryAllocateInfo memAlloc = {}; + memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + VkMemoryRequirements memReqs; + vkGetImageMemoryRequirements(device, depthStencil.image, &memReqs); + memAlloc.allocationSize = memReqs.size; + memAlloc.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &depthStencil.mem)); + VK_CHECK_RESULT(vkBindImageMemory(device, depthStencil.image, depthStencil.mem, 0)); + + // Create a view for the depth stencil image + // Images aren't directly accessed in Vulkan, but rather through views described by a subresource range + // This allows for multiple views of one image with differing ranges (e.g. for different layers) + VkImageViewCreateInfo depthStencilView = {}; + depthStencilView.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + depthStencilView.viewType = VK_IMAGE_VIEW_TYPE_2D; + depthStencilView.format = depthFormat; + depthStencilView.subresourceRange = {}; + depthStencilView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; + // Stencil aspect should only be set on depth + stencil formats (VK_FORMAT_D16_UNORM_S8_UINT..VK_FORMAT_D32_SFLOAT_S8_UINT) + if (depthFormat >= VK_FORMAT_D16_UNORM_S8_UINT) + depthStencilView.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; + + depthStencilView.subresourceRange.baseMipLevel = 0; + depthStencilView.subresourceRange.levelCount = 1; + depthStencilView.subresourceRange.baseArrayLayer = 0; + depthStencilView.subresourceRange.layerCount = 1; + depthStencilView.image = depthStencil.image; + VK_CHECK_RESULT(vkCreateImageView(device, &depthStencilView, nullptr, &depthStencil.view)); + } + + // Create a frame buffer for each swap chain image + // Note: Override of virtual function in the base class and called from within VulkanExampleBase::prepare + void setupFrameBuffer() + { + // Create a frame buffer for every image in the swapchain + frameBuffers.resize(swapChain.imageCount); + for (size_t i = 0; i < frameBuffers.size(); i++) + { + std::array attachments; + attachments[0] = swapChain.buffers[i].view; // Color attachment is the view of the swapchain image + attachments[1] = depthStencil.view; // Depth/Stencil attachment is the same for all frame buffers + + VkFramebufferCreateInfo frameBufferCreateInfo = {}; + frameBufferCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + // All frame buffers use the same renderpass setup + frameBufferCreateInfo.renderPass = renderPass; + frameBufferCreateInfo.attachmentCount = static_cast(attachments.size()); + 
frameBufferCreateInfo.pAttachments = attachments.data(); + frameBufferCreateInfo.width = width; + frameBufferCreateInfo.height = height; + frameBufferCreateInfo.layers = 1; + // Create the framebuffer + VK_CHECK_RESULT(vkCreateFramebuffer(device, &frameBufferCreateInfo, nullptr, &frameBuffers[i])); + } + } + + // Render pass setup + // Render passes are a new concept in Vulkan. They describe the attachments used during rendering and may contain multiple subpasses with attachment dependencies + // This allows the driver to know up-front what the rendering will look like and is a good opportunity to optimize especially on tile-based renderers (with multiple subpasses) + // Using sub pass dependencies also adds implicit layout transitions for the attachment used, so we don't need to add explicit image memory barriers to transform them + // Note: Override of virtual function in the base class and called from within VulkanExampleBase::prepare + void setupRenderPass() + { + // This example will use a single render pass with one subpass + + // Descriptors for the attachments used by this renderpass + std::array attachments = {}; + + // Color attachment + attachments[0].format = swapChain.colorFormat; // Use the color format selected by the swapchain + attachments[0].samples = VK_SAMPLE_COUNT_1_BIT; // We don't use multi sampling in this example + attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // Clear this attachment at the start of the render pass + attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // Keep its contents after the render pass is finished (for displaying it) + attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // We don't use stencil, so don't care for load + attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // Same for store + attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Layout at render pass start. Initial doesn't matter, so we use undefined + attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; // Layout to which the attachment is transitioned when the render pass is finished + // As we want to present the color buffer to the swapchain, we transition to PRESENT_KHR + // Depth attachment + attachments[1].format = depthFormat; // A proper depth format is selected in the example base + attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; + attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // Clear depth at start of first subpass + attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // We don't need depth after render pass has finished (DONT_CARE may result in better performance) + attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // No stencil + attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // No Stencil + attachments[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Layout at render pass start. 
Initial doesn't matter, so we use undefined + attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; // Transition to depth/stencil attachment + + // Setup attachment references + VkAttachmentReference colorReference = {}; + colorReference.attachment = 0; // Attachment 0 is color + colorReference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // Attachment layout used as color during the subpass + + VkAttachmentReference depthReference = {}; + depthReference.attachment = 1; // Attachment 1 is color + depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; // Attachment used as depth/stencil used during the subpass + + // Setup a single subpass reference + VkSubpassDescription subpassDescription = {}; + subpassDescription.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; + subpassDescription.colorAttachmentCount = 1; // Subpass uses one color attachment + subpassDescription.pColorAttachments = &colorReference; // Reference to the color attachment in slot 0 + subpassDescription.pDepthStencilAttachment = &depthReference; // Reference to the depth attachment in slot 1 + subpassDescription.inputAttachmentCount = 0; // Input attachments can be used to sample from contents of a previous subpass + subpassDescription.pInputAttachments = nullptr; // (Input attachments not used by this example) + subpassDescription.preserveAttachmentCount = 0; // Preserved attachments can be used to loop (and preserve) attachments through subpasses + subpassDescription.pPreserveAttachments = nullptr; // (Preserve attachments not used by this example) + subpassDescription.pResolveAttachments = nullptr; // Resolve attachments are resolved at the end of a sub pass and can be used for e.g. multi sampling + + // Setup subpass dependencies + // These will add the implicit attachment layout transitions specified by the attachment descriptions + // The actual usage layout is preserved through the layout specified in the attachment reference + // Each subpass dependency will introduce a memory and execution dependency between the source and dest subpass described by + // srcStageMask, dstStageMask, srcAccessMask, dstAccessMask (and dependencyFlags is set) + // Note: VK_SUBPASS_EXTERNAL is a special constant that refers to all commands executed outside of the actual renderpass) + std::array dependencies; + + // Does the transition from final to initial layout for the depth an color attachments + // Depth attachment + dependencies[0].srcSubpass = VK_SUBPASS_EXTERNAL; + dependencies[0].dstSubpass = 0; + dependencies[0].srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + dependencies[0].dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; + dependencies[0].srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + dependencies[0].dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; + dependencies[0].dependencyFlags = 0; + // Color attachment + dependencies[1].srcSubpass = VK_SUBPASS_EXTERNAL; + dependencies[1].dstSubpass = 0; + dependencies[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependencies[1].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependencies[1].srcAccessMask = 0; + dependencies[1].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT; + dependencies[1].dependencyFlags = 0; + + // Create the actual renderpass + 
VkRenderPassCreateInfo renderPassInfo = {}; + renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + renderPassInfo.attachmentCount = static_cast(attachments.size()); // Number of attachments used by this render pass + renderPassInfo.pAttachments = attachments.data(); // Descriptions of the attachments used by the render pass + renderPassInfo.subpassCount = 1; // We only use one subpass in this example + renderPassInfo.pSubpasses = &subpassDescription; // Description of that subpass + renderPassInfo.dependencyCount = static_cast(dependencies.size()); // Number of subpass dependencies + renderPassInfo.pDependencies = dependencies.data(); // Subpass dependencies used by the render pass + + VK_CHECK_RESULT(vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass)); + } + + // Vulkan loads its shaders from an immediate binary representation called SPIR-V + // Shaders are compiled offline from e.g. GLSL using the reference glslang compiler + // This function loads such a shader from a binary file and returns a shader module structure + VkShaderModule loadSPIRVShader(std::string filename) + { + size_t shaderSize; + char* shaderCode = NULL; + +#if defined(__ANDROID__) + // Load shader from compressed asset + AAsset* asset = AAssetManager_open(androidApp->activity->assetManager, filename.c_str(), AASSET_MODE_STREAMING); + assert(asset); + shaderSize = AAsset_getLength(asset); + assert(shaderSize > 0); + + shaderCode = new char[shaderSize]; + AAsset_read(asset, shaderCode, shaderSize); + AAsset_close(asset); +#else + std::ifstream is(filename, std::ios::binary | std::ios::in | std::ios::ate); + + if (is.is_open()) + { + shaderSize = is.tellg(); + is.seekg(0, std::ios::beg); + // Copy file contents into a buffer + shaderCode = new char[shaderSize]; + is.read(shaderCode, shaderSize); + is.close(); + assert(shaderSize > 0); + } +#endif + if (shaderCode) + { + // Create a new shader module that will be used for pipeline creation + VkShaderModuleCreateInfo moduleCreateInfo{}; + moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + moduleCreateInfo.codeSize = shaderSize; + moduleCreateInfo.pCode = (uint32_t*)shaderCode; + + VkShaderModule shaderModule; + VK_CHECK_RESULT(vkCreateShaderModule(device, &moduleCreateInfo, NULL, &shaderModule)); + + delete[] shaderCode; + + return shaderModule; + } + else + { + std::cerr << "Error: Could not open shader file \"" << filename << "\"" << std::endl; + return VK_NULL_HANDLE; + } + } + + void preparePipelines() + { + // Create the graphics pipeline used in this example + // Vulkan uses the concept of rendering pipelines to encapsulate fixed states, replacing OpenGL's complex state machine + // A pipeline is then stored and hashed on the GPU making pipeline changes very fast + // Note: There are still a few dynamic states that are not directly part of the pipeline (but the info that they are used is) + + VkGraphicsPipelineCreateInfo pipelineCreateInfo = {}; + pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + // The layout used for this pipeline (can be shared among multiple pipelines using the same layout) + pipelineCreateInfo.layout = pipelineLayout; + // Renderpass this pipeline is attached to + pipelineCreateInfo.renderPass = renderPass; + + // Construct the different states making up the pipeline + + // Input assembly state describes how primitives are assembled + // This pipeline will assemble vertex data as a triangle lists (though we only use one triangle) + 
VkPipelineInputAssemblyStateCreateInfo inputAssemblyState = {}; + inputAssemblyState.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + inputAssemblyState.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + // Rasterization state + VkPipelineRasterizationStateCreateInfo rasterizationState = {}; + rasterizationState.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizationState.polygonMode = VK_POLYGON_MODE_FILL; + rasterizationState.cullMode = VK_CULL_MODE_NONE; + rasterizationState.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; + rasterizationState.depthClampEnable = VK_FALSE; + rasterizationState.rasterizerDiscardEnable = VK_FALSE; + rasterizationState.depthBiasEnable = VK_FALSE; + rasterizationState.lineWidth = 1.0f; + + // Color blend state describes how blend factors are calculated (if used) + // We need one blend attachment state per color attachment (even if blending is not used) + VkPipelineColorBlendAttachmentState blendAttachmentState[1] = {}; + blendAttachmentState[0].colorWriteMask = 0xf; + blendAttachmentState[0].blendEnable = VK_FALSE; + VkPipelineColorBlendStateCreateInfo colorBlendState = {}; + colorBlendState.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + colorBlendState.attachmentCount = 1; + colorBlendState.pAttachments = blendAttachmentState; + + // Viewport state sets the number of viewports and scissor used in this pipeline + // Note: This is actually overridden by the dynamic states (see below) + VkPipelineViewportStateCreateInfo viewportState = {}; + viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewportState.viewportCount = 1; + viewportState.scissorCount = 1; + + // Enable dynamic states + // Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer + // To be able to change these we need do specify which dynamic states will be changed using this pipeline. Their actual states are set later on in the command buffer. 
+ // For this example we will set the viewport and scissor using dynamic states + std::vector dynamicStateEnables; + dynamicStateEnables.push_back(VK_DYNAMIC_STATE_VIEWPORT); + dynamicStateEnables.push_back(VK_DYNAMIC_STATE_SCISSOR); + VkPipelineDynamicStateCreateInfo dynamicState = {}; + dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamicState.pDynamicStates = dynamicStateEnables.data(); + dynamicState.dynamicStateCount = static_cast(dynamicStateEnables.size()); + + // Depth and stencil state containing depth and stencil compare and test operations + // We only use depth tests and want depth tests and writes to be enabled and compare with less or equal + VkPipelineDepthStencilStateCreateInfo depthStencilState = {}; + depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + depthStencilState.depthTestEnable = VK_TRUE; + depthStencilState.depthWriteEnable = VK_TRUE; + depthStencilState.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL; + depthStencilState.depthBoundsTestEnable = VK_FALSE; + depthStencilState.back.failOp = VK_STENCIL_OP_KEEP; + depthStencilState.back.passOp = VK_STENCIL_OP_KEEP; + depthStencilState.back.compareOp = VK_COMPARE_OP_ALWAYS; + depthStencilState.stencilTestEnable = VK_FALSE; + depthStencilState.front = depthStencilState.back; + + // Multi sampling state + // This example does not make use of multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline + VkPipelineMultisampleStateCreateInfo multisampleState = {}; + multisampleState.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisampleState.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; + multisampleState.pSampleMask = nullptr; + + // Vertex input descriptions + // Specifies the vertex input parameters for a pipeline + + // Vertex input binding + // This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers) + VkVertexInputBindingDescription vertexInputBinding = {}; + vertexInputBinding.binding = 0; + vertexInputBinding.stride = sizeof(Vertex); + vertexInputBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + + // Input attribute bindings describe shader attribute locations and memory layouts + std::array vertexInputAttributs; + // These match the following shader layout (see triangle.vert): + // layout (location = 0) in vec3 inPos; + // layout (location = 1) in vec3 inColor; + // Attribute location 0: Position + vertexInputAttributs[0].binding = 0; + vertexInputAttributs[0].location = 0; + // Position attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32) + vertexInputAttributs[0].format = VK_FORMAT_R32G32B32_SFLOAT; + vertexInputAttributs[0].offset = offsetof(Vertex, position); + // Attribute location 1: Color + vertexInputAttributs[1].binding = 0; + vertexInputAttributs[1].location = 1; + // Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32) + vertexInputAttributs[1].format = VK_FORMAT_R32G32B32_SFLOAT; + vertexInputAttributs[1].offset = offsetof(Vertex, color); + + // Vertex input state used for pipeline creation + VkPipelineVertexInputStateCreateInfo vertexInputState = {}; + vertexInputState.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertexInputState.vertexBindingDescriptionCount = 1; + vertexInputState.pVertexBindingDescriptions = &vertexInputBinding; + vertexInputState.vertexAttributeDescriptionCount = 2; + vertexInputState.pVertexAttributeDescriptions = vertexInputAttributs.data(); + + // Shaders + std::array 
shaderStages{}; + + // Vertex shader + shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + // Set pipeline stage for this shader + shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; + // Load binary SPIR-V shader + shaderStages[0].module = loadSPIRVShader(getHomeworkShadersPath() + "homework0/homework0.vert.spv"); + // Main entry point for the shader + shaderStages[0].pName = "main"; + assert(shaderStages[0].module != VK_NULL_HANDLE); + + // Fragment shader + shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + // Set pipeline stage for this shader + shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT; + // Load binary SPIR-V shader + shaderStages[1].module = loadSPIRVShader(getHomeworkShadersPath() + "homework0/homework0.frag.spv"); + // Main entry point for the shader + shaderStages[1].pName = "main"; + assert(shaderStages[1].module != VK_NULL_HANDLE); + + // Set pipeline shader stage info + pipelineCreateInfo.stageCount = static_cast(shaderStages.size()); + pipelineCreateInfo.pStages = shaderStages.data(); + + // Assign the pipeline states to the pipeline creation info structure + pipelineCreateInfo.pVertexInputState = &vertexInputState; + pipelineCreateInfo.pInputAssemblyState = &inputAssemblyState; + pipelineCreateInfo.pRasterizationState = &rasterizationState; + pipelineCreateInfo.pColorBlendState = &colorBlendState; + pipelineCreateInfo.pMultisampleState = &multisampleState; + pipelineCreateInfo.pViewportState = &viewportState; + pipelineCreateInfo.pDepthStencilState = &depthStencilState; + pipelineCreateInfo.pDynamicState = &dynamicState; + + // Create rendering pipeline using the specified states + VK_CHECK_RESULT(vkCreateGraphicsPipelines(device, pipelineCache, 1, &pipelineCreateInfo, nullptr, &pipeline)); + + // Shader modules are no longer needed once the graphics pipeline has been created + vkDestroyShaderModule(device, shaderStages[0].module, nullptr); + vkDestroyShaderModule(device, shaderStages[1].module, nullptr); + } + + void prepareUniformBuffers() + { + // Prepare and initialize a uniform buffer block containing shader uniforms + // Single uniforms like in OpenGL are no longer present in Vulkan. All Shader uniforms are passed via uniform buffer blocks + VkMemoryRequirements memReqs; + + // Vertex shader uniform buffer block + VkBufferCreateInfo bufferInfo = {}; + VkMemoryAllocateInfo allocInfo = {}; + allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + allocInfo.pNext = nullptr; + allocInfo.allocationSize = 0; + allocInfo.memoryTypeIndex = 0; + + bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + bufferInfo.size = sizeof(uboVS); + // This buffer will be used as a uniform buffer + bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; + + // Create a new buffer + VK_CHECK_RESULT(vkCreateBuffer(device, &bufferInfo, nullptr, &uniformBufferVS.buffer)); + // Get memory requirements including size, alignment and memory type + vkGetBufferMemoryRequirements(device, uniformBufferVS.buffer, &memReqs); + allocInfo.allocationSize = memReqs.size; + // Get the memory type index that supports host visible memory access + // Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial + // We also want the buffer to be host coherent so we don't have to flush (or sync after every update. 
+ // Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base + allocInfo.memoryTypeIndex = getMemoryTypeIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + // Allocate memory for the uniform buffer + VK_CHECK_RESULT(vkAllocateMemory(device, &allocInfo, nullptr, &(uniformBufferVS.memory))); + // Bind memory to buffer + VK_CHECK_RESULT(vkBindBufferMemory(device, uniformBufferVS.buffer, uniformBufferVS.memory, 0)); + + // Store information in the uniform's descriptor that is used by the descriptor set + uniformBufferVS.descriptor.buffer = uniformBufferVS.buffer; + uniformBufferVS.descriptor.offset = 0; + uniformBufferVS.descriptor.range = sizeof(uboVS); + + updateUniformBuffers(); + } + + void updateUniformBuffers() + { + // Pass matrices to the shaders + uboVS.projectionMatrix = camera.matrices.perspective; + uboVS.viewMatrix = camera.matrices.view; + uboVS.modelMatrix = glm::mat4(1.0f); + + // Map uniform buffer and update it + uint8_t *pData; + VK_CHECK_RESULT(vkMapMemory(device, uniformBufferVS.memory, 0, sizeof(uboVS), 0, (void **)&pData)); + memcpy(pData, &uboVS, sizeof(uboVS)); + // Unmap after data has been copied + // Note: Since we requested a host coherent memory type for the uniform buffer, the write is instantly visible to the GPU + vkUnmapMemory(device, uniformBufferVS.memory); + } + + void prepare() + { + VulkanExampleBase::prepare(); + prepareSynchronizationPrimitives(); + prepareVertices(USE_STAGING); + prepareUniformBuffers(); + setupDescriptorSetLayout(); + preparePipelines(); + setupDescriptorPool(); + setupDescriptorSet(); + buildCommandBuffers(); + prepared = true; + } + + virtual void render() + { + if (!prepared) + return; + draw(); + } + + virtual void viewChanged() + { + // This function is called by the base example class each time the view is changed by user input + updateUniformBuffers(); + } +}; + +// OS specific macros for the example main entry points +// Most of the code base is shared for the different supported operating systems, but stuff like message handling differs + +#if defined(_WIN32) +// Windows entry point +VulkanExample *vulkanExample; +LRESULT CALLBACK WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) +{ + if (vulkanExample != NULL) + { + vulkanExample->handleMessages(hWnd, uMsg, wParam, lParam); + } + return (DefWindowProc(hWnd, uMsg, wParam, lParam)); +} +int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR pCmdLine, int nCmdShow) +{ + for (size_t i = 0; i < __argc; i++) { VulkanExample::args.push_back(__argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->setupWindow(hInstance, WndProc); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + return 0; +} + +#elif defined(__ANDROID__) +// Android entry point +VulkanExample *vulkanExample; +void android_main(android_app* state) +{ + vulkanExample = new VulkanExample(); + state->userData = vulkanExample; + state->onAppCmd = VulkanExample::handleAppCommand; + state->onInputEvent = VulkanExample::handleAppInput; + androidApp = state; + vulkanExample->renderLoop(); + delete(vulkanExample); +} +#elif defined(_DIRECT2DISPLAY) + +// Linux entry point with direct to display wsi +// Direct to Displays (D2D) is used on embedded platforms +VulkanExample *vulkanExample; +static void handleEvent() +{ +} +int main(const int argc, const char *argv[]) +{ + for (size_t i = 
0; i < argc; i++) { VulkanExample::args.push_back(argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + return 0; +} +#elif defined(VK_USE_PLATFORM_DIRECTFB_EXT) +VulkanExample *vulkanExample; +static void handleEvent(const DFBWindowEvent *event) +{ + if (vulkanExample != NULL) + { + vulkanExample->handleEvent(event); + } +} +int main(const int argc, const char *argv[]) +{ + for (size_t i = 0; i < argc; i++) { VulkanExample::args.push_back(argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->setupWindow(); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + return 0; +} +#elif defined(VK_USE_PLATFORM_WAYLAND_KHR) +VulkanExample *vulkanExample; +int main(const int argc, const char *argv[]) +{ + for (size_t i = 0; i < argc; i++) { VulkanExample::args.push_back(argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->setupWindow(); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + return 0; +} +#elif defined(__linux__) || defined(__FreeBSD__) + +// Linux entry point +VulkanExample *vulkanExample; +#if defined(VK_USE_PLATFORM_XCB_KHR) +static void handleEvent(const xcb_generic_event_t *event) +{ + if (vulkanExample != NULL) + { + vulkanExample->handleEvent(event); + } +} +#else +static void handleEvent() +{ +} +#endif +int main(const int argc, const char *argv[]) +{ + for (size_t i = 0; i < argc; i++) { VulkanExample::args.push_back(argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->setupWindow(); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + return 0; +} +#elif (defined(VK_USE_PLATFORM_MACOS_MVK) && defined(VK_EXAMPLE_XCODE_GENERATED)) +VulkanExample *vulkanExample; +int main(const int argc, const char *argv[]) +{ + @autoreleasepool + { + for (size_t i = 0; i < argc; i++) { VulkanExample::args.push_back(argv[i]); }; + vulkanExample = new VulkanExample(); + vulkanExample->initVulkan(); + vulkanExample->setupWindow(nullptr); + vulkanExample->prepare(); + vulkanExample->renderLoop(); + delete(vulkanExample); + } + return 0; +} +#endif diff --git a/littleRender/render.cpp b/homework/homework1/homework1.cpp similarity index 81% rename from littleRender/render.cpp rename to homework/homework1/homework1.cpp index d601f2a..298196a 100644 --- a/littleRender/render.cpp +++ b/homework/homework1/homework1.cpp @@ -16,10 +16,498 @@ * * If you are looking for a complete glTF implementation, check out https://github.com/SaschaWillems/Vulkan-glTF-PBR/ */ -#include "render.h" -#include "glTFModel.h" +#include "homework1.h" + + /* + glTF loading functions + + The following functions take a glTF input model loaded via tinyglTF and convert all required data into our own structure + */ + + void VulkanglTFModel::loadImages(tinygltf::Model& input) + { + // Images can be stored inside the glTF (which is the case for the sample model), so instead of directly + // loading them from disk, we fetch them from the glTF loader and upload the buffers + images.resize(input.images.size()); + for (size_t i = 0; i < input.images.size(); i++) { + tinygltf::Image& glTFImage = input.images[i]; + // Get the image data from the glTF loader + unsigned char* buffer = nullptr; + VkDeviceSize bufferSize = 0; + bool deleteBuffer = false; + // We convert RGB-only images to RGBA, as 
most devices don't support RGB-formats in Vulkan + if (glTFImage.component == 3) { + bufferSize = glTFImage.width * glTFImage.height * 4; + buffer = new unsigned char[bufferSize]; + unsigned char* rgba = buffer; + unsigned char* rgb = &glTFImage.image[0]; + for (size_t i = 0; i < glTFImage.width * glTFImage.height; ++i) { + memcpy(rgba, rgb, sizeof(unsigned char) * 3); + rgba += 4; + rgb += 3; + } + deleteBuffer = true; + } + else { + buffer = &glTFImage.image[0]; + bufferSize = glTFImage.image.size(); + } + // Load texture from image buffer + images[i].texture.fromBuffer(buffer, bufferSize, VK_FORMAT_R8G8B8A8_UNORM, glTFImage.width, glTFImage.height, vulkanDevice, copyQueue); + if (deleteBuffer) { + delete[] buffer; + } + } + } + + void VulkanglTFModel::loadTextures(tinygltf::Model& input) + { + textures.resize(input.textures.size()); + for (size_t i = 0; i < input.textures.size(); i++) { + textures[i].imageIndex = input.textures[i].source; + } + } + + void VulkanglTFModel::loadAnimations(tinygltf::Model& input) + { + animations.resize(input.animations.size()); + + for (size_t i = 0; i < input.animations.size(); ++i) + { + auto glTFAnimation = input.animations[i]; + animations[i].name = glTFAnimation.name; + + //Samplers + animations[i].samplers.resize(glTFAnimation.samplers.size()); + for (size_t j = 0; j < glTFAnimation.samplers.size(); ++j) + { + auto glTFSampler = glTFAnimation.samplers[j]; + auto& dstSampler = animations[i].samplers[j]; + dstSampler.interpolation = glTFSampler.interpolation; + + // Read sampler keyframe input time values + { + const auto& accessor = input.accessors[glTFSampler.input]; + const auto& bufferView = input.bufferViews[accessor.bufferView]; + const auto& buffer = input.buffers[bufferView.buffer]; + const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset]; + const float* buf = static_cast(dataPtr); + for (size_t index = 0; index < accessor.count; ++index) + { + dstSampler.inputs.push_back(buf[index]); + } + // Adjust animation's start and end times + for (auto input : animations[i].samplers[j].inputs) + { + if (input < animations[i].start) + { + animations[i].start = input; + }; + if (input > animations[i].end) + { + animations[i].end = input; + } + } + } + + // Read sampler keyframe output translate/rotate/scale values + { + const auto& accessor = input.accessors[glTFSampler.output]; + const auto& bufferView = input.bufferViews[accessor.bufferView]; + const auto& buffer = input.buffers[bufferView.buffer]; + const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset]; + switch (accessor.type) + { + case TINYGLTF_TYPE_VEC3: + { + const glm::vec3* buf = static_cast(dataPtr); + for (size_t index = 0; index < accessor.count; index++) + { + dstSampler.outputsVec4.push_back(glm::vec4(buf[index], 0.0f)); + } + break; + } + case TINYGLTF_TYPE_VEC4: + { + const glm::vec4* buf = static_cast(dataPtr); + for (size_t index = 0; index < accessor.count; index++) + { + dstSampler.outputsVec4.push_back(buf[index]); + } + break; + } + default: + { + std::cout << "unknown type" << std::endl; + break; + } + } + } + } + + animations[i].channels.resize(glTFAnimation.channels.size()); + for (size_t j = 0; j < glTFAnimation.channels.size(); ++j) + { + auto glTFChannel = glTFAnimation.channels[j]; + auto& dstChannel = animations[i].channels[j]; + dstChannel.path = glTFChannel.target_path; + dstChannel.samplerIndex = glTFChannel.sampler; + dstChannel.node = nodeFromIndex(glTFChannel.target_node); + } + } + } + + void 
VulkanglTFModel::loadMaterials(tinygltf::Model& input) + { + materials.resize(input.materials.size()); + for (size_t i = 0; i < input.materials.size(); i++) { + // We only read the most basic properties required for our sample + tinygltf::Material glTFMaterial = input.materials[i]; + // Get the base color factor + if (glTFMaterial.values.find("baseColorFactor") != glTFMaterial.values.end()) { + materials[i].baseColorFactor = glm::make_vec4(glTFMaterial.values["baseColorFactor"].ColorFactor().data()); + } + // Get base color texture index + if (glTFMaterial.values.find("baseColorTexture") != glTFMaterial.values.end()) { + materials[i].baseColorTextureIndex = glTFMaterial.values["baseColorTexture"].TextureIndex(); + } + if (glTFMaterial.values.find("metallicRoughnessTexture") != glTFMaterial.values.end()) { + materials[i].matalicRoughTextureIndex = glTFMaterial.values["metallicRoughnessTexture"].TextureIndex(); + } + if (glTFMaterial.additionalValues.find("normalTexture") != glTFMaterial.additionalValues.end()) + { + materials[i].normalMapTextureIndex = glTFMaterial.additionalValues["normalTexture"].TextureIndex(); + } + if (glTFMaterial.emissiveTexture.index != -1) + { + materials[i].emissiveTextureIndex = glTFMaterial.emissiveTexture.index; + } + if (glTFMaterial.emissiveFactor.size() == 3) + { + materials[i].materialData.values.emissiveFactor = glm::make_vec3(glTFMaterial.emissiveFactor.data()); + } + + if (glTFMaterial.values.find("baseColorFactor") != glTFMaterial.values.end()) + { + materials[i].materialData.values.baseColorFactor = glm::make_vec4(glTFMaterial.values["baseColorFactor"].ColorFactor().data()); + } + } + } + + void VulkanglTFModel::loadNode(const tinygltf::Node& inputNode, const tinygltf::Model& input, VulkanglTFModel::Node* parent, uint32_t nodeIndex,std::vector& indexBuffer, std::vector& vertexBuffer) + { + VulkanglTFModel::Node* node = new VulkanglTFModel::Node{}; + node->matrix = glm::mat4(1.0f); + node->parent = parent; + node->index = nodeIndex; + + // Get the local node matrix + // It's either made up from translation, rotation, scale or a 4x4 matrix + if (inputNode.translation.size() == 3) { + node->matrix = glm::translate(node->matrix, glm::vec3(glm::make_vec3(inputNode.translation.data()))); + } + if (inputNode.rotation.size() == 4) { + glm::quat q = glm::make_quat(inputNode.rotation.data()); + node->matrix *= glm::mat4(q); + } + if (inputNode.scale.size() == 3) { + node->matrix = glm::scale(node->matrix, glm::vec3(glm::make_vec3(inputNode.scale.data()))); + } + if (inputNode.matrix.size() == 16) { + node->matrix = glm::make_mat4x4(inputNode.matrix.data()); + }; + + // Load node's children + if (inputNode.children.size() > 0) { + for (size_t i = 0; i < inputNode.children.size(); i++) { + loadNode(input.nodes[inputNode.children[i]], input , node, inputNode.children[i],indexBuffer, vertexBuffer); + } + } + + // If the node contains mesh data, we load vertices and indices from the buffers + // In glTF this is done via accessors and buffer views + if (inputNode.mesh > -1) { + const tinygltf::Mesh mesh = input.meshes[inputNode.mesh]; + // Iterate through all primitives of this node's mesh + for (size_t i = 0; i < mesh.primitives.size(); i++) { + const tinygltf::Primitive& glTFPrimitive = mesh.primitives[i]; + uint32_t firstIndex = static_cast(indexBuffer.size()); + uint32_t vertexStart = static_cast(vertexBuffer.size()); + uint32_t indexCount = 0; + // Vertices + { + const float* positionBuffer = nullptr; + const float* normalsBuffer = nullptr; + const float* 
texCoordsBuffer = nullptr; + const float* tangentsBuffer = nullptr; + size_t vertexCount = 0; + + // Get buffer data for vertex positions + if (glTFPrimitive.attributes.find("POSITION") != glTFPrimitive.attributes.end()) { + const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("POSITION")->second]; + const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; + positionBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); + vertexCount = accessor.count; + } + // Get buffer data for vertex normals + if (glTFPrimitive.attributes.find("NORMAL") != glTFPrimitive.attributes.end()) { + const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("NORMAL")->second]; + const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; + normalsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); + } + // Get buffer data for vertex texture coordinates + // glTF supports multiple sets, we only load the first one + if (glTFPrimitive.attributes.find("TEXCOORD_0") != glTFPrimitive.attributes.end()) { + const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("TEXCOORD_0")->second]; + const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; + texCoordsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); + } + + if (glTFPrimitive.attributes.find("TANGENT") != glTFPrimitive.attributes.end()) + { + const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("TANGENT")->second]; + const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; + tangentsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); + } + + // Append data to model's vertex buffer + for (size_t v = 0; v < vertexCount; v++) { + Vertex vert{}; + vert.pos = glm::vec4(glm::make_vec3(&positionBuffer[v * 3]), 1.0f); + vert.normal = glm::normalize(glm::vec3(normalsBuffer ? glm::make_vec3(&normalsBuffer[v * 3]) : glm::vec3(0.0f))); + vert.uv = texCoordsBuffer ? glm::make_vec2(&texCoordsBuffer[v * 2]) : glm::vec3(0.0f); + vert.tangent = tangentsBuffer ? 
glm::normalize(glm::make_vec3(&tangentsBuffer[v * 4])) : glm::vec3(0.0f); + vert.color = glm::vec3(1.0f, 1.0f, nodeIndex);//Temp set index in color attribute + vertexBuffer.push_back(vert); + } + } + // Indices + { + const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.indices]; + const tinygltf::BufferView& bufferView = input.bufferViews[accessor.bufferView]; + const tinygltf::Buffer& buffer = input.buffers[bufferView.buffer]; + + indexCount += static_cast(accessor.count); + + // glTF supports different component types of indices + switch (accessor.componentType) { + case TINYGLTF_PARAMETER_TYPE_UNSIGNED_INT: { + const uint32_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); + for (size_t index = 0; index < accessor.count; index++) { + indexBuffer.push_back(buf[index] + vertexStart); + } + break; + } + case TINYGLTF_PARAMETER_TYPE_UNSIGNED_SHORT: { + const uint16_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); + for (size_t index = 0; index < accessor.count; index++) { + indexBuffer.push_back(buf[index] + vertexStart); + } + break; + } + case TINYGLTF_PARAMETER_TYPE_UNSIGNED_BYTE: { + const uint8_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); + for (size_t index = 0; index < accessor.count; index++) { + indexBuffer.push_back(buf[index] + vertexStart); + } + break; + } + default: + std::cerr << "Index component type " << accessor.componentType << " not supported!" << std::endl; + return; + } + } + Primitive primitive{}; + primitive.firstIndex = firstIndex; + primitive.indexCount = indexCount; + primitive.materialIndex = glTFPrimitive.material; + node->mesh.primitives.push_back(primitive); + } + } + + if (parent) { + parent->children.push_back(node); + } + else { + nodes.push_back(node); + } + } + + VulkanglTFModel::Node* VulkanglTFModel::findNode(Node* parent, uint32_t index) + { + Node* nodeFound = nullptr; + if (parent->index == index) + { + return parent; + } + for (auto& child : parent->children) + { + nodeFound = findNode(child, index); + if (nodeFound) + { + break; + } + } + return nodeFound; + } + + VulkanglTFModel::Node* VulkanglTFModel::nodeFromIndex(uint32_t index) + { + Node* nodeFound = nullptr; + for (auto& node : nodes) + { + nodeFound = findNode(node, index); + if (nodeFound) + { + break; + } + } + return nodeFound; + } + + void VulkanglTFModel::updateAnimation(float deltaTime, vks::Buffer& buffer) + { + constexpr uint32_t activeAnimation = 0; + Animation& animation = animations[activeAnimation]; + animation.currentTime += deltaTime; + if (animation.currentTime > animation.end) + { + animation.currentTime -= animation.end; + } + + for (auto& channel : animation.channels) + { + auto& sampler = animation.samplers[channel.samplerIndex]; + for (size_t i = 0; i < sampler.inputs.size() - 1; ++i) + { + if (sampler.interpolation != "LINEAR") + { + std::cout << "This sample only supports linear interpolations\n"; + continue; + } + if ((animation.currentTime >= sampler.inputs[i]) && (animation.currentTime <= sampler.inputs[i + 1])) + { + float ratio = (animation.currentTime - sampler.inputs[i]) / (sampler.inputs[i + 1] - sampler.inputs[i]); + if (channel.path == "translation") + { + channel.node->translation = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], ratio); + channel.node->bAnimateNode = true; + } + if (channel.path == "rotation") + { + glm::quat q1; + q1.x = sampler.outputsVec4[i].x; + q1.y = sampler.outputsVec4[i].y; + q1.z = 
sampler.outputsVec4[i].z; + q1.w = sampler.outputsVec4[i].w; + + glm::quat q2; + q2.x = sampler.outputsVec4[i + 1].x; + q2.y = sampler.outputsVec4[i + 1].y; + q2.z = sampler.outputsVec4[i + 1].z; + q2.w = sampler.outputsVec4[i + 1].w; + + channel.node->rotation = glm::normalize(glm::slerp(q1, q2, ratio)); + channel.node->bAnimateNode = true; + } + if (channel.path == "scale") + { + channel.node->scale = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], ratio); + channel.node->bAnimateNode = true; + } + } + } + } + std::vector nodeMatrics(nodeCount); + for (auto& node : nodes) + { + updateNodeMatrix(node, nodeMatrics); + } + buffer.copyTo(nodeMatrics.data(), nodeCount * sizeof(glm::mat4)); + } + + void VulkanglTFModel::updateNodeMatrix(Node* node, std::vector& nodeMatrics) + { + nodeMatrics[node->index] = getNodeMatrix(node); + for (auto& child : node->children) + { + updateNodeMatrix(child, nodeMatrics); + } + } + + glm::mat4 VulkanglTFModel::getNodeMatrix(Node* node) + { + glm::mat4 nodeMatrix = node->getLocalMatrix(); + Node* currentParent = node->parent; + while (currentParent) + { + nodeMatrix = currentParent->getLocalMatrix() * nodeMatrix; + currentParent = currentParent->parent; + } + return nodeMatrix; + } + + /* + glTF rendering functions + */ + + // Draw a single node including child nodes (if present) + void VulkanglTFModel::drawNode(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, VulkanglTFModel::Node* node, bool bPushConstants) + { + if (node->mesh.primitives.size() > 0) { + // Pass the node's matrix via push constants + // Traverse the node hierarchy to the top-most parent to get the final matrix of the current node + glm::mat4 nodeMatrix = node->matrix; + VulkanglTFModel::Node* currentParent = node->parent; + while (currentParent) { + nodeMatrix = currentParent->matrix * nodeMatrix; + currentParent = currentParent->parent; + } + + for (VulkanglTFModel::Primitive& primitive : node->mesh.primitives) { + if (primitive.indexCount > 0) { + // Get the texture index for this primitive + if (textures.size() > 0) + { + VulkanglTFModel::Texture texture = textures[materials[primitive.materialIndex].baseColorTextureIndex]; + auto normalMap = textures[materials[primitive.materialIndex].normalMapTextureIndex]; + auto roughMetalMap = textures[materials[primitive.materialIndex].matalicRoughTextureIndex]; + + if (materials[primitive.materialIndex].emissiveTextureIndex >= 0) + { + auto emissiveMap = textures[materials[primitive.materialIndex].emissiveTextureIndex]; + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 4, 1, &images[emissiveMap.imageIndex].descriptorSet, 0, nullptr); + } + + // Bind the descriptor for the current primitive's texture + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 1, 1, &images[texture.imageIndex].descriptorSet, 0, nullptr); + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 2, 1, &images[normalMap.imageIndex].descriptorSet, 0, nullptr); + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 3, 1, &images[roughMetalMap.imageIndex].descriptorSet, 0, nullptr); + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 5, 1, &materials[primitive.materialIndex].materialData.descriptorSet, 0, nullptr); + } + vkCmdDrawIndexed(commandBuffer, primitive.indexCount, 1, primitive.firstIndex, 0, 0); + } + } + } + for (auto& child : node->children) { + 
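The comment at the start of drawNode still speaks of passing the node's matrix via push constants, but in this homework1 version no vkCmdPushConstants call remains; the node transform appears to be fetched in the shader from the node-matrix storage buffer filled by updateAnimation, indexed by the node index that loadNode temporarily stores in the vertex color. For comparison, a minimal sketch of the push-constant route, assuming the pipeline layout declares a vertex-stage push-constant range of sizeof(glm::mat4) (not part of this commit):

    // Push-constant sketch: upload the accumulated nodeMatrix right before the indexed draw
    vkCmdPushConstants(commandBuffer, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(glm::mat4), &nodeMatrix);
    vkCmdDrawIndexed(commandBuffer, primitive.indexCount, 1, primitive.firstIndex, 0, 0);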
drawNode(commandBuffer, pipelineLayout, child, bPushConstants); + } + } + + // Draw the glTF scene starting at the top-level-nodes + void VulkanglTFModel::draw(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, bool flag = true) + { + // All vertices and indices are stored in single buffers, so we only need to bind once + VkDeviceSize offsets[1] = { 0 }; + vkCmdBindVertexBuffers(commandBuffer, 0, 1, &vertices.buffer, offsets); + vkCmdBindIndexBuffer(commandBuffer, indices.buffer, 0, VK_INDEX_TYPE_UINT32); + // Render all nodes at top-level + for (auto& node : nodes) { + drawNode(commandBuffer, pipelineLayout, node, flag); + } + } + - VulkanExample::VulkanExample(): VulkanExampleBase(ENABLE_VALIDATION) @@ -184,7 +672,7 @@ vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayouts.pbrLayout, 0, 1, &descriptorSet, 0, nullptr); vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayouts.pbrLayout, 6, 1, &skinDescriptorSet, 0, nullptr); vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, wireframe ? pipelines.wireframe : pipelines.solid); - glTFModel.draw(drawCmdBuffers[i], pipelineLayouts.pbrLayout,true); + glTFModel.draw(drawCmdBuffers[i], pipelineLayouts.pbrLayout); vkCmdEndRenderPass(drawCmdBuffers[i]); { diff --git a/littleRender/glTFModel.h b/homework/homework1/homework1.h similarity index 54% rename from littleRender/glTFModel.h rename to homework/homework1/homework1.h index ded215f..1385b9c 100644 --- a/littleRender/glTFModel.h +++ b/homework/homework1/homework1.h @@ -1,8 +1,9 @@ -#pragma once -#define TINYGLTF_IMPLEMENTATION -#define STB_IMAGE_IMPLEMENTATION -#define TINYGLTF_NO_STB_IMAGE_WRITE +#include +#include +#include +#include +#include #define GLM_FORCE_RADIANS #define GLM_FORCE_DEPTH_ZERO_TO_ONE @@ -10,10 +11,20 @@ #include #include +#define TINYGLTF_IMPLEMENTATION +#define STB_IMAGE_IMPLEMENTATION +#define TINYGLTF_NO_STB_IMAGE_WRITE +#ifdef VK_USE_PLATFORM_ANDROID_KHR + #define TINYGLTF_ANDROID_LOAD_FROM_ASSETS +#endif #include "tiny_gltf.h" + #include "vulkanexamplebase.h" +#define ENABLE_VALIDATION false + + // Contains everything required to render a glTF model in Vulkan // This class is heavily simplified (compared to glTF's feature set) but retains the basic glTF structure class VulkanglTFModel @@ -80,14 +91,14 @@ public: } glm::mat4 matrix; bool bAnimateNode = false; - + ~Node() { for (auto& child : children) { delete child; }; } - - + + }; // material data for pbr @@ -134,7 +145,7 @@ public: std::vector joints; vks::Buffer ssbo; VkDescriptorSet descriptorSet; - + }; struct AnimationSampler @@ -178,7 +189,7 @@ public: - //VulkanglTFModel(); + //VulkanglTFModel(); ~VulkanglTFModel() { for (auto node : nodes) { @@ -214,4 +225,180 @@ public: void updateAnimation(float deltaTime, vks::Buffer& buffer); void drawNode(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, VulkanglTFModel::Node* node, bool bPushConstants); void draw(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, bool flag); +}; + +class VulkanExample : public VulkanExampleBase +{ +public: + bool wireframe = false; + bool normalMapping = true; + bool ToneMapping = true; + bool pbrEnabled = true; + + VulkanglTFModel glTFModel; + + struct ShaderData { + vks::Buffer buffer; + struct Values { + glm::mat4 projection; + glm::mat4 model; + glm::vec4 lightPos = glm::vec4(5.0f, 5.0f, 5.0f, 1.0f); + glm::vec4 viewPos; + glm::vec4 bFlagSet = glm::vec4(0.0f, 0.0f, 0.0f, 0.0f); + } values; + vks::Buffer 
skinSSBO; + } shaderData; + + struct StagingBuffer { + VkBuffer buffer; + VkDeviceMemory memory; + } vertexStaging, indexStaging; + + struct Pipelines { + VkPipeline solid; + VkPipeline wireframe = VK_NULL_HANDLE; + VkPipeline toneMapping = VK_NULL_HANDLE; + } pipelines; + + struct PipelineLayouts + { + VkPipelineLayout pbrLayout; + VkPipelineLayout tonemappingLayout; + } pipelineLayouts; + + VkPipelineLayout pipelineLayout; + + VkDescriptorSet descriptorSet; + VkDescriptorSet skinDescriptorSet; + VkDescriptorSet tonemappingDescriptorSet = VK_NULL_HANDLE; + + struct FrameBufferAttachment + { + VkImage image; + VkDeviceMemory deviceMemory; + VkImageView imageView; + VkFormat format; + + + void destroy(VkDevice device) + { + vkDestroyImage(device, image, nullptr); + vkDestroyImageView(device, imageView,nullptr); + vkFreeMemory(device, deviceMemory, nullptr); + + } + }; + + struct FrameBuffer + { + int32_t width, height; + VkFramebuffer frameBuffer; + VkRenderPass renderPass; + void setSize(int32_t w, int32_t h) + { + this->width = w; + this->height = h; + } + void destroy(VkDevice device) + { + vkDestroyFramebuffer(device, frameBuffer, nullptr); + vkDestroyRenderPass(device, renderPass, nullptr); + } + }; + + struct PBRFrameBuffer { + FrameBufferAttachment color, depth; + FrameBuffer fbo; + bool bCreate = false; + } pbrFrameBuffer; + + VkSampler colorSampler; + + struct DescriptorSetLayouts { + VkDescriptorSetLayout matrices; + VkDescriptorSetLayout textures; + VkDescriptorSetLayout materialUniform; + VkDescriptorSetLayout ssbo; + VkDescriptorSetLayout jointMatrices; + } descriptorSetLayouts; + + struct IBLTextures + { + vks::TextureCubeMap skyboxCube; + vks::TextureCubeMap irradianceCube; + vks::TextureCubeMap prefilteredCube; + vks::Texture2D lutBrdf; + } ibltextures; + + struct OffScreen + { + VkImage image; + VkImageView view; + VkDeviceMemory memory; + VkFramebuffer framebuffer; + } offscreen; + + struct IrradiancePushBlock + { + glm::mat4 mvp; + // Sampling deltas + float deltaPhi = (2.0f * float(M_PI)) / 180.0f; + float deltaTheta = (0.5f * float(M_PI)) / 64.0f; + } irradiancePushBlock; + + struct PrefilterPushBlock { + glm::mat4 mvp; + float roughness; + uint32_t numSamples = 32u; + } prefilterPushBlock; + + VulkanglTFModel skyboxModel; + + VulkanExample(); + ~VulkanExample() + { + // Clean up used Vulkan resources + // Note : Inherited destructor cleans up resources stored in base class + vkDestroyPipeline(device, pipelines.solid, nullptr); + vkDestroyPipeline(device, pipelines.toneMapping, nullptr); + if (pipelines.wireframe != VK_NULL_HANDLE) { + vkDestroyPipeline(device, pipelines.wireframe, nullptr); + } + + vkDestroyPipelineLayout(device, pipelineLayouts.pbrLayout, nullptr); + vkDestroyPipelineLayout(device, pipelineLayouts.tonemappingLayout, nullptr); + vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.matrices, nullptr); + vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.textures, nullptr); + vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.materialUniform, nullptr); + vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.ssbo, nullptr); + ibltextures.irradianceCube.destroy(); + ibltextures.skyboxCube.destroy(); + ibltextures.prefilteredCube.destroy(); + ibltextures.lutBrdf.destroy(); + pbrFrameBuffer.color.destroy(device); + pbrFrameBuffer.depth.destroy(device); + pbrFrameBuffer.fbo.destroy(device); + vkDestroySampler(device, colorSampler, nullptr); + + shaderData.buffer.destroy(); + shaderData.skinSSBO.destroy(); + } + void 
loadglTFFile(std::string filename, VulkanglTFModel& model, bool bSkyboxFlag); + virtual void getEnabledFeatures(); + void createAttachment(VkFormat format, VkImageUsageFlagBits usage, FrameBufferAttachment* attachment, uint32_t width, uint32_t height); + virtual void setupFrameBuffer(); + void buildCommandBuffers(); + void loadAssets(); + void setupDescriptors(); + void preparePipelines(); + void CreateToneMappingPipeline(); + void GenerateIrradianceCubemap(); + void GeneratePrefilteredCubemap(); + void GenerateBRDFLUT(); + void prepareUniformBuffers(); + void updateUniformBuffers(); + void prepare(); + virtual void render(); + virtual void viewChanged(); + virtual void OnUpdateUIOverlay(vks::UIOverlay* overlay); }; \ No newline at end of file diff --git a/littleRender/glTFModel.cpp b/littleRender/glTFModel.cpp deleted file mode 100644 index 943aad6..0000000 --- a/littleRender/glTFModel.cpp +++ /dev/null @@ -1,490 +0,0 @@ -#include "glTFModel.h" - -/* - glTF loading functions - - The following functions take a glTF input model loaded via tinyglTF and convert all required data into our own structure - */ - -void VulkanglTFModel::loadImages(tinygltf::Model& input) -{ - // Images can be stored inside the glTF (which is the case for the sample model), so instead of directly - // loading them from disk, we fetch them from the glTF loader and upload the buffers - images.resize(input.images.size()); - for (size_t i = 0; i < input.images.size(); i++) { - tinygltf::Image& glTFImage = input.images[i]; - // Get the image data from the glTF loader - unsigned char* buffer = nullptr; - VkDeviceSize bufferSize = 0; - bool deleteBuffer = false; - // We convert RGB-only images to RGBA, as most devices don't support RGB-formats in Vulkan - if (glTFImage.component == 3) { - bufferSize = glTFImage.width * glTFImage.height * 4; - buffer = new unsigned char[bufferSize]; - unsigned char* rgba = buffer; - unsigned char* rgb = &glTFImage.image[0]; - for (size_t i = 0; i < glTFImage.width * glTFImage.height; ++i) { - memcpy(rgba, rgb, sizeof(unsigned char) * 3); - rgba += 4; - rgb += 3; - } - deleteBuffer = true; - } - else { - buffer = &glTFImage.image[0]; - bufferSize = glTFImage.image.size(); - } - // Load texture from image buffer - images[i].texture.fromBuffer(buffer, bufferSize, VK_FORMAT_R8G8B8A8_UNORM, glTFImage.width, glTFImage.height, vulkanDevice, copyQueue); - if (deleteBuffer) { - delete[] buffer; - } - } -} - -void VulkanglTFModel::loadTextures(tinygltf::Model& input) -{ - textures.resize(input.textures.size()); - for (size_t i = 0; i < input.textures.size(); i++) { - textures[i].imageIndex = input.textures[i].source; - } -} - -void VulkanglTFModel::loadAnimations(tinygltf::Model& input) -{ - animations.resize(input.animations.size()); - - for (size_t i = 0; i < input.animations.size(); ++i) - { - auto glTFAnimation = input.animations[i]; - animations[i].name = glTFAnimation.name; - - //Samplers - animations[i].samplers.resize(glTFAnimation.samplers.size()); - for (size_t j = 0; j < glTFAnimation.samplers.size(); ++j) - { - auto glTFSampler = glTFAnimation.samplers[j]; - auto& dstSampler = animations[i].samplers[j]; - dstSampler.interpolation = glTFSampler.interpolation; - - // Read sampler keyframe input time values - { - const auto& accessor = input.accessors[glTFSampler.input]; - const auto& bufferView = input.bufferViews[accessor.bufferView]; - const auto& buffer = input.buffers[bufferView.buffer]; - const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset]; - const float* 
buf = static_cast(dataPtr); - for (size_t index = 0; index < accessor.count; ++index) - { - dstSampler.inputs.push_back(buf[index]); - } - // Adjust animation's start and end times - for (auto input : animations[i].samplers[j].inputs) - { - if (input < animations[i].start) - { - animations[i].start = input; - }; - if (input > animations[i].end) - { - animations[i].end = input; - } - } - } - - // Read sampler keyframe output translate/rotate/scale values - { - const auto& accessor = input.accessors[glTFSampler.output]; - const auto& bufferView = input.bufferViews[accessor.bufferView]; - const auto& buffer = input.buffers[bufferView.buffer]; - const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset]; - switch (accessor.type) - { - case TINYGLTF_TYPE_VEC3: - { - const glm::vec3* buf = static_cast(dataPtr); - for (size_t index = 0; index < accessor.count; index++) - { - dstSampler.outputsVec4.push_back(glm::vec4(buf[index], 0.0f)); - } - break; - } - case TINYGLTF_TYPE_VEC4: - { - const glm::vec4* buf = static_cast(dataPtr); - for (size_t index = 0; index < accessor.count; index++) - { - dstSampler.outputsVec4.push_back(buf[index]); - } - break; - } - default: - { - std::cout << "unknown type" << std::endl; - break; - } - } - } - } - - animations[i].channels.resize(glTFAnimation.channels.size()); - for (size_t j = 0; j < glTFAnimation.channels.size(); ++j) - { - auto glTFChannel = glTFAnimation.channels[j]; - auto& dstChannel = animations[i].channels[j]; - dstChannel.path = glTFChannel.target_path; - dstChannel.samplerIndex = glTFChannel.sampler; - dstChannel.node = nodeFromIndex(glTFChannel.target_node); - } - } -} - -void VulkanglTFModel::loadMaterials(tinygltf::Model& input) -{ - materials.resize(input.materials.size()); - for (size_t i = 0; i < input.materials.size(); i++) { - // We only read the most basic properties required for our sample - tinygltf::Material glTFMaterial = input.materials[i]; - // Get the base color factor - if (glTFMaterial.values.find("baseColorFactor") != glTFMaterial.values.end()) { - materials[i].baseColorFactor = glm::make_vec4(glTFMaterial.values["baseColorFactor"].ColorFactor().data()); - } - // Get base color texture index - if (glTFMaterial.values.find("baseColorTexture") != glTFMaterial.values.end()) { - materials[i].baseColorTextureIndex = glTFMaterial.values["baseColorTexture"].TextureIndex(); - } - if (glTFMaterial.values.find("metallicRoughnessTexture") != glTFMaterial.values.end()) { - materials[i].matalicRoughTextureIndex = glTFMaterial.values["metallicRoughnessTexture"].TextureIndex(); - } - if (glTFMaterial.additionalValues.find("normalTexture") != glTFMaterial.additionalValues.end()) - { - materials[i].normalMapTextureIndex = glTFMaterial.additionalValues["normalTexture"].TextureIndex(); - } - if (glTFMaterial.emissiveTexture.index != -1) - { - materials[i].emissiveTextureIndex = glTFMaterial.emissiveTexture.index; - } - if (glTFMaterial.emissiveFactor.size() == 3) - { - materials[i].materialData.values.emissiveFactor = glm::make_vec3(glTFMaterial.emissiveFactor.data()); - } - - if (glTFMaterial.values.find("baseColorFactor") != glTFMaterial.values.end()) - { - materials[i].materialData.values.baseColorFactor = glm::make_vec4(glTFMaterial.values["baseColorFactor"].ColorFactor().data()); - } - } -} - -void VulkanglTFModel::loadNode(const tinygltf::Node& inputNode, const tinygltf::Model& input, VulkanglTFModel::Node* parent, uint32_t nodeIndex, std::vector& indexBuffer, std::vector& vertexBuffer) -{ - VulkanglTFModel::Node* node 
= new VulkanglTFModel::Node{}; - node->matrix = glm::mat4(1.0f); - node->parent = parent; - node->index = nodeIndex; - - // Get the local node matrix - // It's either made up from translation, rotation, scale or a 4x4 matrix - if (inputNode.translation.size() == 3) { - node->matrix = glm::translate(node->matrix, glm::vec3(glm::make_vec3(inputNode.translation.data()))); - } - if (inputNode.rotation.size() == 4) { - glm::quat q = glm::make_quat(inputNode.rotation.data()); - node->matrix *= glm::mat4(q); - } - if (inputNode.scale.size() == 3) { - node->matrix = glm::scale(node->matrix, glm::vec3(glm::make_vec3(inputNode.scale.data()))); - } - if (inputNode.matrix.size() == 16) { - node->matrix = glm::make_mat4x4(inputNode.matrix.data()); - }; - - // Load node's children - if (inputNode.children.size() > 0) { - for (size_t i = 0; i < inputNode.children.size(); i++) { - loadNode(input.nodes[inputNode.children[i]], input, node, inputNode.children[i], indexBuffer, vertexBuffer); - } - } - - // If the node contains mesh data, we load vertices and indices from the buffers - // In glTF this is done via accessors and buffer views - if (inputNode.mesh > -1) { - const tinygltf::Mesh mesh = input.meshes[inputNode.mesh]; - // Iterate through all primitives of this node's mesh - for (size_t i = 0; i < mesh.primitives.size(); i++) { - const tinygltf::Primitive& glTFPrimitive = mesh.primitives[i]; - uint32_t firstIndex = static_cast(indexBuffer.size()); - uint32_t vertexStart = static_cast(vertexBuffer.size()); - uint32_t indexCount = 0; - // Vertices - { - const float* positionBuffer = nullptr; - const float* normalsBuffer = nullptr; - const float* texCoordsBuffer = nullptr; - const float* tangentsBuffer = nullptr; - size_t vertexCount = 0; - - // Get buffer data for vertex positions - if (glTFPrimitive.attributes.find("POSITION") != glTFPrimitive.attributes.end()) { - const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("POSITION")->second]; - const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; - positionBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); - vertexCount = accessor.count; - } - // Get buffer data for vertex normals - if (glTFPrimitive.attributes.find("NORMAL") != glTFPrimitive.attributes.end()) { - const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("NORMAL")->second]; - const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; - normalsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); - } - // Get buffer data for vertex texture coordinates - // glTF supports multiple sets, we only load the first one - if (glTFPrimitive.attributes.find("TEXCOORD_0") != glTFPrimitive.attributes.end()) { - const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("TEXCOORD_0")->second]; - const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; - texCoordsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); - } - - if (glTFPrimitive.attributes.find("TANGENT") != glTFPrimitive.attributes.end()) - { - const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.attributes.find("TANGENT")->second]; - const tinygltf::BufferView& view = input.bufferViews[accessor.bufferView]; - tangentsBuffer = reinterpret_cast(&(input.buffers[view.buffer].data[accessor.byteOffset + view.byteOffset])); - } - - // Append data to model's 
vertex buffer - for (size_t v = 0; v < vertexCount; v++) { - Vertex vert{}; - vert.pos = glm::vec4(glm::make_vec3(&positionBuffer[v * 3]), 1.0f); - vert.normal = glm::normalize(glm::vec3(normalsBuffer ? glm::make_vec3(&normalsBuffer[v * 3]) : glm::vec3(0.0f))); - vert.uv = texCoordsBuffer ? glm::make_vec2(&texCoordsBuffer[v * 2]) : glm::vec3(0.0f); - vert.tangent = tangentsBuffer ? glm::normalize(glm::make_vec3(&tangentsBuffer[v * 4])) : glm::vec3(0.0f); - vert.color = glm::vec3(1.0f, 1.0f, nodeIndex);//Temp set index in color attribute - vertexBuffer.push_back(vert); - } - } - // Indices - { - const tinygltf::Accessor& accessor = input.accessors[glTFPrimitive.indices]; - const tinygltf::BufferView& bufferView = input.bufferViews[accessor.bufferView]; - const tinygltf::Buffer& buffer = input.buffers[bufferView.buffer]; - - indexCount += static_cast(accessor.count); - - // glTF supports different component types of indices - switch (accessor.componentType) { - case TINYGLTF_PARAMETER_TYPE_UNSIGNED_INT: { - const uint32_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); - for (size_t index = 0; index < accessor.count; index++) { - indexBuffer.push_back(buf[index] + vertexStart); - } - break; - } - case TINYGLTF_PARAMETER_TYPE_UNSIGNED_SHORT: { - const uint16_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); - for (size_t index = 0; index < accessor.count; index++) { - indexBuffer.push_back(buf[index] + vertexStart); - } - break; - } - case TINYGLTF_PARAMETER_TYPE_UNSIGNED_BYTE: { - const uint8_t* buf = reinterpret_cast(&buffer.data[accessor.byteOffset + bufferView.byteOffset]); - for (size_t index = 0; index < accessor.count; index++) { - indexBuffer.push_back(buf[index] + vertexStart); - } - break; - } - default: - std::cerr << "Index component type " << accessor.componentType << " not supported!" 
<< std::endl; - return; - } - } - Primitive primitive{}; - primitive.firstIndex = firstIndex; - primitive.indexCount = indexCount; - primitive.materialIndex = glTFPrimitive.material; - node->mesh.primitives.push_back(primitive); - } - } - - if (parent) { - parent->children.push_back(node); - } - else { - nodes.push_back(node); - } -} - -VulkanglTFModel::Node* VulkanglTFModel::findNode(Node* parent, uint32_t index) -{ - Node* nodeFound = nullptr; - if (parent->index == index) - { - return parent; - } - for (auto& child : parent->children) - { - nodeFound = findNode(child, index); - if (nodeFound) - { - break; - } - } - return nodeFound; -} - -VulkanglTFModel::Node* VulkanglTFModel::nodeFromIndex(uint32_t index) -{ - Node* nodeFound = nullptr; - for (auto& node : nodes) - { - nodeFound = findNode(node, index); - if (nodeFound) - { - break; - } - } - return nodeFound; -} - -void VulkanglTFModel::updateAnimation(float deltaTime, vks::Buffer& buffer) -{ - constexpr uint32_t activeAnimation = 0; - Animation& animation = animations[activeAnimation]; - animation.currentTime += deltaTime; - if (animation.currentTime > animation.end) - { - animation.currentTime -= animation.end; - } - - for (auto& channel : animation.channels) - { - auto& sampler = animation.samplers[channel.samplerIndex]; - for (size_t i = 0; i < sampler.inputs.size() - 1; ++i) - { - if (sampler.interpolation != "LINEAR") - { - std::cout << "This sample only supports linear interpolations\n"; - continue; - } - if ((animation.currentTime >= sampler.inputs[i]) && (animation.currentTime <= sampler.inputs[i + 1])) - { - float ratio = (animation.currentTime - sampler.inputs[i]) / (sampler.inputs[i + 1] - sampler.inputs[i]); - if (channel.path == "translation") - { - channel.node->translation = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], ratio); - channel.node->bAnimateNode = true; - } - if (channel.path == "rotation") - { - glm::quat q1; - q1.x = sampler.outputsVec4[i].x; - q1.y = sampler.outputsVec4[i].y; - q1.z = sampler.outputsVec4[i].z; - q1.w = sampler.outputsVec4[i].w; - - glm::quat q2; - q2.x = sampler.outputsVec4[i + 1].x; - q2.y = sampler.outputsVec4[i + 1].y; - q2.z = sampler.outputsVec4[i + 1].z; - q2.w = sampler.outputsVec4[i + 1].w; - - channel.node->rotation = glm::normalize(glm::slerp(q1, q2, ratio)); - channel.node->bAnimateNode = true; - } - if (channel.path == "scale") - { - channel.node->scale = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], ratio); - channel.node->bAnimateNode = true; - } - } - } - } - std::vector nodeMatrics(nodeCount); - for (auto& node : nodes) - { - updateNodeMatrix(node, nodeMatrics); - } - buffer.copyTo(nodeMatrics.data(), nodeCount * sizeof(glm::mat4)); -} - -void VulkanglTFModel::updateNodeMatrix(Node* node, std::vector& nodeMatrics) -{ - nodeMatrics[node->index] = getNodeMatrix(node); - for (auto& child : node->children) - { - updateNodeMatrix(child, nodeMatrics); - } -} - -glm::mat4 VulkanglTFModel::getNodeMatrix(Node* node) -{ - glm::mat4 nodeMatrix = node->getLocalMatrix(); - Node* currentParent = node->parent; - while (currentParent) - { - nodeMatrix = currentParent->getLocalMatrix() * nodeMatrix; - currentParent = currentParent->parent; - } - return nodeMatrix; -} - -/* - glTF rendering functions -*/ - -// Draw a single node including child nodes (if present) -void VulkanglTFModel::drawNode(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, VulkanglTFModel::Node* node, bool bPushConstants) -{ - if (node->mesh.primitives.size() > 0) { - // Pass 
the node's matrix via push constants - // Traverse the node hierarchy to the top-most parent to get the final matrix of the current node - glm::mat4 nodeMatrix = node->matrix; - VulkanglTFModel::Node* currentParent = node->parent; - while (currentParent) { - nodeMatrix = currentParent->matrix * nodeMatrix; - currentParent = currentParent->parent; - } - - for (VulkanglTFModel::Primitive& primitive : node->mesh.primitives) { - if (primitive.indexCount > 0) { - // Get the texture index for this primitive - if (textures.size() > 0) - { - VulkanglTFModel::Texture texture = textures[materials[primitive.materialIndex].baseColorTextureIndex]; - auto normalMap = textures[materials[primitive.materialIndex].normalMapTextureIndex]; - auto roughMetalMap = textures[materials[primitive.materialIndex].matalicRoughTextureIndex]; - - if (materials[primitive.materialIndex].emissiveTextureIndex >= 0) - { - auto emissiveMap = textures[materials[primitive.materialIndex].emissiveTextureIndex]; - vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 4, 1, &images[emissiveMap.imageIndex].descriptorSet, 0, nullptr); - } - - // Bind the descriptor for the current primitive's texture - vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 1, 1, &images[texture.imageIndex].descriptorSet, 0, nullptr); - vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 2, 1, &images[normalMap.imageIndex].descriptorSet, 0, nullptr); - vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 3, 1, &images[roughMetalMap.imageIndex].descriptorSet, 0, nullptr); - vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 5, 1, &materials[primitive.materialIndex].materialData.descriptorSet, 0, nullptr); - } - vkCmdDrawIndexed(commandBuffer, primitive.indexCount, 1, primitive.firstIndex, 0, 0); - } - } - } - for (auto& child : node->children) { - drawNode(commandBuffer, pipelineLayout, child, bPushConstants); - } -} - -// Draw the glTF scene starting at the top-level-nodes -void VulkanglTFModel::draw(VkCommandBuffer commandBuffer, VkPipelineLayout pipelineLayout, bool flag = true) -{ - // All vertices and indices are stored in single buffers, so we only need to bind once - VkDeviceSize offsets[1] = { 0 }; - vkCmdBindVertexBuffers(commandBuffer, 0, 1, &vertices.buffer, offsets); - vkCmdBindIndexBuffer(commandBuffer, indices.buffer, 0, VK_INDEX_TYPE_UINT32); - // Render all nodes at top-level - for (auto& node : nodes) { - drawNode(commandBuffer, pipelineLayout, node, flag); - } -} \ No newline at end of file diff --git a/littleRender/render.h b/littleRender/render.h deleted file mode 100644 index 7a52829..0000000 --- a/littleRender/render.h +++ /dev/null @@ -1,202 +0,0 @@ - -#include -#include -#include -#include -#include - -#define GLM_FORCE_RADIANS -#define GLM_FORCE_DEPTH_ZERO_TO_ONE -#include -#include -#include - -#ifdef VK_USE_PLATFORM_ANDROID_KHR - #define TINYGLTF_ANDROID_LOAD_FROM_ASSETS -#endif - -#include "tiny_gltf.h" -#include "glTFModel.h" -#include "vulkanexamplebase.h" - -#define ENABLE_VALIDATION false - - - -class VulkanExample : public VulkanExampleBase -{ -public: - bool wireframe = false; - bool normalMapping = true; - bool ToneMapping = true; - bool pbrEnabled = true; - - VulkanglTFModel glTFModel; - - struct ShaderData { - vks::Buffer buffer; - struct Values { - glm::mat4 projection; - glm::mat4 model; - glm::vec4 lightPos = glm::vec4(5.0f, 5.0f, 5.0f, 
1.0f); - glm::vec4 viewPos; - glm::vec4 bFlagSet = glm::vec4(0.0f, 0.0f, 0.0f, 0.0f); - } values; - vks::Buffer skinSSBO; - } shaderData; - - struct StagingBuffer { - VkBuffer buffer; - VkDeviceMemory memory; - } vertexStaging, indexStaging; - - struct Pipelines { - VkPipeline solid; - VkPipeline wireframe = VK_NULL_HANDLE; - VkPipeline toneMapping = VK_NULL_HANDLE; - } pipelines; - - struct PipelineLayouts - { - VkPipelineLayout pbrLayout; - VkPipelineLayout tonemappingLayout; - } pipelineLayouts; - - VkPipelineLayout pipelineLayout; - - VkDescriptorSet descriptorSet; - VkDescriptorSet skinDescriptorSet; - VkDescriptorSet tonemappingDescriptorSet = VK_NULL_HANDLE; - - struct FrameBufferAttachment - { - VkImage image; - VkDeviceMemory deviceMemory; - VkImageView imageView; - VkFormat format; - - - void destroy(VkDevice device) - { - vkDestroyImage(device, image, nullptr); - vkDestroyImageView(device, imageView,nullptr); - vkFreeMemory(device, deviceMemory, nullptr); - - } - }; - - struct FrameBuffer - { - int32_t width, height; - VkFramebuffer frameBuffer; - VkRenderPass renderPass; - void setSize(int32_t w, int32_t h) - { - this->width = w; - this->height = h; - } - void destroy(VkDevice device) - { - vkDestroyFramebuffer(device, frameBuffer, nullptr); - vkDestroyRenderPass(device, renderPass, nullptr); - } - }; - - struct PBRFrameBuffer { - FrameBufferAttachment color, depth; - FrameBuffer fbo; - bool bCreate = false; - } pbrFrameBuffer; - - VkSampler colorSampler; - - struct DescriptorSetLayouts { - VkDescriptorSetLayout matrices; - VkDescriptorSetLayout textures; - VkDescriptorSetLayout materialUniform; - VkDescriptorSetLayout ssbo; - VkDescriptorSetLayout jointMatrices; - } descriptorSetLayouts; - - struct IBLTextures - { - vks::TextureCubeMap skyboxCube; - vks::TextureCubeMap irradianceCube; - vks::TextureCubeMap prefilteredCube; - vks::Texture2D lutBrdf; - } ibltextures; - - struct OffScreen - { - VkImage image; - VkImageView view; - VkDeviceMemory memory; - VkFramebuffer framebuffer; - } offscreen; - - struct IrradiancePushBlock - { - glm::mat4 mvp; - // Sampling deltas - float deltaPhi = (2.0f * float(M_PI)) / 180.0f; - float deltaTheta = (0.5f * float(M_PI)) / 64.0f; - } irradiancePushBlock; - - struct PrefilterPushBlock { - glm::mat4 mvp; - float roughness; - uint32_t numSamples = 32u; - } prefilterPushBlock; - - VulkanglTFModel skyboxModel; - - VulkanExample(); - ~VulkanExample() - { - // Clean up used Vulkan resources - // Note : Inherited destructor cleans up resources stored in base class - vkDestroyPipeline(device, pipelines.solid, nullptr); - vkDestroyPipeline(device, pipelines.toneMapping, nullptr); - if (pipelines.wireframe != VK_NULL_HANDLE) { - vkDestroyPipeline(device, pipelines.wireframe, nullptr); - } - - vkDestroyPipelineLayout(device, pipelineLayouts.pbrLayout, nullptr); - vkDestroyPipelineLayout(device, pipelineLayouts.tonemappingLayout, nullptr); - vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.matrices, nullptr); - vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.textures, nullptr); - vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.materialUniform, nullptr); - vkDestroyDescriptorSetLayout(device, descriptorSetLayouts.ssbo, nullptr); - ibltextures.irradianceCube.destroy(); - ibltextures.skyboxCube.destroy(); - ibltextures.prefilteredCube.destroy(); - ibltextures.lutBrdf.destroy(); - pbrFrameBuffer.color.destroy(device); - pbrFrameBuffer.depth.destroy(device); - pbrFrameBuffer.fbo.destroy(device); - vkDestroySampler(device, 
colorSampler, nullptr); - - shaderData.buffer.destroy(); - shaderData.skinSSBO.destroy(); - } - - - void loadglTFFile(std::string filename, VulkanglTFModel& model, bool bSkyboxFlag); - virtual void getEnabledFeatures(); - void createAttachment(VkFormat format, VkImageUsageFlagBits usage, FrameBufferAttachment* attachment, uint32_t width, uint32_t height); - virtual void setupFrameBuffer(); - void buildCommandBuffers(); - void loadAssets(); - void setupDescriptors(); - void preparePipelines(); - void CreateToneMappingPipeline(); - void GenerateIrradianceCubemap(); - void GeneratePrefilteredCubemap(); - void GenerateBRDFLUT(); - void prepareUniformBuffers(); - void updateUniformBuffers(); - void prepare(); - virtual void render(); - virtual void viewChanged(); - virtual void OnUpdateUIOverlay(vks::UIOverlay* overlay); -}; \ No newline at end of file
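The class declarations above reference loadglTFFile(), whose definition lies outside this excerpt. For orientation, a minimal sketch of how the VulkanglTFModel load* functions in this patch are typically driven by such a loader; the tinygltf entry points are the library's standard API, but member names such as vulkanDevice, copyQueue and queue are assumptions, and the real implementation in this commit may differ:

    // Sketch: parsing a glTF file with tinygltf and feeding the model's load* functions
    void VulkanExample::loadglTFFile(std::string filename, VulkanglTFModel& model, bool bSkyboxFlag)
    {
        tinygltf::Model    glTFInput;
        tinygltf::TinyGLTF gltfContext;
        std::string error, warning;

        bool fileLoaded = gltfContext.LoadASCIIFromFile(&glTFInput, &error, &warning, filename);

        model.vulkanDevice = vulkanDevice;   // assumed members, as in the base glTF samples
        model.copyQueue    = queue;

        std::vector<uint32_t>                indexBuffer;
        std::vector<VulkanglTFModel::Vertex> vertexBuffer;

        if (fileLoaded) {
            model.loadImages(glTFInput);
            model.loadMaterials(glTFInput);
            model.loadTextures(glTFInput);
            const tinygltf::Scene& scene = glTFInput.scenes[0];
            for (size_t i = 0; i < scene.nodes.size(); i++) {
                const tinygltf::Node node = glTFInput.nodes[scene.nodes[i]];
                model.loadNode(node, glTFInput, nullptr, scene.nodes[i], indexBuffer, vertexBuffer);
            }
            if (!bSkyboxFlag) {
                model.loadAnimations(glTFInput);   // assumption: the skybox mesh needs no animation data
            }
        }
        // Uploading indexBuffer/vertexBuffer into device-local buffers via staging would follow here.
    }

loadAssets() would then presumably call this once for the animated scene model (glTFModel) and once for the skybox (skyboxModel, with bSkyboxFlag set), after which updateAnimation() can stream the per-node matrices into shaderData.skinSSBO each frame.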