From ce5d5d2aff50f9ce8159d609824eb2391bed9ef2 Mon Sep 17 00:00:00 2001
From: MrPurple666
Date: Sun, 18 May 2025 17:45:32 +0000
Subject: [PATCH] THIS NEEDS TO BE CHECKED BEFORE MERGE: RAII fix, initial MSAA, some fixes for memory misallocation (#116)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• MSAA fixes: Fixes upload/download for MSAA textures by staging through temporary non-MSAA images. Ensures compatibility with color formats and falls back to the regular path for depth/stencil formats.
• Memory misallocation fix: Adds checks for null/zero-length operations in memory management and improves cleanup to avoid crashes (related to crash issues caused by misallocation on RP5 and 865). A sketch of the guard pattern follows the diff.
• Vulkan initialization (RAII): Largely rewrites how Vulkan initializes to avoid crashes, so objects are now created and destroyed in the correct order (thanks @crueter for the initial fix). A sketch of the ordering rule follows the diff.

• Please check before merging:
- Test MSAA workflows (especially color/depth transitions and low-memory cases).
- Verify memory operations (e.g., unmapping zero-length regions).
- Check Vulkan object lifetimes and platform-specific behavior.
- Check other platforms beyond Android.

Why is everything in one PR? Taken together this is one big fix; by checking the points above we can split it into a branch for each item and verify them individually. I'm not standing still while I'm away, I'm just out of time for now.

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/116
Co-authored-by: MrPurple666
Co-committed-by: MrPurple666
---
 src/common/free_region_manager.h              |  15 +
 src/common/host_memory.cpp                    |  31 +-
 .../renderer_vulkan/renderer_vulkan.cpp       |  48 +--
 .../renderer_vulkan/renderer_vulkan.h         |  27 +-
 .../renderer_vulkan/vk_texture_cache.cpp      | 310 +++++++++++++-----
 .../renderer_vulkan/vk_texture_cache.h        |   3 +-
 6 files changed, 314 insertions(+), 120 deletions(-)

diff --git a/src/common/free_region_manager.h b/src/common/free_region_manager.h
index 2e590d6094..2ff629b359 100644
--- a/src/common/free_region_manager.h
+++ b/src/common/free_region_manager.h
@@ -12,12 +12,22 @@ class FreeRegionManager {
 public:
     explicit FreeRegionManager() = default;
     ~FreeRegionManager() = default;
+
+    // Clear all free regions
+    void Clear() {
+        std::scoped_lock lk(m_mutex);
+        m_free_regions.clear();
+    }
 
     void SetAddressSpace(void* start, size_t size) {
         this->FreeBlock(start, size);
     }
 
     std::pair<void*, size_t> FreeBlock(void* block_ptr, size_t size) {
+        if (block_ptr == nullptr || size == 0) {
+            return {nullptr, 0};
+        }
+
         std::scoped_lock lk(m_mutex);
 
         // Check to see if we are adjacent to any regions.
@@ -41,6 +51,11 @@ public:
     }
 
     void AllocateBlock(void* block_ptr, size_t size) {
+        // Skip if pointer is null or size is zero
+        if (block_ptr == nullptr || size == 0) {
+            return;
+        }
+
         std::scoped_lock lk(m_mutex);
 
         auto address = reinterpret_cast<uintptr_t>(block_ptr);
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e0b5a6a67c..d06522ec2c 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -491,6 +491,12 @@ public:
         // Intersect the range with our address space.
         AdjustMap(&virtual_offset, &length);
 
+        // Skip if length is zero after adjustment
+        if (length == 0) {
+            LOG_DEBUG(HW_Memory, "Skipping zero-length mapping at virtual_offset={}", virtual_offset);
+            return;
+        }
+
         // We are removing a placeholder.
         free_manager.AllocateBlock(virtual_base + virtual_offset, length);
 
@@ -520,13 +526,21 @@ public:
         // Intersect the range with our address space.
AdjustMap(&virtual_offset, &length); + // Skip if length is zero after adjustment + if (length == 0) { + return; + } + // Merge with any adjacent placeholder mappings. auto [merged_pointer, merged_size] = free_manager.FreeBlock(virtual_base + virtual_offset, length); - void* ret = mmap(merged_pointer, merged_size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno)); + // Only attempt to mmap if we have a valid pointer and size + if (merged_pointer != nullptr && merged_size > 0) { + void* ret = mmap(merged_pointer, merged_size, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno)); + } } void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) { @@ -576,19 +590,26 @@ public: private: /// Release all resources in the object void Release() { + // Make sure we release resources in the correct order + // First clear the free region manager to avoid any dangling references + free_manager.Clear(); + if (virtual_map_base != MAP_FAILED) { int ret = munmap(virtual_map_base, virtual_size); ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno)); + virtual_map_base = reinterpret_cast(MAP_FAILED); } if (backing_base != MAP_FAILED) { int ret = munmap(backing_base, backing_size); ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno)); + backing_base = reinterpret_cast(MAP_FAILED); } if (fd != -1) { int ret = close(fd); ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno)); + fd = -1; } } @@ -686,8 +707,10 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, ASSERT(virtual_offset + length <= virtual_size); ASSERT(host_offset + length <= backing_size); if (length == 0 || !virtual_base || !impl) { + LOG_ERROR(HW_Memory, "Invalid mapping operation: virtual_base or impl is null"); return; } + LOG_INFO(HW_Memory, "Mapping memory: virtual_offset={}, host_offset={}, length={}", virtual_offset, host_offset, length); impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms); } @@ -696,8 +719,10 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) ASSERT(length % PageAlignment == 0); ASSERT(virtual_offset + length <= virtual_size); if (length == 0 || !virtual_base || !impl) { + LOG_ERROR(HW_Memory, "Invalid unmapping operation: virtual_base or impl is null"); return; } + LOG_INFO(HW_Memory, "Unmapping memory: virtual_offset={}, length={}", virtual_offset, length); impl->Unmap(virtual_offset + virtual_base_offset, length); } diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 30f5ff7a75..f5b73d63ef 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -110,27 +110,16 @@ try , device_memory(device_memory_) , gpu(gpu_) , library(OpenLibrary(context.get())) - , - // Create raw Vulkan instance first - instance(CreateInstance(*library, - dld, - VK_API_VERSION_1_1, - render_window.GetWindowInfo().type, - Settings::values.renderer_debug.GetValue())) - , - // Now create RAII wrappers for the resources in the correct order - managed_instance(MakeManagedInstance(instance, dld)) - , - // Create debug messenger if debug is enabled - debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance) - : vk::DebugUtilsMessenger{}) - , managed_debug_messenger(Settings::values.renderer_debug - ? 
MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld) - : ManagedDebugUtilsMessenger{}) - , - // Create surface - surface(CreateSurface(instance, render_window.GetWindowInfo())) - , managed_surface(MakeManagedSurface(surface, instance, dld)) + , dld() + // Initialize resources in the same order as they are declared in the header + , instance(CreateInstance(*library, + dld, + VK_API_VERSION_1_1, + render_window.GetWindowInfo().type, + Settings::values.renderer_debug.GetValue())) + , debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance) + : vk::DebugUtilsMessenger{}) + , surface(CreateSurface(instance, render_window.GetWindowInfo())) , device(CreateDevice(instance, dld, *surface)) , memory_allocator(device) , state_tracker() @@ -172,22 +161,19 @@ try scheduler, PresentFiltersForAppletCapture) , rasterizer(render_window, gpu, device_memory, device, memory_allocator, state_tracker, scheduler) - , applet_frame() { + , turbo_mode() + , applet_frame() + , managed_instance(MakeManagedInstance(instance, dld)) + , managed_debug_messenger(Settings::values.renderer_debug + ? MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld) + : ManagedDebugUtilsMessenger{}) + , managed_surface(MakeManagedSurface(surface, instance, dld)) { if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) { turbo_mode.emplace(instance, dld); scheduler.RegisterOnSubmit([this] { turbo_mode->QueueSubmitted(); }); } -#ifndef ANDROID - // Release ownership from the old instance and surface - instance.release(); - surface.release(); - if (Settings::values.renderer_debug) { - debug_messenger.release(); - } -#endif - Report(); } catch (const vk::Exception& exception) { LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what()); diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index c1e6d5db7f..ec4215253f 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -76,32 +76,37 @@ private: std::shared_ptr library; vk::InstanceDispatch dld; - // Keep original handles for compatibility with existing code + // Order of member variables determines destruction order (reverse of declaration) + // Critical Vulkan resources should be declared in proper dependency order + + // Base Vulkan instance, debugging, and surface vk::Instance instance; - // RAII wrapper for instance - ManagedInstance managed_instance; - vk::DebugUtilsMessenger debug_messenger; - // RAII wrapper for debug messenger - ManagedDebugUtilsMessenger managed_debug_messenger; - vk::SurfaceKHR surface; - // RAII wrapper for surface - ManagedSurface managed_surface; - + + // Device and core resources Device device; MemoryAllocator memory_allocator; StateTracker state_tracker; Scheduler scheduler; Swapchain swapchain; PresentManager present_manager; + + // Rendering components BlitScreen blit_swapchain; BlitScreen blit_capture; BlitScreen blit_applet; RasterizerVulkan rasterizer; - std::optional turbo_mode; + // Optional components + std::optional turbo_mode; Frame applet_frame; + + // RAII wrappers - must be destroyed before their raw handles + // so they are declared after to be destroyed first + ManagedInstance managed_instance; + ManagedDebugUtilsMessenger managed_debug_messenger; + ManagedSurface managed_surface; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp 
b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index 0b98d71b97..ebd8b65760 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -1523,22 +1523,76 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas Image::~Image() = default; void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset, - std::span copies) { + std::span copies) { // TODO: Move this to another API const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); if (is_rescaled) { ScaleDown(true); } - scheduler->RequestOutsideRenderPassOperationContext(); - auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask); - const VkBuffer src_buffer = buffer; - const VkImage vk_image = *original_image; - const VkImageAspectFlags vk_aspect_mask = aspect_mask; - const bool is_initialized = std::exchange(initialized, true); - scheduler->Record([src_buffer, vk_image, vk_aspect_mask, is_initialized, - vk_copies](vk::CommandBuffer cmdbuf) { - CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, is_initialized, vk_copies); - }); + + // Handle MSAA upload if necessary + if (info.num_samples > 1 && runtime->CanUploadMSAA()) { + // Only use MSAA copy pass for color formats + // Depth/stencil formats need special handling + if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { + // Create a temporary non-MSAA image to upload the data first + ImageInfo temp_info = info; + temp_info.num_samples = 1; + + // Create image with same usage flags as the target image to avoid validation errors + VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info); + image_ci.usage = original_image.UsageFlags(); + vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci); + + // Upload to the temporary non-MSAA image + scheduler->RequestOutsideRenderPassOperationContext(); + auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask); + const VkBuffer src_buffer = buffer; + const VkImage temp_vk_image = *temp_image; + const VkImageAspectFlags vk_aspect_mask = aspect_mask; + scheduler->Record([src_buffer, temp_vk_image, vk_aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) { + CopyBufferToImage(cmdbuf, src_buffer, temp_vk_image, vk_aspect_mask, false, vk_copies); + }); + + // Now use MSAACopyPass to convert from non-MSAA to MSAA + std::vector image_copies; + for (const auto& copy : copies) { + VideoCommon::ImageCopy image_copy; + image_copy.src_offset = {0, 0, 0}; // Use zero offset for source + image_copy.dst_offset = copy.image_offset; + image_copy.src_subresource = copy.image_subresource; + image_copy.dst_subresource = copy.image_subresource; + image_copy.extent = copy.image_extent; + image_copies.push_back(image_copy); + } + + // Create a wrapper Image for the temporary image + Image temp_wrapper(*runtime, temp_info, 0, 0); + temp_wrapper.original_image = std::move(temp_image); + temp_wrapper.current_image = &Image::original_image; + temp_wrapper.aspect_mask = aspect_mask; + temp_wrapper.initialized = true; + + // Use MSAACopyPass to convert from non-MSAA to MSAA + runtime->msaa_copy_pass->CopyImage(*this, temp_wrapper, image_copies, false); + std::exchange(initialized, true); + return; + } + // For depth/stencil formats, fall back to regular upload + } else { + // Regular non-MSAA upload + scheduler->RequestOutsideRenderPassOperationContext(); + auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask); + const VkBuffer src_buffer = buffer; + const VkImage vk_image = 
*original_image; + const VkImageAspectFlags vk_aspect_mask = aspect_mask; + const bool is_initialized = std::exchange(initialized, true); + scheduler->Record([src_buffer, vk_image, vk_aspect_mask, is_initialized, + vk_copies](vk::CommandBuffer cmdbuf) { + CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, is_initialized, vk_copies); + }); + } + if (is_rescaled) { ScaleUp(); } @@ -1560,75 +1614,185 @@ void Image::DownloadMemory(VkBuffer buffer, size_t offset, } void Image::DownloadMemory(std::span buffers_span, std::span offsets_span, - std::span copies) { + std::span copies) { const bool is_rescaled = True(flags & ImageFlagBits::Rescaled); if (is_rescaled) { ScaleDown(); } - boost::container::small_vector buffers_vector{}; - boost::container::small_vector, 8> - vk_copies; - for (size_t index = 0; index < buffers_span.size(); index++) { - buffers_vector.emplace_back(buffers_span[index]); - vk_copies.emplace_back( - TransformBufferImageCopies(copies, offsets_span[index], aspect_mask)); - } - scheduler->RequestOutsideRenderPassOperationContext(); - scheduler->Record([buffers = std::move(buffers_vector), image = *original_image, - aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) { - const VkImageMemoryBarrier read_barrier{ - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .pNext = nullptr, - .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, - .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, - .oldLayout = VK_IMAGE_LAYOUT_GENERAL, - .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .image = image, - .subresourceRange{ - .aspectMask = aspect_mask_, - .baseMipLevel = 0, - .levelCount = VK_REMAINING_MIP_LEVELS, - .baseArrayLayer = 0, - .layerCount = VK_REMAINING_ARRAY_LAYERS, - }, - }; - cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, - 0, read_barrier); - for (size_t index = 0; index < buffers.size(); index++) { - cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index], - vk_copies[index]); + // Handle MSAA download if necessary + if (info.num_samples > 1 && runtime->msaa_copy_pass) { + // Only use MSAA copy pass for color formats + // Depth/stencil formats need special handling + if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { + // Create a temporary non-MSAA image to download the data + ImageInfo temp_info = info; + temp_info.num_samples = 1; + + // Create image with same usage flags as the target image to avoid validation errors + VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info); + image_ci.usage = original_image.UsageFlags(); + vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci); + + // Create a wrapper Image for the temporary image + Image temp_wrapper(*runtime, temp_info, 0, 0); + temp_wrapper.original_image = std::move(temp_image); + temp_wrapper.current_image = &Image::original_image; + temp_wrapper.aspect_mask = aspect_mask; + temp_wrapper.initialized = true; + + // Convert from MSAA to non-MSAA using MSAACopyPass + std::vector image_copies; + for (const auto& copy : copies) { + VideoCommon::ImageCopy image_copy; + image_copy.src_offset = copy.image_offset; + image_copy.dst_offset = copy.image_offset; + image_copy.src_subresource = copy.image_subresource; + image_copy.dst_subresource = copy.image_subresource; + image_copy.extent = copy.image_extent; + image_copies.push_back(image_copy); + } + + // Use MSAACopyPass to convert from MSAA to non-MSAA 
+ runtime->msaa_copy_pass->CopyImage(temp_wrapper, *this, image_copies, true); + + // Now download from the non-MSAA image + boost::container::small_vector buffers_vector{}; + boost::container::small_vector, 8> + vk_copies; + for (size_t index = 0; index < buffers_span.size(); index++) { + buffers_vector.emplace_back(buffers_span[index]); + vk_copies.emplace_back( + TransformBufferImageCopies(copies, offsets_span[index], aspect_mask)); + } + + scheduler->RequestOutsideRenderPassOperationContext(); + scheduler->Record([buffers = std::move(buffers_vector), image = *temp_wrapper.original_image, + aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) { + const VkImageMemoryBarrier read_barrier{ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange{ + .aspectMask = aspect_mask_, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }; + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, read_barrier); + + for (size_t index = 0; index < buffers.size(); index++) { + cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index], + vk_copies[index]); + } + + const VkMemoryBarrier memory_write_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + }; + const VkImageMemoryBarrier image_write_barrier{ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_GENERAL, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange{ + .aspectMask = aspect_mask_, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }; + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + 0, memory_write_barrier, nullptr, image_write_barrier); + }); + return; } + // For depth/stencil formats, fall back to regular download + } else { + // Regular non-MSAA download + boost::container::small_vector buffers_vector{}; + boost::container::small_vector, 8> + vk_copies; + for (size_t index = 0; index < buffers_span.size(); index++) { + buffers_vector.emplace_back(buffers_span[index]); + vk_copies.emplace_back( + TransformBufferImageCopies(copies, offsets_span[index], aspect_mask)); + } + scheduler->RequestOutsideRenderPassOperationContext(); + scheduler->Record([buffers = std::move(buffers_vector), image = *original_image, + aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) { + const VkImageMemoryBarrier read_barrier{ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, + .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + 
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange{ + .aspectMask = aspect_mask_, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }; + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, read_barrier); + + for (size_t index = 0; index < buffers.size(); index++) { + cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index], + vk_copies[index]); + } + + const VkMemoryBarrier memory_write_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + }; + const VkImageMemoryBarrier image_write_barrier{ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = 0, + .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_GENERAL, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange{ + .aspectMask = aspect_mask_, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }; + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + 0, memory_write_barrier, nullptr, image_write_barrier); + }); + } - const VkMemoryBarrier memory_write_barrier{ - .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, - .pNext = nullptr, - .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, - .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, - }; - const VkImageMemoryBarrier image_write_barrier{ - .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, - .pNext = nullptr, - .srcAccessMask = 0, - .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, - .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, - .newLayout = VK_IMAGE_LAYOUT_GENERAL, - .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .image = image, - .subresourceRange{ - .aspectMask = aspect_mask_, - .baseMipLevel = 0, - .levelCount = VK_REMAINING_MIP_LEVELS, - .baseArrayLayer = 0, - .layerCount = VK_REMAINING_ARRAY_LAYERS, - }, - }; - cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - 0, memory_write_barrier, nullptr, image_write_barrier); - }); if (is_rescaled) { ScaleUp(true); } diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 18d20b2db5..cd11cc8fc7 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h @@ -82,8 +82,7 @@ public: } bool CanUploadMSAA() const noexcept { - // TODO: Implement buffer to MSAA uploads - return false; + return msaa_copy_pass.operator bool(); } void AccelerateImageUpload(Image&, const StagingBufferRef&,
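
As promised in the Vulkan initialization bullet, here is a minimal standalone sketch of the ordering rule the RAII rework in renderer_vulkan.h/.cpp relies on. This is plain illustrative C++, not eden code (Instance/Surface/Renderer are stand-in names): non-static data members are constructed in declaration order and destroyed in reverse, regardless of how the constructor's initializer list is written. That is why the managed_* wrappers are now declared after the raw handles they guard, and why the initializer list was reordered to match the header.

```cpp
#include <cstdio>

// Stand-ins for a handle and a resource that depends on it.
struct Instance {
    Instance() { std::puts("instance created"); }
    ~Instance() { std::puts("instance destroyed"); }
};

struct Surface {
    explicit Surface(const Instance&) { std::puts("surface created"); }
    ~Surface() { std::puts("surface destroyed"); }
};

struct Renderer {
    // Construction follows declaration order (instance, then surface);
    // destruction runs in reverse (surface, then instance), so the dependent
    // member can never outlive the handle it was created from.
    Instance instance;
    Surface surface{instance};
};

int main() {
    Renderer renderer;
    // Prints:
    //   instance created
    //   surface created
    //   surface destroyed
    //   instance destroyed
}
```

The same rule is behind the initializer-list change in renderer_vulkan.cpp: members are now listed in the order the header declares them, so the order the code reads is the order that actually executes, and the old `#ifndef ANDROID` release() workaround is no longer needed.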
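
And, as referenced in the memory bullet, a sketch of the guard-clause pattern added to FreeRegionManager and the HostMemory implementation. Names here are hypothetical and no eden or platform APIs are used; the point is only that null pointers and zero-length ranges are rejected up front, before any bookkeeping or mmap/munmap-style call can run on them.

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for an unmap-style operation: bail out early on
// null/zero-length input instead of forwarding it to the platform call.
bool UnmapRegion(void* base, std::size_t length) {
    if (base == nullptr || length == 0) {
        std::puts("skipping null/zero-length unmap");
        return false;
    }
    // ... real code would unmap here and update the free-region bookkeeping ...
    return true;
}

int main() {
    int page = 0;
    UnmapRegion(nullptr, 4096); // rejected: null pointer
    UnmapRegion(&page, 0);      // rejected: zero length
}
```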