diff --git a/src/common/free_region_manager.h b/src/common/free_region_manager.h
index 2ff629b359..2e590d6094 100644
--- a/src/common/free_region_manager.h
+++ b/src/common/free_region_manager.h
@@ -12,22 +12,12 @@ class FreeRegionManager {
 public:
     explicit FreeRegionManager() = default;
     ~FreeRegionManager() = default;
-
-    // Clear all free regions
-    void Clear() {
-        std::scoped_lock lk(m_mutex);
-        m_free_regions.clear();
-    }
 
     void SetAddressSpace(void* start, size_t size) {
         this->FreeBlock(start, size);
     }
 
     std::pair<void*, size_t> FreeBlock(void* block_ptr, size_t size) {
-        if (block_ptr == nullptr || size == 0) {
-            return {nullptr, 0};
-        }
-
         std::scoped_lock lk(m_mutex);
 
         // Check to see if we are adjacent to any regions.
@@ -51,11 +41,6 @@ public:
     }
 
     void AllocateBlock(void* block_ptr, size_t size) {
-        // Skip if pointer is null or size is zero
-        if (block_ptr == nullptr || size == 0) {
-            return;
-        }
-
         std::scoped_lock lk(m_mutex);
 
         auto address = reinterpret_cast<uintptr_t>(block_ptr);
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index d06522ec2c..e0b5a6a67c 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -491,12 +491,6 @@ public:
         // Intersect the range with our address space.
         AdjustMap(&virtual_offset, &length);
 
-        // Skip if length is zero after adjustment
-        if (length == 0) {
-            LOG_DEBUG(HW_Memory, "Skipping zero-length mapping at virtual_offset={}", virtual_offset);
-            return;
-        }
-
         // We are removing a placeholder.
         free_manager.AllocateBlock(virtual_base + virtual_offset, length);
 
@@ -526,21 +520,13 @@ public:
         // Intersect the range with our address space.
         AdjustMap(&virtual_offset, &length);
 
-        // Skip if length is zero after adjustment
-        if (length == 0) {
-            return;
-        }
-
         // Merge with any adjacent placeholder mappings.
         auto [merged_pointer, merged_size] =
             free_manager.FreeBlock(virtual_base + virtual_offset, length);
 
-        // Only attempt to mmap if we have a valid pointer and size
-        if (merged_pointer != nullptr && merged_size > 0) {
-            void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
-                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-            ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
-        }
+        void* ret = mmap(merged_pointer, merged_size, PROT_NONE,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+        ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
     }
 
     void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
@@ -590,26 +576,19 @@ public:
 private:
     /// Release all resources in the object
     void Release() {
-        // Make sure we release resources in the correct order
-        // First clear the free region manager to avoid any dangling references
-        free_manager.Clear();
-
         if (virtual_map_base != MAP_FAILED) {
             int ret = munmap(virtual_map_base, virtual_size);
             ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
-            virtual_map_base = reinterpret_cast(MAP_FAILED);
         }
 
         if (backing_base != MAP_FAILED) {
             int ret = munmap(backing_base, backing_size);
             ASSERT_MSG(ret == 0, "munmap failed: {}", strerror(errno));
-            backing_base = reinterpret_cast(MAP_FAILED);
         }
 
         if (fd != -1) {
            int ret = close(fd);
            ASSERT_MSG(ret == 0, "close failed: {}", strerror(errno));
-            fd = -1;
         }
     }
@@ -707,10 +686,8 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     ASSERT(virtual_offset + length <= virtual_size);
     ASSERT(host_offset + length <= backing_size);
     if (length == 0 || !virtual_base || !impl) {
-        LOG_ERROR(HW_Memory, "Invalid mapping operation: virtual_base or impl is null");
         return;
     }
-    LOG_INFO(HW_Memory, "Mapping memory: virtual_offset={}, host_offset={}, length={}", virtual_offset, host_offset, length);
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
@@ -719,10 +696,8 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap)
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
-        LOG_ERROR(HW_Memory, "Invalid unmapping operation: virtual_base or impl is null");
         return;
     }
-    LOG_INFO(HW_Memory, "Unmapping memory: virtual_offset={}, length={}", virtual_offset, length);
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
 
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index f5b73d63ef..30f5ff7a75 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -110,16 +110,27 @@ try
       , device_memory(device_memory_)
       , gpu(gpu_)
       , library(OpenLibrary(context.get()))
-      , dld()
-      // Initialize resources in the same order as they are declared in the header
-      , instance(CreateInstance(*library,
-                                dld,
-                                VK_API_VERSION_1_1,
-                                render_window.GetWindowInfo().type,
-                                Settings::values.renderer_debug.GetValue()))
-      , debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance)
-                                                         : vk::DebugUtilsMessenger{})
-      , surface(CreateSurface(instance, render_window.GetWindowInfo()))
+      ,
+      // Create raw Vulkan instance first
+      instance(CreateInstance(*library,
+                              dld,
+                              VK_API_VERSION_1_1,
+                              render_window.GetWindowInfo().type,
+                              Settings::values.renderer_debug.GetValue()))
+      ,
+      // Now create RAII wrappers for the resources in the correct order
+      managed_instance(MakeManagedInstance(instance, dld))
+      ,
+      // Create debug messenger if debug is enabled
+      debug_messenger(Settings::values.renderer_debug ? CreateDebugUtilsCallback(instance)
+                                                      : vk::DebugUtilsMessenger{})
+      , managed_debug_messenger(Settings::values.renderer_debug
+                                    ? MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld)
+                                    : ManagedDebugUtilsMessenger{})
+      ,
+      // Create surface
+      surface(CreateSurface(instance, render_window.GetWindowInfo()))
+      , managed_surface(MakeManagedSurface(surface, instance, dld))
       , device(CreateDevice(instance, dld, *surface))
       , memory_allocator(device)
       , state_tracker()
@@ -161,19 +172,22 @@ try
                      scheduler, PresentFiltersForAppletCapture)
       , rasterizer(render_window, gpu, device_memory, device, memory_allocator, state_tracker,
                    scheduler)
-      , turbo_mode()
-      , applet_frame()
-      , managed_instance(MakeManagedInstance(instance, dld))
-      , managed_debug_messenger(Settings::values.renderer_debug
-                                    ? MakeManagedDebugUtilsMessenger(debug_messenger, instance, dld)
-                                    : ManagedDebugUtilsMessenger{})
-      , managed_surface(MakeManagedSurface(surface, instance, dld)) {
+      , applet_frame() {
     if (Settings::values.renderer_force_max_clock.GetValue() && device.ShouldBoostClocks()) {
         turbo_mode.emplace(instance, dld);
         scheduler.RegisterOnSubmit([this] { turbo_mode->QueueSubmitted(); });
     }
 
+#ifndef ANDROID
+    // Release ownership from the old instance and surface
+    instance.release();
+    surface.release();
+    if (Settings::values.renderer_debug) {
+        debug_messenger.release();
+    }
+#endif
+
     Report();
 } catch (const vk::Exception& exception) {
     LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index ec4215253f..c1e6d5db7f 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -76,37 +76,32 @@ private:
     std::shared_ptr<Common::DynamicLibrary> library;
     vk::InstanceDispatch dld;
 
-    // Order of member variables determines destruction order (reverse of declaration)
-    // Critical Vulkan resources should be declared in proper dependency order
-
-    // Base Vulkan instance, debugging, and surface
+    // Keep original handles for compatibility with existing code
     vk::Instance instance;
+    // RAII wrapper for instance
+    ManagedInstance managed_instance;
+
     vk::DebugUtilsMessenger debug_messenger;
+    // RAII wrapper for debug messenger
+    ManagedDebugUtilsMessenger managed_debug_messenger;
+
     vk::SurfaceKHR surface;
-
-    // Device and core resources
+    // RAII wrapper for surface
+    ManagedSurface managed_surface;
+
     Device device;
     MemoryAllocator memory_allocator;
     StateTracker state_tracker;
     Scheduler scheduler;
     Swapchain swapchain;
     PresentManager present_manager;
-
-    // Rendering components
     BlitScreen blit_swapchain;
     BlitScreen blit_capture;
     BlitScreen blit_applet;
     RasterizerVulkan rasterizer;
-
-    // Optional components
    std::optional<TurboMode> turbo_mode;
+
     Frame applet_frame;
-
-    // RAII wrappers - must be destroyed before their raw handles
-    // so they are declared after to be destroyed first
-    ManagedInstance managed_instance;
-    ManagedDebugUtilsMessenger managed_debug_messenger;
-    ManagedSurface managed_surface;
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index ebd8b65760..0b98d71b97 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1523,76 +1523,22 @@ Image::Image(const VideoCommon::NullImageParams& params) : VideoCommon::ImageBas
 Image::~Image() = default;
 
 void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
-                        std::span<const VideoCommon::BufferImageCopy> copies) {
+                         std::span<const VideoCommon::BufferImageCopy> copies) {
     // TODO: Move this to another API
     const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
     if (is_rescaled) {
         ScaleDown(true);
     }
-
-    // Handle MSAA upload if necessary
-    if (info.num_samples > 1 && runtime->CanUploadMSAA()) {
-        // Only use MSAA copy pass for color formats
-        // Depth/stencil formats need special handling
-        if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
-            // Create a temporary non-MSAA image to upload the data first
-            ImageInfo temp_info = info;
-            temp_info.num_samples = 1;
-
-            // Create image with same usage flags as the target image to avoid validation errors
-            VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info);
-            image_ci.usage = original_image.UsageFlags();
-            vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci);
-
-            // Upload to the temporary non-MSAA image
-            scheduler->RequestOutsideRenderPassOperationContext();
-            auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
-            const VkBuffer src_buffer = buffer;
-            const VkImage temp_vk_image = *temp_image;
-            const VkImageAspectFlags vk_aspect_mask = aspect_mask;
-            scheduler->Record([src_buffer, temp_vk_image, vk_aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
-                CopyBufferToImage(cmdbuf, src_buffer, temp_vk_image, vk_aspect_mask, false, vk_copies);
-            });
-
-            // Now use MSAACopyPass to convert from non-MSAA to MSAA
-            std::vector<VideoCommon::ImageCopy> image_copies;
-            for (const auto& copy : copies) {
-                VideoCommon::ImageCopy image_copy;
-                image_copy.src_offset = {0, 0, 0}; // Use zero offset for source
-                image_copy.dst_offset = copy.image_offset;
-                image_copy.src_subresource = copy.image_subresource;
-                image_copy.dst_subresource = copy.image_subresource;
-                image_copy.extent = copy.image_extent;
-                image_copies.push_back(image_copy);
-            }
-
-            // Create a wrapper Image for the temporary image
-            Image temp_wrapper(*runtime, temp_info, 0, 0);
-            temp_wrapper.original_image = std::move(temp_image);
-            temp_wrapper.current_image = &Image::original_image;
-            temp_wrapper.aspect_mask = aspect_mask;
-            temp_wrapper.initialized = true;
-
-            // Use MSAACopyPass to convert from non-MSAA to MSAA
-            runtime->msaa_copy_pass->CopyImage(*this, temp_wrapper, image_copies, false);
-            std::exchange(initialized, true);
-            return;
-        }
-        // For depth/stencil formats, fall back to regular upload
-    } else {
-        // Regular non-MSAA upload
-        scheduler->RequestOutsideRenderPassOperationContext();
-        auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
-        const VkBuffer src_buffer = buffer;
-        const VkImage vk_image = *original_image;
-        const VkImageAspectFlags vk_aspect_mask = aspect_mask;
-        const bool is_initialized = std::exchange(initialized, true);
-        scheduler->Record([src_buffer, vk_image, vk_aspect_mask, is_initialized,
-                           vk_copies](vk::CommandBuffer cmdbuf) {
-            CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, is_initialized, vk_copies);
-        });
-    }
-
+    scheduler->RequestOutsideRenderPassOperationContext();
+    auto vk_copies = TransformBufferImageCopies(copies, offset, aspect_mask);
+    const VkBuffer src_buffer = buffer;
+    const VkImage vk_image = *original_image;
+    const VkImageAspectFlags vk_aspect_mask = aspect_mask;
+    const bool is_initialized = std::exchange(initialized, true);
+    scheduler->Record([src_buffer, vk_image, vk_aspect_mask, is_initialized,
+                       vk_copies](vk::CommandBuffer cmdbuf) {
+        CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, is_initialized, vk_copies);
+    });
     if (is_rescaled) {
         ScaleUp();
     }
@@ -1614,185 +1560,75 @@ void Image::DownloadMemory(VkBuffer buffer, size_t offset,
 }
 
 void Image::DownloadMemory(std::span<VkBuffer> buffers_span, std::span<size_t> offsets_span,
-                          std::span<const VideoCommon::BufferImageCopy> copies) {
+                           std::span<const VideoCommon::BufferImageCopy> copies) {
     const bool is_rescaled = True(flags & ImageFlagBits::Rescaled);
     if (is_rescaled) {
         ScaleDown();
     }
-
-    // Handle MSAA download if necessary
-    if (info.num_samples > 1 && runtime->msaa_copy_pass) {
-        // Only use MSAA copy pass for color formats
-        // Depth/stencil formats need special handling
-        if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
-            // Create a temporary non-MSAA image to download the data
-            ImageInfo temp_info = info;
-            temp_info.num_samples = 1;
-
-            // Create image with same usage flags as the target image to avoid validation errors
-            VkImageCreateInfo image_ci = MakeImageCreateInfo(runtime->device, temp_info);
-            image_ci.usage = original_image.UsageFlags();
-            vk::Image temp_image = runtime->memory_allocator.CreateImage(image_ci);
-
-            // Create a wrapper Image for the temporary image
-            Image temp_wrapper(*runtime, temp_info, 0, 0);
-            temp_wrapper.original_image = std::move(temp_image);
-            temp_wrapper.current_image = &Image::original_image;
-            temp_wrapper.aspect_mask = aspect_mask;
-            temp_wrapper.initialized = true;
-
-            // Convert from MSAA to non-MSAA using MSAACopyPass
-            std::vector<VideoCommon::ImageCopy> image_copies;
-            for (const auto& copy : copies) {
-                VideoCommon::ImageCopy image_copy;
-                image_copy.src_offset = copy.image_offset;
-                image_copy.dst_offset = copy.image_offset;
-                image_copy.src_subresource = copy.image_subresource;
-                image_copy.dst_subresource = copy.image_subresource;
-                image_copy.extent = copy.image_extent;
-                image_copies.push_back(image_copy);
-            }
-
-            // Use MSAACopyPass to convert from MSAA to non-MSAA
-            runtime->msaa_copy_pass->CopyImage(temp_wrapper, *this, image_copies, true);
-
-            // Now download from the non-MSAA image
-            boost::container::small_vector buffers_vector{};
-            boost::container::small_vector, 8>
-                vk_copies;
-            for (size_t index = 0; index < buffers_span.size(); index++) {
-                buffers_vector.emplace_back(buffers_span[index]);
-                vk_copies.emplace_back(
-                    TransformBufferImageCopies(copies, offsets_span[index], aspect_mask));
-            }
-
-            scheduler->RequestOutsideRenderPassOperationContext();
-            scheduler->Record([buffers = std::move(buffers_vector), image = *temp_wrapper.original_image,
-                               aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
-                const VkImageMemoryBarrier read_barrier{
-                    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-                    .pNext = nullptr,
-                    .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                    .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
-                    .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
-                    .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                    .image = image,
-                    .subresourceRange{
-                        .aspectMask = aspect_mask_,
-                        .baseMipLevel = 0,
-                        .levelCount = VK_REMAINING_MIP_LEVELS,
-                        .baseArrayLayer = 0,
-                        .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                    },
-                };
-                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
-                                       0, read_barrier);
-
-                for (size_t index = 0; index < buffers.size(); index++) {
-                    cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index],
-                                             vk_copies[index]);
-                }
-
-                const VkMemoryBarrier memory_write_barrier{
-                    .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
-                    .pNext = nullptr,
-                    .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                    .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
-                };
-                const VkImageMemoryBarrier image_write_barrier{
-                    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-                    .pNext = nullptr,
-                    .srcAccessMask = 0,
-                    .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                    .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                    .newLayout = VK_IMAGE_LAYOUT_GENERAL,
-                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                    .image = image,
-                    .subresourceRange{
-                        .aspectMask = aspect_mask_,
-                        .baseMipLevel = 0,
-                        .levelCount = VK_REMAINING_MIP_LEVELS,
-                        .baseArrayLayer = 0,
-                        .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                    },
-                };
-                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                                       0, memory_write_barrier, nullptr, image_write_barrier);
-            });
-            return;
-        }
-        // For depth/stencil formats, fall back to regular download
-    } else {
-        // Regular non-MSAA download
-        boost::container::small_vector buffers_vector{};
-        boost::container::small_vector, 8>
-            vk_copies;
-        for (size_t index = 0; index < buffers_span.size(); index++) {
-            buffers_vector.emplace_back(buffers_span[index]);
-            vk_copies.emplace_back(
-                TransformBufferImageCopies(copies, offsets_span[index], aspect_mask));
-        }
-        scheduler->RequestOutsideRenderPassOperationContext();
-        scheduler->Record([buffers = std::move(buffers_vector), image = *original_image,
-                           aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
-            const VkImageMemoryBarrier read_barrier{
-                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-                .pNext = nullptr,
-                .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
-                .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
-                .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .image = image,
-                .subresourceRange{
-                    .aspectMask = aspect_mask_,
-                    .baseMipLevel = 0,
-                    .levelCount = VK_REMAINING_MIP_LEVELS,
-                    .baseArrayLayer = 0,
-                    .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                },
-            };
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
-                                   0, read_barrier);
-
-            for (size_t index = 0; index < buffers.size(); index++) {
-                cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index],
-                                         vk_copies[index]);
-            }
-
-            const VkMemoryBarrier memory_write_barrier{
-                .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
-                .pNext = nullptr,
-                .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
-            };
-            const VkImageMemoryBarrier image_write_barrier{
-                .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
-                .pNext = nullptr,
-                .srcAccessMask = 0,
-                .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
-                .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                .newLayout = VK_IMAGE_LAYOUT_GENERAL,
-                .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-                .image = image,
-                .subresourceRange{
-                    .aspectMask = aspect_mask_,
-                    .baseMipLevel = 0,
-                    .levelCount = VK_REMAINING_MIP_LEVELS,
-                    .baseArrayLayer = 0,
-                    .layerCount = VK_REMAINING_ARRAY_LAYERS,
-                },
-            };
-            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                                   0, memory_write_barrier, nullptr, image_write_barrier);
-        });
-    }
+    boost::container::small_vector buffers_vector{};
+    boost::container::small_vector, 8>
+        vk_copies;
+    for (size_t index = 0; index < buffers_span.size(); index++) {
+        buffers_vector.emplace_back(buffers_span[index]);
+        vk_copies.emplace_back(
+            TransformBufferImageCopies(copies, offsets_span[index], aspect_mask));
+    }
+    scheduler->RequestOutsideRenderPassOperationContext();
+    scheduler->Record([buffers = std::move(buffers_vector), image = *original_image,
+                       aspect_mask_ = aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
+        const VkImageMemoryBarrier read_barrier{
+            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+            .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .image = image,
+            .subresourceRange{
+                .aspectMask = aspect_mask_,
+                .baseMipLevel = 0,
+                .levelCount = VK_REMAINING_MIP_LEVELS,
+                .baseArrayLayer = 0,
+                .layerCount = VK_REMAINING_ARRAY_LAYERS,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               0, read_barrier);
+        for (size_t index = 0; index < buffers.size(); index++) {
+            cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffers[index],
+                                     vk_copies[index]);
+        }
+
+        const VkMemoryBarrier memory_write_barrier{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+        };
+        const VkImageMemoryBarrier image_write_barrier{
+            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = 0,
+            .dstAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+            .newLayout = VK_IMAGE_LAYOUT_GENERAL,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .image = image,
+            .subresourceRange{
+                .aspectMask = aspect_mask_,
+                .baseMipLevel = 0,
+                .levelCount = VK_REMAINING_MIP_LEVELS,
+                .baseArrayLayer = 0,
+                .layerCount = VK_REMAINING_ARRAY_LAYERS,
+            },
+        };
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               0, memory_write_barrier, nullptr, image_write_barrier);
+    });
     if (is_rescaled) {
         ScaleUp(true);
     }
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index cd11cc8fc7..18d20b2db5 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -82,7 +82,8 @@ public:
     }
 
     bool CanUploadMSAA() const noexcept {
-        return msaa_copy_pass.operator bool();
+        // TODO: Implement buffer to MSAA uploads
+        return false;
     }
 
     void AccelerateImageUpload(Image&, const StagingBufferRef&,