Diffstat (limited to 'libs/hwui/renderthread/VulkanManager.cpp')
-rw-r--r--  libs/hwui/renderthread/VulkanManager.cpp | 203
1 file changed, 109 insertions(+), 94 deletions(-)
diff --git a/libs/hwui/renderthread/VulkanManager.cpp b/libs/hwui/renderthread/VulkanManager.cpp
index ba70afc8b8d2..e93824dfbd30 100644
--- a/libs/hwui/renderthread/VulkanManager.cpp
+++ b/libs/hwui/renderthread/VulkanManager.cpp
@@ -20,15 +20,18 @@
 #include <EGL/eglext.h>
 #include <GrBackendSemaphore.h>
 #include <GrBackendSurface.h>
-#include <GrContext.h>
+#include <GrDirectContext.h>
 #include <GrTypes.h>
 #include <android/sync.h>
 #include <ui/FatVector.h>
 #include <vk/GrVkExtensions.h>
 #include <vk/GrVkTypes.h>
 
+#include <cstring>
+
 #include "Properties.h"
 #include "RenderThread.h"
+#include "pipeline/skia/ShaderCache.h"
 #include "renderstate/RenderState.h"
 #include "utils/TraceUtils.h"
@@ -53,16 +56,39 @@ static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& fe
     }
 }
 
+GrVkGetProc VulkanManager::sSkiaGetProp = [](const char* proc_name, VkInstance instance,
+                                             VkDevice device) {
+    if (device != VK_NULL_HANDLE) {
+        if (strcmp("vkQueueSubmit", proc_name) == 0) {
+            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueSubmit;
+        } else if (strcmp("vkQueueWaitIdle", proc_name) == 0) {
+            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueWaitIdle;
+        }
+        return vkGetDeviceProcAddr(device, proc_name);
+    }
+    return vkGetInstanceProcAddr(instance, proc_name);
+};
+
 #define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
 #define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
 #define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)
 
-void VulkanManager::destroy() {
-    if (VK_NULL_HANDLE != mCommandPool) {
-        mDestroyCommandPool(mDevice, mCommandPool, nullptr);
-        mCommandPool = VK_NULL_HANDLE;
+sp<VulkanManager> VulkanManager::getInstance() {
+    // cache a weakptr to the context to enable a second thread to share the same vulkan state
+    static wp<VulkanManager> sWeakInstance = nullptr;
+    static std::mutex sLock;
+
+    std::lock_guard _lock{sLock};
+    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
+    if (!vulkanManager.get()) {
+        vulkanManager = new VulkanManager();
+        sWeakInstance = vulkanManager;
     }
+    return vulkanManager;
+}
+
+VulkanManager::~VulkanManager() {
     if (mDevice != VK_NULL_HANDLE) {
         mDeviceWaitIdle(mDevice);
         mDestroyDevice(mDevice, nullptr);
@@ -73,7 +99,6 @@ void VulkanManager::destroy() {
     }
 
     mGraphicsQueue = VK_NULL_HANDLE;
-    mPresentQueue = VK_NULL_HANDLE;
     mDevice = VK_NULL_HANDLE;
     mPhysicalDevice = VK_NULL_HANDLE;
     mInstance = VK_NULL_HANDLE;
@@ -180,10 +205,6 @@ void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFe
     }
     LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);
 
-    // All physical devices and queue families on Android must be capable of
-    // presentation with any native window. So just use the first one.
-    mPresentQueueIndex = 0;
-
     {
         uint32_t extensionCount = 0;
         err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
@@ -203,14 +224,7 @@ void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFe
         LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
     }
 
-    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
-        if (device != VK_NULL_HANDLE) {
-            return vkGetDeviceProcAddr(device, proc_name);
-        }
-        return vkGetInstanceProcAddr(instance, proc_name);
-    };
-
-    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
+    grExtensions.init(sSkiaGetProp, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                       mInstanceExtensions.data(), mDeviceExtensions.size(),
                       mDeviceExtensions.data());
@@ -277,31 +291,21 @@ void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFe
         queueNextPtr = &queuePriorityCreateInfo;
     }
 
-    const VkDeviceQueueCreateInfo queueInfo[2] = {
-            {
-                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
-                    queueNextPtr,                                // pNext
-                    0,                                           // VkDeviceQueueCreateFlags
-                    mGraphicsQueueIndex,                         // queueFamilyIndex
-                    1,                                           // queueCount
-                    queuePriorities,                             // pQueuePriorities
-            },
-            {
-                    VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
-                    queueNextPtr,                                // pNext
-                    0,                                           // VkDeviceQueueCreateFlags
-                    mPresentQueueIndex,                          // queueFamilyIndex
-                    1,                                           // queueCount
-                    queuePriorities,                             // pQueuePriorities
-            }};
-    uint32_t queueInfoCount = (mPresentQueueIndex != mGraphicsQueueIndex) ? 2 : 1;
+    const VkDeviceQueueCreateInfo queueInfo = {
+            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
+            queueNextPtr,                                // pNext
+            0,                                           // VkDeviceQueueCreateFlags
+            mGraphicsQueueIndex,                         // queueFamilyIndex
+            1,                                           // queueCount
+            queuePriorities,                             // pQueuePriorities
+    };
 
     const VkDeviceCreateInfo deviceInfo = {
             VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
             &features,                             // pNext
             0,                                     // VkDeviceCreateFlags
-            queueInfoCount,                        // queueCreateInfoCount
-            queueInfo,                             // pQueueCreateInfos
+            1,                                     // queueCreateInfoCount
+            &queueInfo,                            // pQueueCreateInfos
             0,                                     // layerCount
             nullptr,                               // ppEnabledLayerNames
             (uint32_t)mDeviceExtensions.size(),    // extensionCount
@@ -348,34 +352,13 @@ void VulkanManager::initialize() {
 
     mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
 
-    // create the command pool for the command buffers
-    if (VK_NULL_HANDLE == mCommandPool) {
-        VkCommandPoolCreateInfo commandPoolInfo;
-        memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
-        commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
-        // this needs to be on the render queue
-        commandPoolInfo.queueFamilyIndex = mGraphicsQueueIndex;
-        commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
-        SkDEBUGCODE(VkResult res =)
-                mCreateCommandPool(mDevice, &commandPoolInfo, nullptr, &mCommandPool);
-        SkASSERT(VK_SUCCESS == res);
-    }
-    LOG_ALWAYS_FATAL_IF(mCommandPool == VK_NULL_HANDLE);
-
-    mGetDeviceQueue(mDevice, mPresentQueueIndex, 0, &mPresentQueue);
-
     if (Properties::enablePartialUpdates && Properties::useBufferAge) {
         mSwapBehavior = SwapBehavior::BufferAge;
     }
 }
 
-sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
-    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
-        if (device != VK_NULL_HANDLE) {
-            return vkGetDeviceProcAddr(device, proc_name);
-        }
-        return vkGetInstanceProcAddr(instance, proc_name);
-    };
+sk_sp<GrDirectContext> VulkanManager::createContext(const GrContextOptions& options,
+                                                    ContextType contextType) {
     GrVkBackendContext backendContext;
     backendContext.fInstance = mInstance;
@@ -386,9 +369,9 @@ sk_sp<GrContext> VulkanManager::createContext(const GrContextOptions& options) {
     backendContext.fMaxAPIVersion = mAPIVersion;
     backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
-    backendContext.fGetProc = std::move(getProc);
+    backendContext.fGetProc = sSkiaGetProp;
 
-    return GrContext::MakeVulkan(backendContext, options);
+    return GrDirectContext::MakeVulkan(backendContext, options);
 }
 
 VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
@@ -459,7 +442,7 @@ Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
             // The following flush blocks the GPU immediately instead of waiting for other
             // drawing ops. It seems dequeue_fence is not respected otherwise.
             // TODO: remove the flush after finding why backendSemaphore is not working.
-            bufferInfo->skSurface->flush();
+            bufferInfo->skSurface->flushAndSubmit();
         }
     }
 }
@@ -494,17 +477,10 @@ static void destroy_semaphore(void* context) {
     }
 }
 
-void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
-    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
-        ATRACE_NAME("Finishing GPU work");
-        mDeviceWaitIdle(mDevice);
-    }
-
-    VulkanSurface::NativeBufferInfo* bufferInfo = surface->getCurrentBufferInfo();
-    if (!bufferInfo) {
-        // If VulkanSurface::dequeueNativeBuffer failed earlier, then swapBuffers is a no-op.
-        return;
-    }
+void VulkanManager::finishFrame(SkSurface* surface) {
+    ATRACE_NAME("Vulkan finish frame");
+    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
+             "finishFrame already has an outstanding semaphore");
 
     VkExportSemaphoreCreateInfo exportInfo;
     exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
@@ -517,48 +493,82 @@ void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect)
     semaphoreInfo.flags = 0;
     VkSemaphore semaphore;
     VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
-    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to create semaphore");
+    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::makeSwapSemaphore(): Failed to create semaphore");
 
     GrBackendSemaphore backendSemaphore;
     backendSemaphore.initVulkan(semaphore);
 
+    GrFlushInfo flushInfo;
+    if (err == VK_SUCCESS) {
+        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
+        flushInfo.fNumSemaphores = 1;
+        flushInfo.fSignalSemaphores = &backendSemaphore;
+        flushInfo.fFinishedProc = destroy_semaphore;
+        flushInfo.fFinishedContext = mDestroySemaphoreContext;
+    } else {
+        semaphore = VK_NULL_HANDLE;
+    }
+    GrSemaphoresSubmitted submitted =
+            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
+    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
+    ALOGE_IF(!context, "Surface is not backed by gpu");
+    context->submit();
+    if (semaphore != VK_NULL_HANDLE) {
+        if (submitted == GrSemaphoresSubmitted::kYes) {
+            mSwapSemaphore = semaphore;
+        } else {
+            destroy_semaphore(mDestroySemaphoreContext);
+            mDestroySemaphoreContext = nullptr;
+        }
+    }
+    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);
+}
+
+void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
+    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
+        ATRACE_NAME("Finishing GPU work");
+        mDeviceWaitIdle(mDevice);
+    }
+
     int fenceFd = -1;
-    DestroySemaphoreInfo* destroyInfo =
-            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
-    GrSemaphoresSubmitted submitted = bufferInfo->skSurface->flush(
-            SkSurface::BackendSurfaceAccess::kPresent, kNone_GrFlushFlags, 1, &backendSemaphore,
-            destroy_semaphore, destroyInfo);
-    if (submitted == GrSemaphoresSubmitted::kYes) {
+    if (mSwapSemaphore != VK_NULL_HANDLE) {
         VkSemaphoreGetFdInfoKHR getFdInfo;
         getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
         getFdInfo.pNext = nullptr;
-        getFdInfo.semaphore = semaphore;
+        getFdInfo.semaphore = mSwapSemaphore;
         getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
 
-        err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
+        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
         ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
     } else {
         ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");
+
+        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
         mQueueWaitIdle(mGraphicsQueue);
     }
 
-    destroy_semaphore(destroyInfo);
+    destroy_semaphore(mDestroySemaphoreContext);
 
     surface->presentCurrentBuffer(dirtyRect, fenceFd);
+    mSwapSemaphore = VK_NULL_HANDLE;
+    mDestroySemaphoreContext = nullptr;
 }
 
 void VulkanManager::destroySurface(VulkanSurface* surface) {
     // Make sure all submit commands have finished before starting to destroy objects.
-    if (VK_NULL_HANDLE != mPresentQueue) {
-        mQueueWaitIdle(mPresentQueue);
+    if (VK_NULL_HANDLE != mGraphicsQueue) {
+        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
+        mQueueWaitIdle(mGraphicsQueue);
     }
     mDeviceWaitIdle(mDevice);
 
     delete surface;
 }
 
-VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
+VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
+                                            ColorMode colorMode,
                                             sk_sp<SkColorSpace> surfaceColorSpace,
-                                            SkColorType surfaceColorType, GrContext* grContext,
+                                            SkColorType surfaceColorType,
+                                            GrDirectContext* grContext,
                                             uint32_t extraBuffers) {
     LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
     if (!window) {
@@ -569,7 +579,7 @@ VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode col
                                  *this, extraBuffers);
 }
 
-status_t VulkanManager::fenceWait(int fence, GrContext* grContext) {
+status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
     if (!hasVkContext()) {
         ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
         return INVALID_OPERATION;
@@ -612,12 +622,12 @@ status_t VulkanManager::fenceWait(int fence, GrContext* grContext) {
 
     // Skia takes ownership of the semaphore and will delete it once the wait has finished.
     grContext->wait(1, &beSemaphore);
-    grContext->flush();
+    grContext->flushAndSubmit();
 
     return OK;
 }
 
-status_t VulkanManager::createReleaseFence(int* nativeFence, GrContext* grContext) {
+status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
     *nativeFence = -1;
     if (!hasVkContext()) {
         ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
@@ -648,8 +658,13 @@ status_t VulkanManager::createReleaseFence(int* nativeFence, GrContex
     // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore callback
     // which will remove its ref to the semaphore. The VulkanManager must still release its ref,
     // when it is done with the semaphore.
-    GrSemaphoresSubmitted submitted = grContext->flush(kNone_GrFlushFlags, 1, &backendSemaphore,
-                                                       destroy_semaphore, destroyInfo);
+    GrFlushInfo flushInfo;
+    flushInfo.fNumSemaphores = 1;
+    flushInfo.fSignalSemaphores = &backendSemaphore;
+    flushInfo.fFinishedProc = destroy_semaphore;
+    flushInfo.fFinishedContext = destroyInfo;
+    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
+    grContext->submit();
     if (submitted == GrSemaphoresSubmitted::kNo) {
         ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
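Note on the flush/submit split seen throughout the patch: with GrDirectContext, flush() only records GPU work and a separate submit() call is required before any signal semaphore attached via GrFlushInfo becomes valid. The sketch below is illustrative only and not part of the patch; flushWithSignalSemaphore is a hypothetical helper, and it assumes a Skia revision (as targeted by this change) that provides GrFlushInfo, SkSurface::flush(BackendSurfaceAccess, GrFlushInfo), GrAsDirectContext() and GrDirectContext::submit().

#include <GrBackendSemaphore.h>
#include <GrDirectContext.h>
#include <SkSurface.h>
#include <vulkan/vulkan.h>

// Hypothetical helper showing the two-step flush/submit pattern used by
// finishFrame(): attach a signal semaphore via GrFlushInfo, flush the surface
// for present, then explicitly submit on the direct context.
static bool flushWithSignalSemaphore(SkSurface* surface, VkSemaphore semaphore,
                                     void (*finishedProc)(void*), void* finishedContext) {
    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = finishedProc;        // invoked once the GPU work completes
    flushInfo.fFinishedContext = finishedContext;

    // Record the flush; the semaphore is only queued for signaling on success.
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);

    // flush() no longer submits on GrDirectContext; submit() must follow.
    if (GrDirectContext* context = GrAsDirectContext(surface->recordingContext())) {
        context->submit();
    }

    // Callers must not wait on the semaphore unless it was actually submitted.
    return submitted == GrSemaphoresSubmitted::kYes;
}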
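getInstance() above replaces the per-RenderThread VulkanManager with a process-wide instance that a second thread (for example a WebView functor thread) can share. A minimal sketch of the same weak-pointer pattern using Android's sp/wp reference types follows; SharedThing and getSharedInstance are hypothetical names used only for illustration.

#include <mutex>
#include <utils/RefBase.h>

// Hypothetical ref-counted class standing in for VulkanManager.
class SharedThing : public android::RefBase {};

// Returns the one live instance, creating it if every previous owner has
// released its strong reference. The weak pointer never keeps it alive.
android::sp<SharedThing> getSharedInstance() {
    static android::wp<SharedThing> sWeakInstance;
    static std::mutex sLock;

    std::lock_guard<std::mutex> lock(sLock);
    android::sp<SharedThing> instance = sWeakInstance.promote();
    if (instance == nullptr) {
        instance = new SharedThing();
        sWeakInstance = instance;  // does not extend the lifetime by itself
    }
    return instance;
}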