author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-09-13 21:20:25 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-09-13 21:20:25 +0000
commit     e7d511c97839286514abb9b7f14d4770ae82e50c (patch)
tree       7436a747457fb4bc78daf8f0c5c3c858cb6fb072
parent     4f44ca478f0068fde923da694420503e1ea2734c (diff)
parent     bb1541838d8eb2cdba492a9615f43dfb56d83af0 (diff)
Change-Id: I438e2bfa1590b57c0b2da13249af007a0e54108f
 system/vulkan_enc/HostVisibleMemoryVirtualization.cpp |   9 +
 system/vulkan_enc/HostVisibleMemoryVirtualization.h   |   8 +
 system/vulkan_enc/ResourceTracker.cpp                 | 182 ++
 system/vulkan_enc/vk_struct_id.h                      |   2 +
 4 files changed, 183 insertions(+), 18 deletions(-)
diff --git a/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp b/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
index e66c951a..c51a7220 100644
--- a/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
+++ b/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
@@ -318,12 +318,19 @@ void subAllocHostMemory(
out->subMemory = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
out->subAlloc = alloc->subAlloc;
+ out->isDeviceAddressMemoryAllocation = alloc->isDeviceAddressMemoryAllocation;
+ out->memoryTypeIndex = alloc->memoryTypeIndex;
}
-void subFreeHostMemory(SubAlloc* toFree) {
+bool subFreeHostMemory(SubAlloc* toFree) {
delete_goldfish_VkDeviceMemory(toFree->subMemory);
toFree->subAlloc->free(toFree->mappedPtr);
+ bool nowEmpty = toFree->subAlloc->empty();
+ if (nowEmpty) {
+ ALOGV("%s: We have an empty suballoc, time to free the block perhaps?\n", __func__);
+ }
memset(toFree, 0x0, sizeof(SubAlloc));
+ return nowEmpty;
}
bool canSubAlloc(android::base::guest::SubAllocator* subAlloc, VkDeviceSize size) {
diff --git a/system/vulkan_enc/HostVisibleMemoryVirtualization.h b/system/vulkan_enc/HostVisibleMemoryVirtualization.h
index 86972066..6501ab56 100644
--- a/system/vulkan_enc/HostVisibleMemoryVirtualization.h
+++ b/system/vulkan_enc/HostVisibleMemoryVirtualization.h
@@ -86,6 +86,8 @@ struct HostMemAlloc {
int fd = -1;
uint64_t memoryAddr = 0;
size_t memorySize = 0;
+ bool isDeviceAddressMemoryAllocation = false;
+ bool isDedicated = false;
};
VkResult finishHostMemAllocInit(
@@ -113,6 +115,8 @@ struct SubAlloc {
VkDeviceSize baseOffset = 0;
android::base::guest::SubAllocator* subAlloc = nullptr;
VkDeviceMemory subMemory = VK_NULL_HANDLE;
+ bool isDeviceAddressMemoryAllocation = false;
+ uint32_t memoryTypeIndex = 0;
};
void subAllocHostMemory(
@@ -120,7 +124,9 @@ void subAllocHostMemory(
const VkMemoryAllocateInfo* pAllocateInfo,
SubAlloc* out);
-void subFreeHostMemory(SubAlloc* toFree);
+// Returns true if the block would have been emptied.
+// In that case, we can then go back and tear down the block itself.
+bool subFreeHostMemory(SubAlloc* toFree);
bool canSubAlloc(android::base::guest::SubAllocator* subAlloc, VkDeviceSize size);
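For clarity, here is a minimal caller-side sketch of the new contract (SubAlloc, HostMemAlloc and subFreeHostMemory come from this header; lookupOwningBlock and teardownBlock are hypothetical helpers, not part of this change). The caller must read any fields it needs before the free, because subFreeHostMemory() zeroes the SubAlloc, and it only tears the block down when the free reports it empty:

    // Sketch only: expected use of the new bool return value (assumed helper names).
    void freeSubAllocation(SubAlloc* sub) {
        VkDeviceMemory base = sub->baseMemory;             // read before the free; *sub is memset below
        bool deviceAddressAlloc = sub->isDeviceAddressMemoryAllocation;
        if (subFreeHostMemory(sub) && deviceAddressAlloc) {
            // Last suballocation in the block is gone; the backing block can be destroyed.
            HostMemAlloc* block = lookupOwningBlock(base);  // hypothetical lookup by base VkDeviceMemory
            if (block) teardownBlock(block);                // hypothetical teardown
        }
    }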
diff --git a/system/vulkan_enc/ResourceTracker.cpp b/system/vulkan_enc/ResourceTracker.cpp
index 2e89e28e..dd3a475d 100644
--- a/system/vulkan_enc/ResourceTracker.cpp
+++ b/system/vulkan_enc/ResourceTracker.cpp
@@ -547,6 +547,7 @@ public:
}
if (memInfo.directMapped) {
+ ALOGE("%s: warning: direct mapped memory never goes to unregister!\n", __func__);
subFreeHostMemory(&memInfo.subAlloc);
}
@@ -1330,7 +1331,11 @@ public:
"VK_KHR_image_format_list",
"VK_KHR_sampler_ycbcr_conversion",
"VK_KHR_shader_float16_int8",
+ // Timeline semaphores buggy in newer NVIDIA drivers
+ // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_timeline_semaphore",
+#endif
"VK_AMD_gpu_shader_half_float",
"VK_NV_shader_subgroup_partitioned",
"VK_KHR_shader_subgroup_extended_types",
@@ -2918,16 +2923,37 @@ public:
HostMemBlockIndex res = 0;
bool found = false;
+ VkMemoryAllocateFlagsInfo allocFlagsInfo;
+ VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
+
+ // Add buffer device address capture structs
+ const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
+ vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
+ const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
+ vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
+
+ bool isDeviceAddressMemoryAllocation =
+ allocFlagsInfoPtr && ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
+ (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
+ bool isDedicated = isDeviceAddressMemoryAllocation;
+
while (!found) {
- for (HostMemBlockIndex i = 0; i < blocks.size(); ++i) {
- if (blocks[i].initialized &&
- blocks[i].initResult == VK_SUCCESS &&
- canSubAlloc(
- blocks[i].subAlloc,
- pAllocateInfo->allocationSize)) {
- res = i;
- found = true;
- return res;
+ // A dedicated host mapping always gets its own new block, so mark it as found immediately
+ if (isDedicated) {
+ found = true;
+ } else {
+ for (HostMemBlockIndex i = 0; i < blocks.size(); ++i) {
+ if (blocks[i].initialized &&
+ blocks[i].initResult == VK_SUCCESS &&
+ !blocks[i].isDedicated &&
+ blocks[i].isDeviceAddressMemoryAllocation == isDeviceAddressMemoryAllocation &&
+ canSubAlloc(
+ blocks[i].subAlloc,
+ pAllocateInfo->allocationSize)) {
+ res = i;
+ found = true;
+ return res;
+ }
}
}
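For context, the allocation shape this path detects is the standard Vulkan 1.2 buffer-device-address allocation, where the application chains VkMemoryAllocateFlagsInfo into VkMemoryAllocateInfo; such allocations now always receive a dedicated host block instead of a suballocation. A sketch of the application side (device, memory and hostVisibleTypeIndex are assumed to already exist):

    // App-side allocation that vk_find_struct<VkMemoryAllocateFlagsInfo> picks up above.
    VkMemoryAllocateFlagsInfo flagsInfo = {};
    flagsInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
    flagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.pNext = &flagsInfo;                       // makes isDeviceAddressMemoryAllocation true
    allocInfo.allocationSize = 65536;
    allocInfo.memoryTypeIndex = hostVisibleTypeIndex;   // assumed: a host-visible memory type index
    vkAllocateMemory(device, &allocInfo, nullptr, &memory);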
@@ -2935,13 +2961,21 @@ public:
auto& hostMemAlloc = blocks.back();
+ hostMemAlloc.isDedicated = isDedicated;
+
// Uninitialized block; allocate on host.
static constexpr VkDeviceSize oneMb = 1048576;
+ // This needs to be a power of 2 that is at least the min alignment needed in HostVisibleMemoryVirtualization.cpp.
+ static constexpr VkDeviceSize biggestPage = 65536;
static constexpr VkDeviceSize kDefaultHostMemBlockSize =
16 * oneMb; // 16 mb
VkDeviceSize roundedUpAllocSize =
oneMb * ((pAllocateInfo->allocationSize + oneMb - 1) / oneMb);
+ // If dedicated, use a smaller "page rounded alloc size".
+ VkDeviceSize pageRoundedAllocSize =
+ biggestPage * ((pAllocateInfo->allocationSize + biggestPage - 1) / biggestPage);
+
VkDeviceSize virtualHeapSize = VIRTUAL_HOST_VISIBLE_HEAP_SIZE;
VkDeviceSize blockSizeNeeded =
@@ -2949,12 +2983,32 @@ public:
std::min(virtualHeapSize,
kDefaultHostMemBlockSize));
- VkMemoryAllocateInfo allocInfoForHost = *pAllocateInfo;
+ VkMemoryAllocateInfo allocInfoForHost = vk_make_orphan_copy(*pAllocateInfo);
+ vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&allocInfoForHost);
allocInfoForHost.allocationSize = blockSizeNeeded;
+ if (isDedicated) {
+ allocInfoForHost.allocationSize = pageRoundedAllocSize;
+ }
+
// TODO: Support dedicated/external host visible allocation
- allocInfoForHost.pNext = nullptr;
+
+ // Support device address capture/replay allocations
+ if (isDeviceAddressMemoryAllocation) {
+ hostMemAlloc.isDeviceAddressMemoryAllocation = true;
+ if (allocFlagsInfoPtr) {
+ ALOGV("%s: has alloc flags\n", __func__);
+ allocFlagsInfo = *allocFlagsInfoPtr;
+ vk_append_struct(&structChainIter, &allocFlagsInfo);
+ }
+
+ if (opaqueCaptureAddressAllocInfoPtr) {
+ ALOGV("%s: has opaque capture address\n", __func__);
+ opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
+ vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
+ }
+ }
mLock.unlock();
VkResult host_res =
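The two roundings above differ only in granularity, which is why dedicated blocks use the page-rounded size rather than the default 16 MB block size. A worked example with the same constants (arithmetic only; standalone names so they do not clash with the ones above):

    // For a 200 KiB request, 64 KiB page rounding gives 256 KiB; 1 MiB rounding gives 1 MiB.
    constexpr VkDeviceSize oneMbEx = 1048576;
    constexpr VkDeviceSize biggestPageEx = 65536;
    constexpr VkDeviceSize requestedEx = 200 * 1024;  // 204800 bytes
    constexpr VkDeviceSize pageRoundedEx = biggestPageEx * ((requestedEx + biggestPageEx - 1) / biggestPageEx);
    constexpr VkDeviceSize mbRoundedEx   = oneMbEx * ((requestedEx + oneMbEx - 1) / oneMbEx);
    static_assert(pageRoundedEx == 262144 && mbRoundedEx == 1048576, "worked example");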
@@ -2984,13 +3038,14 @@ public:
uint64_t directMappedAddr = 0;
-
VkResult directMapResult = VK_SUCCESS;
if (mFeatureInfo->hasDirectMem) {
mLock.unlock();
directMapResult =
enc->vkMapMemoryIntoAddressSpaceGOOGLE(
device, hostMemAlloc.memory, &directMappedAddr, true /* do lock */);
+ ALOGV("%s: direct mapped addr 0x%llx\n", __func__,
+ (unsigned long long)directMappedAddr);
mLock.lock();
} else if (mFeatureInfo->hasVirtioGpuNext) {
#if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
@@ -3055,6 +3110,7 @@ public:
}
if (directMapResult != VK_SUCCESS) {
+ ALOGE("%s: error: directMapResult != VK_SUCCESS\n", __func__);
hostMemAlloc.initialized = true;
hostMemAlloc.initResult = directMapResult;
mLock.unlock();
@@ -3066,6 +3122,7 @@ public:
hostMemInfo.mappedPtr =
(uint8_t*)(uintptr_t)directMappedAddr;
hostMemInfo.virtualHostVisibleBacking = true;
+ ALOGV("%s: Set mapped ptr to %p\n", __func__, hostMemInfo.mappedPtr);
VkResult hostMemAllocRes =
finishHostMemAllocInit(
@@ -3081,6 +3138,11 @@ public:
if (hostMemAllocRes != VK_SUCCESS) {
return INVALID_HOST_MEM_BLOCK;
}
+
+ if (isDedicated) {
+ ALOGV("%s: New dedicated block at %zu\n", __func__, blocks.size() - 1);
+ return blocks.size() - 1;
+ }
}
// unreachable, but we need to make -Werror happy
@@ -3145,6 +3207,27 @@ public:
VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
+ VkMemoryAllocateFlagsInfo allocFlagsInfo;
+ VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
+
+ // Add buffer device address capture structs
+ const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
+ vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
+ const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
+ vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
+
+ if (allocFlagsInfoPtr) {
+ ALOGV("%s: has alloc flags\n", __func__);
+ allocFlagsInfo = *allocFlagsInfoPtr;
+ vk_append_struct(&structChainIter, &allocFlagsInfo);
+ }
+
+ if (opaqueCaptureAddressAllocInfoPtr) {
+ ALOGV("%s: has opaque capture address\n", __func__);
+ opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
+ vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
+ }
+
VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
VkImportColorBufferGOOGLE importCbInfo = {
VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, 0,
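The opaque-capture-address struct forwarded here is the standard Vulkan 1.2 capture/replay mechanism: on capture the application queries the address with vkGetDeviceMemoryOpaqueCaptureAddress, and on replay it passes that value back through VkMemoryOpaqueCaptureAddressAllocateInfo. A sketch of that flow (standard API, shown for context; device and memory are assumed to exist):

    // Capture: record the opaque address of an existing allocation (the original allocation
    // must have used VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT).
    VkDeviceMemoryOpaqueCaptureAddressInfo addrInfo = {};
    addrInfo.sType = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO;
    addrInfo.memory = memory;
    uint64_t captured = vkGetDeviceMemoryOpaqueCaptureAddress(device, &addrInfo);

    // Replay: ask for the same address again; this is the struct the encoder forwards above.
    VkMemoryOpaqueCaptureAddressAllocateInfo replayInfo = {};
    replayInfo.sType = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO;
    replayInfo.opaqueCaptureAddress = captured;
    // ...chain replayInfo (plus VkMemoryAllocateFlagsInfo with the capture/replay bit) into
    // VkMemoryAllocateInfo::pNext before calling vkAllocateMemory.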
@@ -3828,7 +3911,60 @@ public:
return;
}
- subFreeHostMemory(&info.subAlloc);
+ VkDeviceMemory baseMemory = info.subAlloc.baseMemory;
+ uint32_t memoryTypeIndex = info.subAlloc.memoryTypeIndex;
+ bool isDeviceAddressMemoryAllocation = info.subAlloc.isDeviceAddressMemoryAllocation;
+ // If this was a device address memory allocation,
+ // free it right away.
+ // TODO: Retest with eagerly freeing other kinds of host visible
+ // allocs as well
+ if (subFreeHostMemory(&info.subAlloc) && isDeviceAddressMemoryAllocation) {
+ ALOGV("%s: Last free for this device-address block, "
+ "free on host and clear block contents\n", __func__);
+ ALOGV("%s: baseMem 0x%llx this mem 0x%llx\n", __func__,
+ (unsigned long long)baseMemory,
+ (unsigned long long)memory);
+ VkEncoder* enc = (VkEncoder*)context;
+ bool freeMemorySyncSupported =
+ mFeatureInfo->hasVulkanFreeMemorySync;
+
+ auto it = info_VkDevice.find(device);
+ if (it == info_VkDevice.end()) {
+ ALOGE("%s: Last free: could not find device\n", __func__);
+ return;
+ }
+
+ auto& deviceInfo = it->second;
+
+ auto& hostMemBlocksForTypeIndex =
+ deviceInfo.hostMemBlocks[memoryTypeIndex];
+
+ size_t indexToRemove = 0;
+ bool found = false;
+ for (const auto& allocInfo : hostMemBlocksForTypeIndex) {
+ if (baseMemory == allocInfo.memory) {
+ found = true;
+ break;
+ }
+ ++indexToRemove;
+ }
+
+ if (!found) {
+ ALOGE("%s: Last free: could not find original block\n", __func__);
+ return;
+ }
+
+ ALOGV("%s: Destroying host mem alloc block at index %zu\n", __func__, indexToRemove);
+
+ destroyHostMemAlloc(
+ freeMemorySyncSupported,
+ enc, device,
+ hostMemBlocksForTypeIndex.data() + indexToRemove);
+
+ ALOGV("%s: Destroying host mem alloc block at index %zu (done)\n", __func__, indexToRemove);
+
+ hostMemBlocksForTypeIndex.erase(hostMemBlocksForTypeIndex.begin() + indexToRemove);
+ }
}
VkResult on_vkMapMemory(
@@ -3841,19 +3977,33 @@ public:
VkMemoryMapFlags,
void** ppData) {
- if (host_result != VK_SUCCESS) return host_result;
+ if (host_result != VK_SUCCESS) {
+ ALOGE("%s: Host failed to map\n", __func__);
+ return host_result;
+ }
AutoLock lock(mLock);
auto it = info_VkDeviceMemory.find(memory);
- if (it == info_VkDeviceMemory.end()) return VK_ERROR_MEMORY_MAP_FAILED;
+ if (it == info_VkDeviceMemory.end()) {
+ ALOGE("%s: Could not find this device memory\n", __func__);
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
auto& info = it->second;
- if (!info.mappedPtr) return VK_ERROR_MEMORY_MAP_FAILED;
+ if (!info.mappedPtr) {
+ ALOGE("%s: mappedPtr null\n", __func__);
+ return VK_ERROR_MEMORY_MAP_FAILED;
+ }
if (size != VK_WHOLE_SIZE &&
(info.mappedPtr + offset + size > info.mappedPtr + info.allocationSize)) {
+ ALOGE("%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx total 0x%llx\n", __func__,
+ (unsigned long long)info.allocationSize,
+ (unsigned long long)offset,
+ (unsigned long long)size,
+ (unsigned long long)offset);
return VK_ERROR_MEMORY_MAP_FAILED;
}
diff --git a/system/vulkan_enc/vk_struct_id.h b/system/vulkan_enc/vk_struct_id.h
index 5a7328a9..603c6063 100644
--- a/system/vulkan_enc/vk_struct_id.h
+++ b/system/vulkan_enc/vk_struct_id.h
@@ -58,5 +58,7 @@ REGISTER_VK_STRUCT_ID(VkPhysicalDeviceExternalImageFormatInfo, VK_STRUCTURE_TYPE
REGISTER_VK_STRUCT_ID(VkSemaphoreTypeCreateInfo, VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO);
REGISTER_VK_STRUCT_ID(VkPhysicalDeviceProperties2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2);
REGISTER_VK_STRUCT_ID(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT);
+REGISTER_VK_STRUCT_ID(VkMemoryAllocateFlagsInfo, VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO);
+REGISTER_VK_STRUCT_ID(VkMemoryOpaqueCaptureAddressAllocateInfo, VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
#undef REGISTER_VK_STRUCT_ID
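These two registrations are what let vk_find_struct<VkMemoryAllocateFlagsInfo>() and vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>() in ResourceTracker.cpp locate the structs in an incoming pNext chain. The macro body is not part of this diff; conceptually it associates each struct type with its VkStructureType so a generic chain walk can match on sType, roughly like the following sketch (illustrative only, not the actual goldfish implementation):

    // Conceptual sketch only (assumed shape, not the real helpers): a registration maps
    // T -> its VkStructureType, and a chain walk matches incoming sType values against it.
    template <class T> VkStructureType assumed_struct_type();  // hypothetical, filled per registration

    template <class T>
    const T* find_in_chain(const void* chain) {
        for (auto* p = reinterpret_cast<const VkBaseInStructure*>(chain); p; p = p->pNext) {
            if (p->sType == assumed_struct_type<T>()) return reinterpret_cast<const T*>(p);
        }
        return nullptr;
    }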