Diffstat (limited to 'src/venus/vkr_device_memory.c')
-rw-r--r--  src/venus/vkr_device_memory.c  317
1 file changed, 237 insertions(+), 80 deletions(-)
diff --git a/src/venus/vkr_device_memory.c b/src/venus/vkr_device_memory.c
index 516c55dc..a014b742 100644
--- a/src/venus/vkr_device_memory.c
+++ b/src/venus/vkr_device_memory.c
@@ -5,6 +5,8 @@
#include "vkr_device_memory.h"
+#include <gbm.h>
+
#include "venus-protocol/vn_protocol_renderer_transport.h"
#include "vkr_device_memory_gen.h"
@@ -37,100 +39,210 @@ vkr_get_fd_handle_type_from_virgl_fd_type(
return true;
}
-static void
-vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
- struct vn_command_vkAllocateMemory *args)
+static bool
+vkr_get_fd_info_from_resource_info(struct vkr_context *ctx,
+ struct vkr_physical_device *physical_dev,
+ const VkImportMemoryResourceInfoMESA *res_info,
+ VkImportMemoryFdInfoKHR *out)
{
- struct vkr_context *ctx = dispatch->data;
+ struct vkr_resource_attachment *att = NULL;
+ enum virgl_resource_fd_type fd_type;
+ int fd = -1;
+ VkExternalMemoryHandleTypeFlagBits handle_type;
- struct vkr_device *dev = vkr_device_from_handle(args->device);
+ att = vkr_context_get_resource(ctx, res_info->resourceId);
+ if (!att) {
+ vkr_log("failed to import resource: invalid res_id %u", res_info->resourceId);
+ vkr_cs_decoder_set_fatal(&ctx->decoder);
+ return false;
+ }
-#ifdef FORCE_ENABLE_DMABUF
- VkExportMemoryAllocateInfo local_export_info;
- if (dev->physical_device->EXT_external_memory_dma_buf) {
- VkExportMemoryAllocateInfo *export_info = vkr_find_pnext(
- args->pAllocateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
- if (export_info) {
- export_info->handleTypes |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
- } else {
- local_export_info = (const VkExportMemoryAllocateInfo){
- .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
- .pNext = args->pAllocateInfo->pNext,
- .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
- };
- ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_export_info;
- }
+ fd_type = virgl_resource_export_fd(att->resource, &fd);
+ if (fd_type == VIRGL_RESOURCE_FD_INVALID)
+ return false;
+
+ if (!vkr_get_fd_handle_type_from_virgl_fd_type(physical_dev, fd_type, &handle_type)) {
+ close(fd);
+ return false;
}
-#endif
- /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR */
- VkImportMemoryResourceInfoMESA *import_resource_info = NULL;
- VkImportMemoryFdInfoKHR import_fd_info = {
+ *out = (VkImportMemoryFdInfoKHR){
.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
- .fd = -1,
+ .pNext = res_info->pNext,
+ .fd = fd,
+ .handleType = handle_type,
};
- VkBaseInStructure *pprev = (VkBaseInStructure *)args->pAllocateInfo;
- while (pprev->pNext) {
- if (pprev->pNext->sType == VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA) {
- import_resource_info = (VkImportMemoryResourceInfoMESA *)pprev->pNext;
- import_fd_info.pNext = pprev->pNext->pNext;
- pprev->pNext = (const struct VkBaseInStructure *)&import_fd_info;
- break;
- }
- pprev = (VkBaseInStructure *)pprev->pNext;
- }
- if (import_resource_info) {
- uint32_t res_id = import_resource_info->resourceId;
- struct vkr_resource_attachment *att =
- util_hash_table_get(ctx->resource_table, uintptr_to_pointer(res_id));
- if (!att) {
- vkr_cs_decoder_set_fatal(&ctx->decoder);
- return;
- }
+ return true;
+}
- enum virgl_resource_fd_type fd_type =
- virgl_resource_export_fd(att->resource, &import_fd_info.fd);
- if (!vkr_get_fd_handle_type_from_virgl_fd_type(dev->physical_device, fd_type,
- &import_fd_info.handleType)) {
- close(import_fd_info.fd);
- args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
- return;
- }
+static VkResult
+vkr_get_fd_info_from_allocation_info(struct vkr_physical_device *physical_dev,
+ const VkMemoryAllocateInfo *alloc_info,
+ struct gbm_bo **out_gbm_bo,
+ VkImportMemoryFdInfoKHR *out_fd_info)
+{
+#ifdef MINIGBM
+ const uint32_t gbm_bo_use_flags =
+ GBM_BO_USE_LINEAR | GBM_BO_USE_SW_READ_RARELY | GBM_BO_USE_SW_WRITE_RARELY;
+#else
+ const uint32_t gbm_bo_use_flags = GBM_BO_USE_LINEAR;
+#endif
+
+ struct gbm_bo *gbm_bo;
+ int fd = -1;
+
+ assert(physical_dev->gbm_device);
+
+ /*
+ * Reject here for simplicity. Letting VkPhysicalDeviceVulkan11Properties return
+ * min(maxMemoryAllocationSize, UINT32_MAX) will affect unmappable scenarios.
+ */
+ if (alloc_info->allocationSize > UINT32_MAX)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ /* 4K alignment is used on all implementations we support. */
+ gbm_bo =
+ gbm_bo_create(physical_dev->gbm_device, align(alloc_info->allocationSize, 4096), 1,
+ GBM_FORMAT_R8, gbm_bo_use_flags);
+ if (!gbm_bo)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ /* gbm_bo_get_fd returns negative error code on failure */
+ fd = gbm_bo_get_fd(gbm_bo);
+ if (fd < 0) {
+ gbm_bo_destroy(gbm_bo);
+ return fd == -EMFILE ? VK_ERROR_TOO_MANY_OBJECTS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
- const VkPhysicalDeviceMemoryProperties *mem_props =
- &dev->physical_device->memory_properties;
- const uint32_t mt_index = args->pAllocateInfo->memoryTypeIndex;
- const uint32_t property_flags = mem_props->memoryTypes[mt_index].propertyFlags;
+ *out_gbm_bo = gbm_bo;
+ *out_fd_info = (VkImportMemoryFdInfoKHR){
+ .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+ .pNext = alloc_info->pNext,
+ .fd = fd,
+ .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ };
+ return VK_SUCCESS;
+}
- /* get valid fd types */
+static void
+vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch,
+ struct vn_command_vkAllocateMemory *args)
+{
+ struct vkr_context *ctx = dispatch->data;
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vkr_physical_device *physical_dev = dev->physical_device;
+ VkBaseInStructure *prev_of_res_info = NULL;
+ VkImportMemoryResourceInfoMESA *res_info = NULL;
+ VkImportMemoryFdInfoKHR local_import_info = { .fd = -1 };
+ VkExportMemoryAllocateInfo *export_info = vkr_find_struct(
+ args->pAllocateInfo->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
+ const bool no_dma_buf_export =
+ !export_info ||
+ !(export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ struct vkr_device_memory *mem = NULL;
+ const uint32_t mem_type_index = args->pAllocateInfo->memoryTypeIndex;
+ const uint32_t property_flags =
+ physical_dev->memory_properties.memoryTypes[mem_type_index].propertyFlags;
uint32_t valid_fd_types = 0;
- const VkBaseInStructure *pnext = args->pAllocateInfo->pNext;
- while (pnext) {
- if (pnext->sType == VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO) {
- const VkExportMemoryAllocateInfo *export = (const void *)pnext;
+ struct gbm_bo *gbm_bo = NULL;
+
+ /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR in place */
+ prev_of_res_info = vkr_find_prev_struct(
+ args->pAllocateInfo, VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA);
+ if (prev_of_res_info) {
+ res_info = (VkImportMemoryResourceInfoMESA *)prev_of_res_info->pNext;
+ if (!vkr_get_fd_info_from_resource_info(ctx, physical_dev, res_info,
+ &local_import_info)) {
+ args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+ return;
+ }
- if (export->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
- valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE;
- if (export->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
- valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF;
+ prev_of_res_info->pNext = (const struct VkBaseInStructure *)&local_import_info;
+ }
- break;
+ /* XXX Force dma_buf/opaque fd export or gbm bo import until a new extension that
+ * supports direct export from host visible memory is available.
+ *
+ * Most VkImage and VkBuffer are non-external while most VkDeviceMemory are external
+ * if allocated with a host visible memory type. We still violate the spec by binding
+ * external memory to a non-external image or buffer; fixing that needs spec changes
+ * via a new extension.
+ *
+ * Skip forcing external if a valid VkImportMemoryResourceInfoMESA is provided, since
+ * the mapping will be set up directly from the existing virgl resource.
+ */
+ VkExportMemoryAllocateInfo local_export_info;
+ if ((property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) && !res_info) {
+ /* An implementation can support dma_buf import along with opaque fd export/import.
+ * If the client driver is using external memory and requesting dma_buf, but dma_buf
+ * fd export is not supported, we must take the gbm bo import path instead of forcing
+ * opaque fd export, e.g. when the client driver uses external memory for a wsi image.
+ */
+ if (dev->physical_device->is_dma_buf_fd_export_supported ||
+ (dev->physical_device->is_opaque_fd_export_supported && no_dma_buf_export)) {
+ VkExternalMemoryHandleTypeFlagBits handle_type =
+ dev->physical_device->is_dma_buf_fd_export_supported
+ ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
+ : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+ if (export_info) {
+ export_info->handleTypes |= handle_type;
+ } else {
+ local_export_info = (const VkExportMemoryAllocateInfo){
+ .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
+ .pNext = args->pAllocateInfo->pNext,
+ .handleTypes = handle_type,
+ };
+ export_info = &local_export_info;
+ ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_export_info;
+ }
+ } else if (dev->physical_device->EXT_external_memory_dma_buf) {
+ /* Allocate gbm bo to force dma_buf fd import. */
+ VkResult result;
+
+ if (export_info) {
+ /* Strip export info since valid_fd_types can only be dma_buf here. */
+ VkBaseInStructure *prev_of_export_info = vkr_find_prev_struct(
+ args->pAllocateInfo, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO);
+
+ prev_of_export_info->pNext = export_info->pNext;
+ export_info = NULL;
+ }
+
+ result = vkr_get_fd_info_from_allocation_info(physical_dev, args->pAllocateInfo,
+ &gbm_bo, &local_import_info);
+ if (result != VK_SUCCESS) {
+ args->ret = result;
+ return;
+ }
+
+ ((VkMemoryAllocateInfo *)args->pAllocateInfo)->pNext = &local_import_info;
+
+ valid_fd_types = 1 << VIRGL_RESOURCE_FD_DMABUF;
}
- pnext = pnext->pNext;
}
- struct vkr_device_memory *mem = vkr_device_memory_create_and_add(ctx, args);
+ if (export_info) {
+ if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT)
+ valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE;
+ if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
+ valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF;
+ }
+
+ mem = vkr_device_memory_create_and_add(ctx, args);
if (!mem) {
- if (import_resource_info)
- close(import_fd_info.fd);
+ if (local_import_info.fd >= 0)
+ close(local_import_info.fd);
+ if (gbm_bo)
+ gbm_bo_destroy(gbm_bo);
return;
}
- mem->device = args->device;
+ mem->device = dev;
mem->property_flags = property_flags;
mem->valid_fd_types = valid_fd_types;
- list_inithead(&mem->exported_head);
+ mem->gbm_bo = gbm_bo;
+ mem->allocation_size = args->pAllocateInfo->allocationSize;
+ mem->memory_type_index = mem_type_index;
}
static void
@@ -141,8 +253,7 @@ vkr_dispatch_vkFreeMemory(struct vn_dispatch_context *dispatch,
if (!mem)
return;
- list_del(&mem->exported_head);
-
+ vkr_device_memory_release(mem);
vkr_device_memory_destroy_and_remove(dispatch->data, args);
}
@@ -151,8 +262,12 @@ vkr_dispatch_vkGetDeviceMemoryCommitment(
UNUSED struct vn_dispatch_context *dispatch,
struct vn_command_vkGetDeviceMemoryCommitment *args)
{
+ struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
+
vn_replace_vkGetDeviceMemoryCommitment_args_handle(args);
- vkGetDeviceMemoryCommitment(args->device, args->memory, args->pCommittedMemoryInBytes);
+ vk->GetDeviceMemoryCommitment(args->device, args->memory,
+ args->pCommittedMemoryInBytes);
}
static void
@@ -161,9 +276,10 @@ vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress(
struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress *args)
{
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
vn_replace_vkGetDeviceMemoryOpaqueCaptureAddress_args_handle(args);
- args->ret = dev->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo);
+ args->ret = vk->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo);
}
static void
@@ -173,10 +289,11 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
{
struct vkr_context *ctx = dispatch->data;
struct vkr_device *dev = vkr_device_from_handle(args->device);
+ struct vn_device_proc_table *vk = &dev->proc_table;
- struct vkr_resource_attachment *att =
- util_hash_table_get(ctx->resource_table, uintptr_to_pointer(args->resourceId));
+ struct vkr_resource_attachment *att = vkr_context_get_resource(ctx, args->resourceId);
if (!att) {
+ vkr_log("failed to query resource props: invalid res_id %u", args->resourceId);
vkr_cs_decoder_set_fatal(&ctx->decoder);
return;
}
@@ -198,8 +315,7 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
.memoryTypeBits = 0,
};
vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(args);
- args->ret =
- dev->get_memory_fd_properties(args->device, handle_type, fd, &mem_fd_props);
+ args->ret = vk->GetMemoryFdPropertiesKHR(args->device, handle_type, fd, &mem_fd_props);
if (args->ret != VK_SUCCESS) {
close(fd);
return;
@@ -207,7 +323,7 @@ vkr_dispatch_vkGetMemoryResourcePropertiesMESA(
args->pMemoryResourceProperties->memoryTypeBits = mem_fd_props.memoryTypeBits;
- VkMemoryResourceAllocationSizeProperties100000MESA *alloc_size_props = vkr_find_pnext(
+ VkMemoryResourceAllocationSizeProperties100000MESA *alloc_size_props = vkr_find_struct(
args->pMemoryResourceProperties->pNext,
VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_100000_MESA);
if (alloc_size_props)
@@ -235,3 +351,44 @@ vkr_context_init_device_memory_dispatch(struct vkr_context *ctx)
dispatch->dispatch_vkGetMemoryResourcePropertiesMESA =
vkr_dispatch_vkGetMemoryResourcePropertiesMESA;
}
+
+void
+vkr_device_memory_release(struct vkr_device_memory *mem)
+{
+ if (mem->gbm_bo)
+ gbm_bo_destroy(mem->gbm_bo);
+}
+
+int
+vkr_device_memory_export_fd(struct vkr_device_memory *mem,
+ VkExternalMemoryHandleTypeFlagBits handle_type,
+ int *out_fd)
+{
+ struct vn_device_proc_table *vk = &mem->device->proc_table;
+ int fd = -1;
+
+ if (mem->gbm_bo) {
+ /* mem->gbm_bo is a gbm bo backing non-external mappable memory */
+ assert((handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) &&
+ (mem->valid_fd_types == 1 << VIRGL_RESOURCE_FD_DMABUF));
+
+ /* gbm_bo_get_fd returns negative error code on failure */
+ fd = gbm_bo_get_fd(mem->gbm_bo);
+ if (fd < 0)
+ return fd;
+ } else {
+ VkDevice dev_handle = mem->device->base.handle.device;
+ VkDeviceMemory mem_handle = mem->base.handle.device_memory;
+ const VkMemoryGetFdInfoKHR fd_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
+ .memory = mem_handle,
+ .handleType = handle_type,
+ };
+ VkResult result = vk->GetMemoryFdKHR(dev_handle, &fd_info, &fd);
+ if (result != VK_SUCCESS)
+ return result == VK_ERROR_TOO_MANY_OBJECTS ? -EMFILE : -ENOMEM;
+ }
+
+ *out_fd = fd;
+ return 0;
+}
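
Below is a minimal sketch, not part of the patch itself, of how a caller might use the new vkr_device_memory_export_fd() helper when exporting a dma_buf fd for a mappable allocation. The wrapper name example_export_dmabuf and its error handling are hypothetical; the struct fields, enum values, and return convention (0 on success, negative errno on failure) are taken from the patch above, and the surrounding vkr headers are assumed to be included.

#include <errno.h>

static int
example_export_dmabuf(struct vkr_device_memory *mem, int *out_fd)
{
   int fd = -1;
   int ret;

   /* this sketch only attempts dma_buf export */
   if (!(mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_DMABUF)))
      return -EINVAL;

   /* returns 0 on success, or a negative errno such as -EMFILE/-ENOMEM on failure */
   ret = vkr_device_memory_export_fd(mem, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
                                     &fd);
   if (ret)
      return ret;

   *out_fd = fd;
   return 0;
}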