aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2022-03-25 12:31:19 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2022-03-25 12:31:19 +0000
commit868b25569fccb242fdd5b9dbaf2b2d1a01588784 (patch)
treebd57fa9cc63db0affac1a8917126b77b700e3f25
parent784beba8cf2a771bb95757b884ef0303bd1f98f7 (diff)
parent7b50a545f11ef5e1390fd4e18a0f1e4c0820a231 (diff)
downloadlibdrm-android13-mainline-go-extservices-release.tar.gz
Snap for 8358640 from 7b50a545f11ef5e1390fd4e18a0f1e4c0820a231 to mainline-go-extservices-releaseaml_go_ext_330912000android13-mainline-go-extservices-release
Change-Id: If906db109c1dc8edc03bd9693b307c53cf1948a4
-rw-r--r--.gitlab-ci.yml258
-rw-r--r--.gitlab-ci/debian-install.sh2
-rw-r--r--README.rst18
-rw-r--r--amdgpu/amdgpu-symbols.txt2
-rw-r--r--amdgpu/amdgpu.h31
-rw-r--r--amdgpu/amdgpu_bo.c24
-rw-r--r--amdgpu/amdgpu_device.c5
-rw-r--r--amdgpu/amdgpu_gpu_info.c15
-rw-r--r--amdgpu/amdgpu_vamgr.c133
-rw-r--r--amdgpu/meson.build2
-rw-r--r--core-symbols.txt5
-rw-r--r--data/amdgpu.ids373
-rw-r--r--etnaviv/etnaviv_bo.c13
-rw-r--r--etnaviv/meson.build2
-rw-r--r--exynos/exynos_drm.c6
-rw-r--r--exynos/meson.build2
-rw-r--r--freedreno/freedreno_bo.c10
-rw-r--r--freedreno/meson.build2
-rwxr-xr-xgen_prebuilt_intermediates.sh2
-rw-r--r--gen_table_fourcc.py84
-rw-r--r--generated_static_table_fourcc.h49
-rw-r--r--include/drm/amdgpu_drm.h52
-rw-r--r--include/drm/drm.h250
-rw-r--r--include/drm/drm_fourcc.h141
-rw-r--r--include/drm/drm_mode.h168
-rw-r--r--include/drm/virtgpu_drm.h66
-rw-r--r--intel/i915_pciids.h29
-rw-r--r--intel/intel_bufmgr_gem.c286
-rw-r--r--intel/intel_chipset.c1
-rw-r--r--intel/intel_decode.c39
-rw-r--r--intel/meson.build2
-rw-r--r--libdrm_macros.h2
-rw-r--r--libkms/meson.build2
-rw-r--r--libkms/vmwgfx.c3
-rw-r--r--man/drm-memory.7.rst17
-rw-r--r--meson.build50
-rw-r--r--nouveau/meson.build2
-rw-r--r--nouveau/nouveau.c35
-rw-r--r--nouveau/private.h17
-rw-r--r--nouveau/pushbuf.c15
-rw-r--r--omap/meson.build2
-rw-r--r--omap/omap_drm.c10
-rw-r--r--radeon/meson.build2
-rw-r--r--radeon/radeon_bo.h1
-rw-r--r--radeon/radeon_bo_gem.c7
-rw-r--r--tegra/meson.build2
-rw-r--r--tegra/tegra.c6
-rw-r--r--tests/amdgpu/amdgpu_stress.c418
-rw-r--r--tests/amdgpu/amdgpu_test.c115
-rw-r--r--tests/amdgpu/amdgpu_test.h44
-rw-r--r--tests/amdgpu/basic_tests.c48
-rw-r--r--tests/amdgpu/bo_tests.c4
-rw-r--r--tests/amdgpu/cs_tests.c7
-rw-r--r--tests/amdgpu/deadlock_tests.c18
-rw-r--r--tests/amdgpu/hotunplug_tests.c445
-rw-r--r--tests/amdgpu/meson.build12
-rw-r--r--tests/amdgpu/security_tests.c13
-rw-r--r--tests/amdgpu/syncobj_tests.c22
-rw-r--r--tests/amdgpu/vce_tests.c2
-rw-r--r--tests/amdgpu/vcn_tests.c2
-rw-r--r--tests/amdgpu/vm_tests.c9
-rw-r--r--tests/modeprint/modeprint.c2
-rw-r--r--tests/modetest/modetest.c121
-rw-r--r--tests/proptest/proptest.c2
-rw-r--r--xf86drm.c592
-rw-r--r--xf86drm.h15
-rw-r--r--xf86drmMode.c111
-rw-r--r--xf86drmMode.h21
68 files changed, 3388 insertions, 880 deletions
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b99dc912..876be951 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -12,47 +12,181 @@
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
-variables:
- UPSTREAM_REPO: mesa/drm
- DEBIAN_TAG: "2020-11-15"
- DEBIAN_VERSION: buster-slim
- DEBIAN_IMAGE: "$CI_REGISTRY_IMAGE/debian/$DEBIAN_VERSION:$DEBIAN_TAG"
+.templates_sha: &template_sha 567700e483aabed992d0a4fea84994a0472deff6 # see https://docs.gitlab.com/ee/ci/yaml/#includefile
include:
- - project: 'wayland/ci-templates'
- ref: 0a9bdd33a98f05af6761ab118b5074952242aab0
- file: '/templates/debian.yml'
+ - project: 'freedesktop/ci-templates'
+ ref: *template_sha
+ file:
+ - '/templates/debian.yml'
+ - '/templates/freebsd.yml'
+ - '/templates/ci-fairy.yml'
-stages:
- - containers
- - build
+variables:
+ FDO_UPSTREAM_REPO: mesa/drm
+ FDO_REPO_SUFFIX: "$BUILD_OS/$BUILD_ARCH"
+stages:
+ - "Base container"
+ - "Build"
-# When & how to run the CI
-.ci-run-policy:
- except:
- - schedules
- retry:
- max: 2
- when:
- - runner_system_failure
+.ci-rules:
+ rules:
+ - when: on_success
# CONTAINERS
-debian:
- stage: containers
+.os-debian:
+ variables:
+ BUILD_OS: debian
+ FDO_DISTRIBUTION_VERSION: buster
+ FDO_DISTRIBUTION_PACKAGES: 'build-essential docbook-xsl libatomic-ops-dev libcairo2-dev libcunit1-dev libpciaccess-dev meson ninja-build pkg-config python3 python3-pip python3-wheel python3-setuptools python3-docutils valgrind'
+ FDO_DISTRIBUTION_EXEC: 'pip3 install meson==0.52.1'
+ # bump this tag every time you change something which requires rebuilding the
+ # base image
+ FDO_DISTRIBUTION_TAG: "2021-08-03.0"
+
+.debian-x86_64:
+ extends:
+ - .os-debian
+ variables:
+ BUILD_ARCH: "x86-64"
+
+.debian-aarch64:
+ extends:
+ - .os-debian
+ variables:
+ BUILD_ARCH: "aarch64"
+
+.debian-armv7:
+ extends:
+ - .os-debian
+ variables:
+ BUILD_ARCH: "armv7"
+
+.os-freebsd:
+ variables:
+ BUILD_OS: freebsd
+ FDO_DISTRIBUTION_VERSION: "13.0"
+ FDO_DISTRIBUTION_PACKAGES: 'meson ninja pkgconf libpciaccess libpthread-stubs py38-docutils cairo'
+ # bump this tag every time you change something which requires rebuilding the
+ # base image
+ FDO_DISTRIBUTION_TAG: "2021-11-10.1"
+
+.freebsd-x86_64:
+ extends:
+ - .os-freebsd
+ variables:
+ BUILD_ARCH: "x86_64"
+
+# Build our base container image, which contains the core distribution, the
+# toolchain, and all our build dependencies. This will be reused in the build
+# stage.
+x86_64-debian-container_prep:
+ extends:
+ - .ci-rules
+ - .debian-x86_64
+ - .fdo.container-build@debian
+ stage: "Base container"
+ variables:
+ GIT_STRATEGY: none
+
+aarch64-debian-container_prep:
+ extends:
+ - .ci-rules
+ - .debian-aarch64
+ - .fdo.container-build@debian
+ tags:
+ - aarch64
+ stage: "Base container"
+ variables:
+ GIT_STRATEGY: none
+
+armv7-debian-container_prep:
+ extends:
+ - .ci-rules
+ - .debian-armv7
+ - .fdo.container-build@debian
+ tags:
+ - aarch64
+ stage: "Base container"
+ variables:
+ GIT_STRATEGY: none
+ FDO_BASE_IMAGE: "arm32v7/debian:$FDO_DISTRIBUTION_VERSION"
+
+x86_64-freebsd-container_prep:
+ extends:
+ - .ci-rules
+ - .freebsd-x86_64
+ - .fdo.qemu-build@freebsd@x86_64
+ stage: "Base container"
+ variables:
+ GIT_STRATEGY: none
+
+# Core build environment.
+.build-env:
+ variables:
+ MESON_BUILD_TYPE: "-Dbuildtype=debug -Doptimization=0 -Db_sanitize=address,undefined"
+
+# OS/architecture-specific variants
+.build-env-debian-x86_64:
extends:
- - .ci-run-policy
- - .debian@container-ifnot-exists
+ - .fdo.suffixed-image@debian
+ - .debian-x86_64
+ - .build-env
+ needs:
+ - job: x86_64-debian-container_prep
+ artifacts: false
+
+.build-env-debian-aarch64:
+ extends:
+ - .fdo.suffixed-image@debian
+ - .debian-aarch64
+ - .build-env
variables:
- GIT_STRATEGY: none # no need to pull the whole tree for rebuilding the image
- DEBIAN_EXEC: 'bash .gitlab-ci/debian-install.sh'
+ # At least with the versions we have, the LSan runtime makes fork unusably
+ # slow on AArch64, which is bad news since the test suite decides to fork
+ # for every single subtest. For now, in order to get AArch64 builds and
+ # tests into CI, just assume that we're not going to leak any more on
+ # AArch64 than we would on ARMv7 or x86-64.
+ ASAN_OPTIONS: "detect_leaks=0"
+ tags:
+ - aarch64
+ needs:
+ - job: aarch64-debian-container_prep
+ artifacts: false
+.build-env-debian-armv7:
+ extends:
+ - .fdo.suffixed-image@debian
+ - .debian-armv7
+ - .build-env
+ tags:
+ - aarch64
+ needs:
+ - job: armv7-debian-container_prep
+ artifacts: false
+
+.build-env-freebsd-x86_64:
+ variables:
+ # Compiling with ASan+UBSan appears to trigger an infinite loop in the
+ # compiler shipped with FreeBSD 13.0, so we only use UBSan here.
+ # Additionally, sanitizers can't be used with b_lundef on FreeBSD.
+ MESON_BUILD_TYPE: "-Dbuildtype=debug -Db_sanitize=undefined -Db_lundef=false"
+ extends:
+ - .fdo.suffixed-image@freebsd
+ - .freebsd-x86_64
+ - .build-env
+ needs:
+ - job: x86_64-freebsd-container_prep
+ artifacts: false
# BUILD
-.meson-build:
- stage: build
+.do-build:
+ extends:
+ - .ci-rules
+ stage: "Build"
variables:
GIT_DEPTH: 10
script:
@@ -74,7 +208,6 @@ debian:
-D valgrind=auto
-D vc4=true
-D vmwgfx=true
- ${CROSS+--cross /cross_file-$CROSS.txt}
- ninja -C build
- ninja -C build test
- DESTDIR=$PWD/install ninja -C build install
@@ -83,43 +216,55 @@ debian:
paths:
- build/meson-logs/*
-meson-x86_64:
+.do-build-qemu:
extends:
- - .ci-run-policy
- - .meson-build
- image: $DEBIAN_IMAGE
- needs:
- - debian
-
-meson-i386:
- extends: meson-x86_64
- variables:
- CROSS: i386
+ - .ci-rules
+ stage: "Build"
+ script:
+ # Start the VM and copy our workspace to the VM
+ - /app/vmctl start
+ - scp -r $PWD "vm:"
+ # The `set +e is needed to ensure that we always copy the meson logs back to
+ # the workspace to see details about the failed tests.
+ - |
+ set +e
+ /app/vmctl exec "pkg info; cd $CI_PROJECT_NAME ; meson build -D amdgpu=true -D cairo-tests=true -D intel=true -D libkms=true -D man-pages=true -D nouveau=false -D radeon=true -D valgrind=auto && ninja -C build"
+ set -ex
+ scp -r vm:$CI_PROJECT_NAME/build/meson-logs .
+ /app/vmctl exec "ninja -C $CI_PROJECT_NAME/build install"
+ mkdir -p $PREFIX && scp -r vm:$PREFIX/ $PREFIX/
+ # Finally, shut down the VM.
+ - /app/vmctl stop
+ artifacts:
+ when: on_failure
+ paths:
+ - build/meson-logs/*
-meson-aarch64:
- extends: meson-x86_64
- variables:
- CROSS: arm64
+# Full build and test.
+x86_64-debian-build:
+ extends:
+ - .build-env-debian-x86_64
+ - .do-build
-meson-armhf:
- extends: meson-x86_64
- variables:
- CROSS: armhf
+aarch64-debian-build:
+ extends:
+ - .build-env-debian-aarch64
+ - .do-build
-meson-ppc64el:
- extends: meson-x86_64
- variables:
- CROSS: ppc64el
+armv7-debian-build:
+ extends:
+ - .build-env-debian-armv7
+ - .do-build
+# Daily build
meson-arch-daily:
rules:
- if: '$SCHEDULE == "arch-daily"'
when: on_success
- when: never
- image: archlinux/base
+ image: archlinux/archlinux:base-devel
before_script:
- pacman -Syu --noconfirm --needed
- base-devel
cairo
cunit
libatomic_ops
@@ -127,4 +272,9 @@ meson-arch-daily:
meson
valgrind
python-docutils
- extends: .meson-build
+ extends: .do-build
+
+x86_64-freebsd-build:
+ extends:
+ - .build-env-freebsd-x86_64
+ - .do-build-qemu
diff --git a/.gitlab-ci/debian-install.sh b/.gitlab-ci/debian-install.sh
index 886e808f..ab901360 100644
--- a/.gitlab-ci/debian-install.sh
+++ b/.gitlab-ci/debian-install.sh
@@ -63,4 +63,4 @@ done
# Test that the oldest Meson version we claim to support is still supported
-pip3 install meson==0.43
+pip3 install meson==0.46
diff --git a/README.rst b/README.rst
index da995d0b..74608031 100644
--- a/README.rst
+++ b/README.rst
@@ -13,6 +13,24 @@ but a new libdrm will always work with an older kernel.
libdrm is a low-level library, typically used by graphics drivers such as
the Mesa drivers, the X drivers, libva and similar projects.
+Syncing with the Linux kernel headers
+-------------------------------------
+
+The library should be regularly updated to match the recent changes in the
+`include/uapi/drm/`.
+
+libdrm maintains a human-readable version for the token format modifier, with
+the simpler ones being extracted automatically from `drm_fourcc.h` header file
+with the help of a python script. This might not always possible, as some of
+the vendors require decoding/extracting them programmatically. For that
+reason one can enhance the current vendor functions to include/provide the
+newly added token formats, or, in case there's no such decoding
+function, to add one that performs the tasks of extracting them.
+
+For simpler format modifier tokens there's a script (gen_table_fourcc.py) that
+creates a static table, by going over `drm_fourcc.h` header file. The script
+could be further modified if it can't handle new (simpler) token format
+modifiers instead of the generated static table.
Compiling
---------
diff --git a/amdgpu/amdgpu-symbols.txt b/amdgpu/amdgpu-symbols.txt
index e3bafaab..af2b6439 100644
--- a/amdgpu/amdgpu-symbols.txt
+++ b/amdgpu/amdgpu-symbols.txt
@@ -53,6 +53,7 @@ amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore
amdgpu_device_deinitialize
+amdgpu_device_get_fd
amdgpu_device_initialize
amdgpu_find_bo_by_cpu_mapping
amdgpu_get_marketing_name
@@ -66,6 +67,7 @@ amdgpu_query_hw_ip_count
amdgpu_query_hw_ip_info
amdgpu_query_info
amdgpu_query_sensor_info
+amdgpu_query_video_caps_info
amdgpu_read_mm_registers
amdgpu_va_range_alloc
amdgpu_va_range_free
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 188179c9..cde8585c 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -546,6 +546,19 @@ int amdgpu_device_initialize(int fd,
*/
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
+/**
+ *
+ * /param device_handle - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ *
+ * \return Returns the drm fd used for operations on this
+ * device. This is still owned by the library and hence
+ * should not be closed. Guaranteed to be valid until
+ * #amdgpu_device_deinitialize gets called.
+ *
+*/
+int amdgpu_device_get_fd(amdgpu_device_handle device_handle);
+
/*
* Memory Management
*
@@ -1238,6 +1251,23 @@ int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
unsigned size, void *value);
/**
+ * Query information about video capabilities
+ *
+ * The return sizeof(struct drm_amdgpu_info_video_caps)
+ *
+ * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
+ * \param caps_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
+ * \param size - \c [in] Size of the returned value.
+ * \param value - \c [out] Pointer to the return value.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+ unsigned size, void *value);
+
+/**
* Read a set of consecutive memory-mapped registers.
* Not all registers are allowed to be read by userspace.
*
@@ -1263,6 +1293,7 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
*/
#define AMDGPU_VA_RANGE_32_BIT 0x1
#define AMDGPU_VA_RANGE_HIGH 0x2
+#define AMDGPU_VA_RANGE_REPLAYABLE 0x4
/**
* Allocate virtual address range
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 5bdb8fe8..54b1fb90 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -39,14 +39,6 @@
#include "amdgpu_internal.h"
#include "util_math.h"
-static int amdgpu_close_kms_handle(int fd, uint32_t handle)
-{
- struct drm_gem_close args = {};
-
- args.handle = handle;
- return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
-}
-
static int amdgpu_bo_create(amdgpu_device_handle dev,
uint64_t size,
uint32_t handle,
@@ -101,7 +93,7 @@ drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
- amdgpu_close_kms_handle(dev->fd, args.out.handle);
+ drmCloseBufferHandle(dev->fd, args.out.handle);
}
out:
@@ -216,7 +208,7 @@ static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
bo->flink_name = flink.name;
if (bo->dev->flink_fd != bo->dev->fd)
- amdgpu_close_kms_handle(bo->dev->flink_fd, handle);
+ drmCloseBufferHandle(bo->dev->flink_fd, handle);
pthread_mutex_lock(&bo->dev->bo_table_mutex);
r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
@@ -342,8 +334,8 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
close(dma_fd);
if (r)
goto free_bo_handle;
- r = amdgpu_close_kms_handle(dev->flink_fd,
- open_arg.handle);
+ r = drmCloseBufferHandle(dev->flink_fd,
+ open_arg.handle);
if (r)
goto free_bo_handle;
}
@@ -381,12 +373,12 @@ drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
free_bo_handle:
if (flink_name && open_arg.handle)
- amdgpu_close_kms_handle(dev->flink_fd, open_arg.handle);
+ drmCloseBufferHandle(dev->flink_fd, open_arg.handle);
if (bo)
amdgpu_bo_free(bo);
else
- amdgpu_close_kms_handle(dev->fd, handle);
+ drmCloseBufferHandle(dev->fd, handle);
unlock:
pthread_mutex_unlock(&dev->bo_table_mutex);
return r;
@@ -415,7 +407,7 @@ drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
amdgpu_bo_cpu_unmap(bo);
}
- amdgpu_close_kms_handle(dev->fd, bo->handle);
+ drmCloseBufferHandle(dev->fd, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
}
@@ -598,7 +590,7 @@ drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
pthread_mutex_unlock(&dev->bo_table_mutex);
if (r) {
- amdgpu_close_kms_handle(dev->fd, args.handle);
+ drmCloseBufferHandle(dev->fd, args.handle);
}
out:
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index 76b4e5eb..73fd27f6 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -285,6 +285,11 @@ drm_public int amdgpu_device_deinitialize(amdgpu_device_handle dev)
return 0;
}
+drm_public int amdgpu_device_get_fd(amdgpu_device_handle device_handle)
+{
+ return device_handle->fd;
+}
+
drm_public const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
{
return dev->marketing_name;
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
index 777087f2..9f8695ce 100644
--- a/amdgpu/amdgpu_gpu_info.c
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -331,3 +331,18 @@ drm_public int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned senso
return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
sizeof(struct drm_amdgpu_info));
}
+
+drm_public int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
+ unsigned size, void *value)
+{
+ struct drm_amdgpu_info request;
+
+ memset(&request, 0, sizeof(request));
+ request.return_pointer = (uintptr_t)value;
+ request.return_size = size;
+ request.query = AMDGPU_INFO_VIDEO_CAPS;
+ request.sensor_info.type = cap_type;
+
+ return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
+ sizeof(struct drm_amdgpu_info));
+}
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index d25d4216..077a9fc8 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -69,65 +69,99 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
pthread_mutex_destroy(&mgr->bo_va_mutex);
}
-static drm_private uint64_t
+static drm_private int
+amdgpu_vamgr_subtract_hole(struct amdgpu_bo_va_hole *hole, uint64_t start_va,
+ uint64_t end_va)
+{
+ if (start_va > hole->offset && end_va - hole->offset < hole->size) {
+ struct amdgpu_bo_va_hole *n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+ if (!n)
+ return -ENOMEM;
+
+ n->size = start_va - hole->offset;
+ n->offset = hole->offset;
+ list_add(&n->list, &hole->list);
+
+ hole->size -= (end_va - hole->offset);
+ hole->offset = end_va;
+ } else if (start_va > hole->offset) {
+ hole->size = start_va - hole->offset;
+ } else if (end_va - hole->offset < hole->size) {
+ hole->size -= (end_va - hole->offset);
+ hole->offset = end_va;
+ } else {
+ list_del(&hole->list);
+ free(hole);
+ }
+
+ return 0;
+}
+
+static drm_private int
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
- uint64_t alignment, uint64_t base_required)
+ uint64_t alignment, uint64_t base_required,
+ bool search_from_top, uint64_t *va_out)
{
struct amdgpu_bo_va_hole *hole, *n;
- uint64_t offset = 0, waste = 0;
+ uint64_t offset = 0;
+ int ret;
alignment = MAX2(alignment, mgr->va_alignment);
size = ALIGN(size, mgr->va_alignment);
if (base_required % alignment)
- return AMDGPU_INVALID_VA_ADDRESS;
+ return -EINVAL;
pthread_mutex_lock(&mgr->bo_va_mutex);
- LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
- if (base_required) {
- if (hole->offset > base_required ||
- (hole->offset + hole->size) < (base_required + size))
- continue;
- waste = base_required - hole->offset;
- offset = base_required;
- } else {
- offset = hole->offset;
- waste = offset % alignment;
- waste = waste ? alignment - waste : 0;
- offset += waste;
- if (offset >= (hole->offset + hole->size)) {
- continue;
+ if (!search_from_top) {
+ LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
+ if (base_required) {
+ if (hole->offset > base_required ||
+ (hole->offset + hole->size) < (base_required + size))
+ continue;
+ offset = base_required;
+ } else {
+ uint64_t waste = hole->offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ offset = hole->offset + waste;
+ if (offset >= (hole->offset + hole->size) ||
+ size > (hole->offset + hole->size) - offset) {
+ continue;
+ }
}
- }
- if (!waste && hole->size == size) {
- offset = hole->offset;
- list_del(&hole->list);
- free(hole);
+ ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
+ *va_out = offset;
+ return ret;
}
- if ((hole->size - waste) > size) {
- if (waste) {
- n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
- n->size = waste;
- n->offset = hole->offset;
- list_add(&n->list, &hole->list);
+ } else {
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ if (base_required) {
+ if (hole->offset > base_required ||
+ (hole->offset + hole->size) < (base_required + size))
+ continue;
+ offset = base_required;
+ } else {
+ if (size > hole->size)
+ continue;
+
+ offset = hole->offset + hole->size - size;
+ offset -= offset % alignment;
+ if (offset < hole->offset) {
+ continue;
+ }
}
- hole->size -= (size + waste);
- hole->offset += size + waste;
- pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
- }
- if ((hole->size - waste) == size) {
- hole->size = waste;
+
+ ret = amdgpu_vamgr_subtract_hole(hole, offset, offset + size);
pthread_mutex_unlock(&mgr->bo_va_mutex);
- return offset;
+ *va_out = offset;
+ return ret;
}
}
pthread_mutex_unlock(&mgr->bo_va_mutex);
- return AMDGPU_INVALID_VA_ADDRESS;
+ return -ENOMEM;
}
static drm_private void
@@ -196,6 +230,8 @@ drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
uint64_t flags)
{
struct amdgpu_bo_va_mgr *vamgr;
+ bool search_from_top = !!(flags & AMDGPU_VA_RANGE_REPLAYABLE);
+ int ret;
/* Clear the flag when the high VA manager is not initialized */
if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
@@ -216,21 +252,22 @@ drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
size = ALIGN(size, vamgr->va_alignment);
- *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
- va_base_alignment, va_base_required);
+ ret = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required,
+ search_from_top, va_base_allocated);
- if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
- (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+ if (!(flags & AMDGPU_VA_RANGE_32_BIT) && ret) {
/* fallback to 32bit address */
if (flags & AMDGPU_VA_RANGE_HIGH)
vamgr = &dev->vamgr_high_32;
else
vamgr = &dev->vamgr_32;
- *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
- va_base_alignment, va_base_required);
+ ret = amdgpu_vamgr_find_va(vamgr, size,
+ va_base_alignment, va_base_required,
+ search_from_top, va_base_allocated);
}
- if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
+ if (!ret) {
struct amdgpu_va* va;
va = calloc(1, sizeof(struct amdgpu_va));
if(!va){
@@ -243,11 +280,9 @@ drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
va->range = va_range_type;
va->vamgr = vamgr;
*va_range_handle = va;
- } else {
- return -EINVAL;
}
- return 0;
+ return ret;
}
drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
diff --git a/amdgpu/meson.build b/amdgpu/meson.build
index d5c5f397..3301a10e 100644
--- a/amdgpu/meson.build
+++ b/amdgpu/meson.build
@@ -21,7 +21,7 @@
datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
-libdrm_amdgpu = shared_library(
+libdrm_amdgpu = library(
'drm_amdgpu',
[
files(
diff --git a/core-symbols.txt b/core-symbols.txt
index 410054b3..31bbcf8f 100644
--- a/core-symbols.txt
+++ b/core-symbols.txt
@@ -22,6 +22,7 @@ drmAuthMagic
drmAvailable
drmCheckModesettingSupported
drmClose
+drmCloseBufferHandle
drmCloseOnce
drmCommandNone
drmCommandRead
@@ -57,6 +58,7 @@ drmGetContextPrivateMapping
drmGetContextTag
drmGetDevice
drmGetDevice2
+drmGetDeviceFromDevId
drmGetDeviceNameFromFd
drmGetDeviceNameFromFd2
drmGetDevices
@@ -109,6 +111,7 @@ drmModeCrtcSetGamma
drmModeDestroyPropertyBlob
drmModeDetachMode
drmModeDirtyFB
+drmModeFormatModifierBlobIterNext
drmModeFreeConnector
drmModeFreeCrtc
drmModeFreeEncoder
@@ -196,3 +199,5 @@ drmUnmap
drmUnmapBufs
drmUpdateDrawableInfo
drmWaitVBlank
+drmGetFormatModifierName
+drmGetFormatModifierVendor
diff --git a/data/amdgpu.ids b/data/amdgpu.ids
index 93c1c763..0040a38a 100644
--- a/data/amdgpu.ids
+++ b/data/amdgpu.ids
@@ -4,105 +4,106 @@
# device_id, revision_id, product_name <-- single tab after comma
1.0.0
-15DD, C3, AMD Radeon(TM) Vega 3 Graphics
-15DD, CB, AMD Radeon(TM) Vega 3 Graphics
-15DD, CE, AMD Radeon(TM) Vega 3 Graphics
-15DD, D8, AMD Radeon(TM) Vega 3 Graphics
-15DD, CC, AMD Radeon(TM) Vega 6 Graphics
-15DD, D9, AMD Radeon(TM) Vega 6 Graphics
-15DD, C2, AMD Radeon(TM) Vega 8 Graphics
-15DD, C4, AMD Radeon(TM) Vega 8 Graphics
-15DD, C8, AMD Radeon(TM) Vega 8 Graphics
-15DD, CA, AMD Radeon(TM) Vega 8 Graphics
-15DD, D1, AMD Radeon(TM) Vega 8 Graphics
-15DD, D5, AMD Radeon(TM) Vega 8 Graphics
-15DD, D7, AMD Radeon(TM) Vega 8 Graphics
-15DD, C3, AMD Radeon(TM) Vega 10 Graphics
-15DD, D0, AMD Radeon(TM) Vega 10 Graphics
-15DD, C1, AMD Radeon(TM) Vega 11 Graphics
-15DD, C6, AMD Radeon(TM) Vega 11 Graphics
-15DD, C9, AMD Radeon(TM) Vega 11 Graphics
-15DD, D3, AMD Radeon(TM) Vega 11 Graphics
-15DD, D6, AMD Radeon(TM) Vega 11 Graphics
+15DD, C3, AMD Radeon Vega 3 Graphics
+15DD, CB, AMD Radeon Vega 3 Graphics
+15DD, CE, AMD Radeon Vega 3 Graphics
+15DD, D8, AMD Radeon Vega 3 Graphics
+15DD, CC, AMD Radeon Vega 6 Graphics
+15DD, D9, AMD Radeon Vega 6 Graphics
+15DD, C2, AMD Radeon Vega 8 Graphics
+15DD, C4, AMD Radeon Vega 8 Graphics
+15DD, C8, AMD Radeon Vega 8 Graphics
+15DD, CA, AMD Radeon Vega 8 Graphics
+15DD, D1, AMD Radeon Vega 8 Graphics
+15DD, D5, AMD Radeon Vega 8 Graphics
+15DD, D7, AMD Radeon Vega 8 Graphics
+15DD, C3, AMD Radeon Vega 10 Graphics
+15DD, D0, AMD Radeon Vega 10 Graphics
+15DD, C1, AMD Radeon Vega 11 Graphics
+15DD, C6, AMD Radeon Vega 11 Graphics
+15DD, C9, AMD Radeon Vega 11 Graphics
+15DD, D3, AMD Radeon Vega 11 Graphics
+15DD, D6, AMD Radeon Vega 11 Graphics
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
-15D8, 93, AMD Radeon(TM) Vega 1 Graphics
-15D8, C4, AMD Radeon(TM) Vega 3 Graphics
-15D8, C5, AMD Radeon(TM) Vega 3 Graphics
-15D8, CC, AMD Radeon(TM) Vega 3 Graphics
-15D8, CE, AMD Radeon(TM) Vega 3 Graphics
-15D8, CF, AMD Radeon(TM) Vega 3 Graphics
-15D8, D4, AMD Radeon(TM) Vega 3 Graphics
-15D8, DC, AMD Radeon(TM) Vega 3 Graphics
-15D8, DD, AMD Radeon(TM) Vega 3 Graphics
-15D8, DE, AMD Radeon(TM) Vega 3 Graphics
-15D8, DF, AMD Radeon(TM) Vega 3 Graphics
-15D8, E3, AMD Radeon(TM) Vega 3 Graphics
-15D8, E4, AMD Radeon(TM) Vega 3 Graphics
-15D8, A3, AMD Radeon(TM) Vega 6 Graphics
-15D8, B3, AMD Radeon(TM) Vega 6 Graphics
-15D8, C3, AMD Radeon(TM) Vega 6 Graphics
-15D8, D3, AMD Radeon(TM) Vega 6 Graphics
-15D8, A2, AMD Radeon(TM) Vega 8 Graphics
-15D8, B2, AMD Radeon(TM) Vega 8 Graphics
-15D8, C2, AMD Radeon(TM) Vega 8 Graphics
-15D8, C9, AMD Radeon(TM) Vega 8 Graphics
-15D8, CB, AMD Radeon(TM) Vega 8 Graphics
-15D8, D2, AMD Radeon(TM) Vega 8 Graphics
-15D8, D9, AMD Radeon(TM) Vega 8 Graphics
-15D8, DB, AMD Radeon(TM) Vega 8 Graphics
-15D8, A1, AMD Radeon(TM) Vega 10 Graphics
-15D8, B1, AMD Radeon(TM) Vega 10 Graphics
-15D8, C1, AMD Radeon(TM) Vega 10 Graphics
-15D8, D1, AMD Radeon(TM) Vega 10 Graphics
-15D8, C8, AMD Radeon(TM) Vega 11 Graphics
-15D8, CA, AMD Radeon(TM) Vega 11 Graphics
-15D8, D8, AMD Radeon(TM) Vega 11 Graphics
-15D8, DA, AMD Radeon(TM) Vega 11 Graphics
+15D8, 93, AMD Radeon Vega 1 Graphics
+15D8, C4, AMD Radeon Vega 3 Graphics
+15D8, C5, AMD Radeon Vega 3 Graphics
+15D8, CC, AMD Radeon Vega 3 Graphics
+15D8, CE, AMD Radeon Vega 3 Graphics
+15D8, CF, AMD Radeon Vega 3 Graphics
+15D8, D4, AMD Radeon Vega 3 Graphics
+15D8, DC, AMD Radeon Vega 3 Graphics
+15D8, DD, AMD Radeon Vega 3 Graphics
+15D8, DE, AMD Radeon Vega 3 Graphics
+15D8, DF, AMD Radeon Vega 3 Graphics
+15D8, E3, AMD Radeon Vega 3 Graphics
+15D8, E4, AMD Radeon Vega 3 Graphics
+15D8, A3, AMD Radeon Vega 6 Graphics
+15D8, B3, AMD Radeon Vega 6 Graphics
+15D8, C3, AMD Radeon Vega 6 Graphics
+15D8, D3, AMD Radeon Vega 6 Graphics
+15D8, A2, AMD Radeon Vega 8 Graphics
+15D8, B2, AMD Radeon Vega 8 Graphics
+15D8, C2, AMD Radeon Vega 8 Graphics
+15D8, C9, AMD Radeon Vega 8 Graphics
+15D8, CB, AMD Radeon Vega 8 Graphics
+15D8, D2, AMD Radeon Vega 8 Graphics
+15D8, D9, AMD Radeon Vega 8 Graphics
+15D8, DB, AMD Radeon Vega 8 Graphics
+15D8, A1, AMD Radeon Vega 10 Graphics
+15D8, B1, AMD Radeon Vega 10 Graphics
+15D8, C1, AMD Radeon Vega 10 Graphics
+15D8, D1, AMD Radeon Vega 10 Graphics
+15D8, C8, AMD Radeon Vega 11 Graphics
+15D8, CA, AMD Radeon Vega 11 Graphics
+15D8, D8, AMD Radeon Vega 11 Graphics
+15D8, DA, AMD Radeon Vega 11 Graphics
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
-6600, 0, AMD Radeon HD 8600/8700M
-6600, 81, AMD Radeon (TM) R7 M370
-6601, 0, AMD Radeon (TM) HD 8500M/8700M
+163F, AE, AMD Custom GPU 0405
+6600, 0, AMD Radeon HD 8600 / 8700M
+6600, 81, AMD Radeon R7 M370
+6601, 0, AMD Radeon HD 8500M / 8700M
6604, 0, AMD Radeon R7 M265 Series
-6604, 81, AMD Radeon (TM) R7 M350
+6604, 81, AMD Radeon R7 M350
6605, 0, AMD Radeon R7 M260 Series
-6605, 81, AMD Radeon (TM) R7 M340
+6605, 81, AMD Radeon R7 M340
6606, 0, AMD Radeon HD 8790M
-6607, 0, AMD Radeon (TM) HD8530M
+6607, 0, AMD Radeon HD 8530M
6608, 0, AMD FirePro W2100
6610, 0, AMD Radeon HD 8600 Series
-6610, 81, AMD Radeon (TM) R7 350
-6610, 83, AMD Radeon (TM) R5 340
+6610, 81, AMD Radeon R7 350
+6610, 83, AMD Radeon R5 340
6611, 0, AMD Radeon HD 8500 Series
6613, 0, AMD Radeon HD 8500 series
6617, C7, AMD Radeon R7 240 Series
6640, 0, AMD Radeon HD 8950
-6640, 80, AMD Radeon (TM) R9 M380
+6640, 80, AMD Radeon R9 M380
6646, 0, AMD Radeon R9 M280X
-6646, 80, AMD Radeon (TM) R9 M470X
+6646, 80, AMD Radeon R9 M470X
6647, 0, AMD Radeon R9 M270X
-6647, 80, AMD Radeon (TM) R9 M380
+6647, 80, AMD Radeon R9 M380
6649, 0, AMD FirePro W5100
6658, 0, AMD Radeon R7 200 Series
665C, 0, AMD Radeon HD 7700 Series
665D, 0, AMD Radeon R7 200 Series
-665F, 81, AMD Radeon (TM) R7 300 Series
+665F, 81, AMD Radeon R7 300 Series
6660, 0, AMD Radeon HD 8600M Series
-6660, 81, AMD Radeon (TM) R5 M335
-6660, 83, AMD Radeon (TM) R5 M330
+6660, 81, AMD Radeon R5 M335
+6660, 83, AMD Radeon R5 M330
6663, 0, AMD Radeon HD 8500M Series
-6663, 83, AMD Radeon (TM) R5 M320
+6663, 83, AMD Radeon R5 M320
6664, 0, AMD Radeon R5 M200 Series
6665, 0, AMD Radeon R5 M200 Series
-6665, 83, AMD Radeon (TM) R5 M320
+6665, 83, AMD Radeon R5 M320
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
-66A1, 06, AMD Radeon (TM) Pro VII
+66A1, 06, AMD Radeon Pro VII
66AF, C1, AMD Radeon VII
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
@@ -113,146 +114,148 @@
67A0, 0, AMD Radeon FirePro W9100
67A1, 0, AMD Radeon FirePro W8100
67B0, 0, AMD Radeon R9 200 Series
-67B0, 80, AMD Radeon (TM) R9 390 Series
+67B0, 80, AMD Radeon R9 390 Series
67B1, 0, AMD Radeon R9 200 Series
-67B1, 80, AMD Radeon (TM) R9 390 Series
+67B1, 80, AMD Radeon R9 390 Series
67B9, 0, AMD Radeon R9 200 Series
-67DF, C1, Radeon RX 580 Series
-67DF, C2, Radeon RX 570 Series
-67DF, C3, Radeon RX 580 Series
-67DF, C4, AMD Radeon (TM) RX 480 Graphics
-67DF, C5, AMD Radeon (TM) RX 470 Graphics
-67DF, C6, Radeon RX 570 Series
-67DF, C7, AMD Radeon (TM) RX 480 Graphics
-67DF, CF, AMD Radeon (TM) RX 470 Graphics
-67DF, D7, Radeon(TM) RX 470 Graphics
-67DF, E0, Radeon RX 470 Series
-67DF, E1, Radeon RX 590 Series
-67DF, E3, Radeon RX Series
-67DF, E7, Radeon RX 580 Series
-67DF, EF, Radeon RX 570 Series
-67DF, F7, Radeon RX P30PH
-67C2, 01, AMD Radeon (TM) Pro V7350x2
-67C2, 02, AMD Radeon (TM) Pro V7300X
-67C4, 00, AMD Radeon (TM) Pro WX 7100 Graphics
-67C7, 00, AMD Radeon (TM) Pro WX 5100 Graphics
-67C0, 00, AMD Radeon (TM) Pro WX 7100 Graphics
-67D0, 01, AMD Radeon (TM) Pro V7350x2
-67D0, 02, AMD Radeon (TM) Pro V7300X
-67E0, 00, AMD Radeon (TM) Pro WX Series
-67E3, 00, AMD Radeon (TM) Pro WX 4100
-67E8, 00, AMD Radeon (TM) Pro WX Series
-67E8, 01, AMD Radeon (TM) Pro WX Series
-67E8, 80, AMD Radeon (TM) E9260 Graphics
-67EB, 00, AMD Radeon (TM) Pro V5300X
-67EF, C0, AMD Radeon (TM) RX Graphics
-67EF, C1, AMD Radeon (TM) RX 460 Graphics
-67EF, C3, Radeon RX Series
-67EF, C5, AMD Radeon (TM) RX 460 Graphics
-67EF, C7, AMD Radeon (TM) RX Graphics
-67EF, CF, AMD Radeon (TM) RX 460 Graphics
-67EF, E2, RX 560X
-67EF, E0, Radeon RX 560 Series
-67EF, E1, Radeon RX Series
-67EF, E3, Radeon RX Series
-67EF, E5, Radeon RX 560 Series
-67EF, EF, AMD Radeon (TM) RX Graphics
-67EF, FF, Radeon(TM) RX 460 Graphics
-67FF, C0, AMD Radeon (TM) RX Graphics
-67FF, C1, AMD Radeon (TM) RX Graphics
-67FF, CF, Radeon RX 560 Series
-67FF, EF, Radeon RX 560 Series
-67FF, FF, Radeon RX 550 Series
+67DF, C1, AMD Radeon RX 580 Series
+67DF, C2, AMD Radeon RX 570 Series
+67DF, C3, AMD Radeon RX 580 Series
+67DF, C4, AMD Radeon RX 480 Graphics
+67DF, C5, AMD Radeon RX 470 Graphics
+67DF, C6, AMD Radeon RX 570 Series
+67DF, C7, AMD Radeon RX 480 Graphics
+67DF, CF, AMD Radeon RX 470 Graphics
+67DF, D7, AMD Radeon RX 470 Graphics
+67DF, E0, AMD Radeon RX 470 Series
+67DF, E1, AMD Radeon RX 590 Series
+67DF, E3, AMD Radeon RX Series
+67DF, E7, AMD Radeon RX 580 Series
+67DF, EF, AMD Radeon RX 570 Series
+67DF, F7, AMD Radeon RX P30PH
+67C2, 01, AMD Radeon Pro V7350x2
+67C2, 02, AMD Radeon Pro V7300X
+67C4, 00, AMD Radeon Pro WX 7100 Graphics
+67C4, 80, AMD Radeon E9560 / E9565 Graphics
+67C7, 00, AMD Radeon Pro WX 5100 Graphics
+67C7, 80, AMD Radeon E9390 Graphics
+67C0, 00, AMD Radeon Pro WX 7100 Graphics
+67D0, 01, AMD Radeon Pro V7350x2
+67D0, 02, AMD Radeon Pro V7300X
+67E0, 00, AMD Radeon Pro WX Series
+67E3, 00, AMD Radeon Pro WX 4100
+67E8, 00, AMD Radeon Pro WX Series
+67E8, 01, AMD Radeon Pro WX Series
+67E8, 80, AMD Radeon E9260 Graphics
+67EB, 00, AMD Radeon Pro V5300X
+67EF, C0, AMD Radeon RX Graphics
+67EF, C1, AMD Radeon RX 460 Graphics
+67EF, C3, AMD Radeon RX Series
+67EF, C5, AMD Radeon RX 460 Graphics
+67EF, C7, AMD Radeon RX Graphics
+67EF, CF, AMD Radeon RX 460 Graphics
+67EF, E2, AMD Radeon RX 560X
+67EF, E0, AMD Radeon RX 560 Series
+67EF, E1, AMD Radeon RX Series
+67EF, E3, AMD Radeon RX Series
+67EF, E5, AMD Radeon RX 560 Series
+67EF, EF, AMD Radeon RX Graphics
+67EF, FF, AMD Radeon RX 460 Graphics
+67FF, C0, AMD Radeon RX Graphics
+67FF, C1, AMD Radeon RX Graphics
+67FF, CF, AMD Radeon RX 560 Series
+67FF, EF, AMD Radeon RX 560 Series
+67FF, FF, AMD Radeon RX 550 Series
6800, 0, AMD Radeon HD 7970M
-6801, 0, AMD Radeon(TM) HD8970M
+6801, 0, AMD Radeon HD 8970M
6808, 0, ATI FirePro V(FireGL V) Graphics Adapter
6809, 0, ATI FirePro V(FireGL V) Graphics Adapter
-6810, 0, AMD Radeon(TM) HD 8800 Series
-6810, 81, AMD Radeon (TM) R7 370 Series
-6811, 0, AMD Radeon(TM) HD8800 Series
-6811, 81, AMD Radeon (TM) R7 300 Series
+6810, 0, AMD Radeon HD 8800 Series
+6810, 81, AMD Radeon R7 370 Series
+6811, 0, AMD Radeon HD 8800 Series
+6811, 81, AMD Radeon R7 300 Series
6818, 0, AMD Radeon HD 7800 Series
6819, 0, AMD Radeon HD 7800 Series
6820, 0, AMD Radeon HD 8800M Series
-6820, 81, AMD Radeon (TM) R9 M375
-6820, 83, AMD Radeon (TM) R9 M375X
+6820, 81, AMD Radeon R9 M375
+6820, 83, AMD Radeon R9 M375X
6821, 0, AMD Radeon HD 8800M Series
-6821, 87, AMD Radeon (TM) R7 M380
-6821, 83, AMD Radeon R9 (TM) M370X
+6821, 87, AMD Radeon R7 M380
+6821, 83, AMD Radeon R9 M370X
6822, 0, AMD Radeon E8860
6823, 0, AMD Radeon HD 8800M Series
6825, 0, AMD Radeon HD 7800M Series
6827, 0, AMD Radeon HD 7800M Series
6828, 0, ATI FirePro V(FireGL V) Graphics Adapter
682B, 0, AMD Radeon HD 8800M Series
-682B, 87, AMD Radeon (TM) R9 M360
+682B, 87, AMD Radeon R9 M360
682C, 0, AMD FirePro W4100
682D, 0, AMD Radeon HD 7700M Series
682F, 0, AMD Radeon HD 7700M Series
6835, 0, AMD Radeon R7 Series / HD 9000 Series
-6837, 0, AMD Radeon HD7700 Series
+6837, 0, AMD Radeon HD 7700 Series
683D, 0, AMD Radeon HD 7700 Series
683F, 0, AMD Radeon HD 7700 Series
-6860, 00, Radeon Instinct MI25
-6860, 01, Radeon Instinct MI25
-6860, 02, Radeon Instinct MI25
-6860, 03, Radeon Pro V340
-6860, 04, Radeon Instinct MI25x2
-6860, 07, Radeon (TM) Pro V320
-6861, 00, Radeon Pro WX 9100
-6862, 00, Radeon Pro SSG
-6863, 00, Radeon Vega Frontier Edition
-6864, 03, Radeon Pro V340
-6864, 04, Instinct MI25x2
-6868, 00, Radeon (TM) PRO WX 8200
-686C, 00, Radeon Instinct MI25 MxGPU
-686C, 01, Radeon Instinct MI25 MxGPU
-686C, 02, Radeon Instinct MI25 MxGPU
-686C, 03, Radeon Pro V340 MxGPU
-686C, 04, Radeon Instinct MI25x2 MxGPU
-686C, 05, Radeon Pro V340L MxGPU
-686C, 06, Radeon Instinct MI25 MxGPU
-687F, C0, Radeon RX Vega
-687F, C1, Radeon RX Vega
-687F, C3, Radeon RX Vega
+6860, 00, AMD Radeon Instinct MI25
+6860, 01, AMD Radeon Instinct MI25
+6860, 02, AMD Radeon Instinct MI25
+6860, 03, AMD Radeon Pro V340
+6860, 04, AMD Radeon Instinct MI25x2
+6860, 07, AMD Radeon Pro V320
+6861, 00, AMD Radeon Pro WX 9100
+6862, 00, AMD Radeon Pro SSG
+6863, 00, AMD Radeon Vega Frontier Edition
+6864, 03, AMD Radeon Pro V340
+6864, 04, AMD Radeon Instinct MI25x2
+6868, 00, AMD Radeon Pro WX 8200
+686C, 00, AMD Radeon Instinct MI25 MxGPU
+686C, 01, AMD Radeon Instinct MI25 MxGPU
+686C, 02, AMD Radeon Instinct MI25 MxGPU
+686C, 03, AMD Radeon Pro V340 MxGPU
+686C, 04, AMD Radeon Instinct MI25x2 MxGPU
+686C, 05, AMD Radeon Pro V340L MxGPU
+686C, 06, AMD Radeon Instinct MI25 MxGPU
+687F, C0, AMD Radeon RX Vega
+687F, C1, AMD Radeon RX Vega
+687F, C3, AMD Radeon RX Vega
6900, 0, AMD Radeon R7 M260
-6900, 81, AMD Radeon (TM) R7 M360
-6900, 83, AMD Radeon (TM) R7 M340
+6900, 81, AMD Radeon R7 M360
+6900, 83, AMD Radeon R7 M340
6901, 0, AMD Radeon R5 M255
6907, 0, AMD Radeon R5 M255
-6907, 87, AMD Radeon (TM) R5 M315
-6920, 0, AMD RADEON R9 M395X
-6920, 1, AMD RADEON R9 M390X
+6907, 87, AMD Radeon R5 M315
+6920, 0, AMD Radeon R9 M395X
+6920, 1, AMD Radeon R9 M390X
6921, 0, AMD Radeon R9 M295X
6929, 0, AMD FirePro S7150
692B, 0, AMD FirePro W7100
6938, 0, AMD Radeon R9 200 Series
6938, F0, AMD Radeon R9 200 Series
-6938, F1, AMD Radeon (TM) R9 380 Series
+6938, F1, AMD Radeon R9 380 Series
6939, F0, AMD Radeon R9 200 Series
6939, 0, AMD Radeon R9 200 Series
-6939, F1, AMD Radeon (TM) R9 380 Series
-6980, 00, Radeon Pro WX3100
-6981, 00, AMD Radeon (TM) Pro WX 3200 Series
-6981, 01, AMD Radeon (TM) Pro WX 3200 Series
-6981, 10, AMD Radeon (TM) Pro WX 3200 Series
-6985, 00, AMD Radeon Pro WX3100
+6939, F1, AMD Radeon R9 380 Series
+6980, 00, AMD Radeon Pro WX 3100
+6981, 00, AMD Radeon Pro WX 3200 Series
+6981, 01, AMD Radeon Pro WX 3200 Series
+6981, 10, AMD Radeon Pro WX 3200 Series
+6985, 00, AMD Radeon Pro WX 3100
6987, 80, AMD Embedded Radeon E9171
-6987, C0, Radeon 550X Series
+6987, C0, AMD Radeon 550X Series
6987, C1, AMD Radeon RX 640
-6987, C3, Radeon 540X Series
-6995, 00, AMD Radeon Pro WX2100
-6997, 00, Radeon Pro WX2100
+6987, C3, AMD Radeon 540X Series
+6995, 00, AMD Radeon Pro WX 2100
+6997, 00, AMD Radeon Pro WX 2100
699F, 81, AMD Embedded Radeon E9170 Series
-699F, C0, Radeon 500 Series
-699F, C1, Radeon 540 Series
-699F, C3, Radeon 500 Series
-699F, C7, Radeon RX550/550 Series
-7300, C1, AMD FirePro (TM) S9300 x2
-7300, C8, AMD Radeon (TM) R9 Fury Series
-7300, C9, Radeon (TM) Pro Duo
-7300, CB, AMD Radeon (TM) R9 Fury Series
-7300, CA, AMD Radeon (TM) R9 Fury Series
+699F, C0, AMD Radeon 500 Series
+699F, C1, AMD Radeon 540 Series
+699F, C3, AMD Radeon 500 Series
+699F, C7, AMD Radeon RX 550 / 550 Series
+7300, C1, AMD FirePro S9300 x2
+7300, C8, AMD Radeon R9 Fury Series
+7300, C9, AMD Radeon Pro Duo
+7300, CB, AMD Radeon R9 Fury Series
+7300, CA, AMD Radeon R9 Fury Series
7312, 00, AMD Radeon Pro W5700
731E, C6, AMD Radeon RX 5700XTB
731E, C7, AMD Radeon RX 5700B
@@ -264,16 +267,26 @@
731F, C5, AMD Radeon RX 5700 XT
731F, CA, AMD Radeon RX 5600 XT
731F, CB, AMD Radeon RX 5600 OEM
-7340, C1, Radeon RX 5500M
-7340, C5, Radeon RX 5500 XT
-7340, C7, Radeon RX 5500
+7340, C1, AMD Radeon RX 5500M
+7340, C5, AMD Radeon RX 5500 XT
+7340, C7, AMD Radeon RX 5500
7340, C9, AMD Radeon RX 5500XTB
-7340, CF, Radeon RX 5300
+7340, CF, AMD Radeon RX 5300
7341, 00, AMD Radeon Pro W5500
7347, 00, AMD Radeon Pro W5500M
+73A3, 00, AMD Radeon Pro W6800
+73AF, C0, AMD Radeon RX 6900 XT
73BF, C0, AMD Radeon RX 6900 XT
73BF, C1, AMD Radeon RX 6800 XT
73BF, C3, AMD Radeon RX 6800
+73DF, C1, AMD Radeon RX 6700 XT
+73DF, C3, AMD Radeon RX 6800M
+73DF, C5, AMD Radeon RX 6700 XT
+73DF, CF, AMD Radeon RX 6700M
+73E1, 00, AMD Radeon Pro W6600M
+73E3, 00, AMD Radeon Pro W6600
+73FF, C1, AMD Radeon RX 6600 XT
+73FF, C3, AMD Radeon RX 6600M
9874, C4, AMD Radeon R7 Graphics
9874, C5, AMD Radeon R6 Graphics
9874, C6, AMD Radeon R6 Graphics
diff --git a/etnaviv/etnaviv_bo.c b/etnaviv/etnaviv_bo.c
index 43ce6b4e..27123e67 100644
--- a/etnaviv/etnaviv_bo.c
+++ b/etnaviv/etnaviv_bo.c
@@ -48,12 +48,8 @@ drm_private void bo_del(struct etna_bo *bo)
drmHashDelete(bo->dev->name_table, bo->name);
if (bo->handle) {
- struct drm_gem_close req = {
- .handle = bo->handle,
- };
-
drmHashDelete(bo->dev->handle_table, bo->handle);
- drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(bo->dev->fd, bo->handle);
}
free(bo);
@@ -82,12 +78,7 @@ static struct etna_bo *bo_from_handle(struct etna_device *dev,
struct etna_bo *bo = calloc(sizeof(*bo), 1);
if (!bo) {
- struct drm_gem_close req = {
- .handle = handle,
- };
-
- drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
-
+ drmCloseBufferHandle(dev->fd, handle);
return NULL;
}
diff --git a/etnaviv/meson.build b/etnaviv/meson.build
index 6040cf63..8b82ed07 100644
--- a/etnaviv/meson.build
+++ b/etnaviv/meson.build
@@ -19,7 +19,7 @@
# SOFTWARE.
-libdrm_etnaviv = shared_library(
+libdrm_etnaviv = library(
'drm_etnaviv',
[
files(
diff --git a/exynos/exynos_drm.c b/exynos/exynos_drm.c
index b008ad73..3e322a17 100644
--- a/exynos/exynos_drm.c
+++ b/exynos/exynos_drm.c
@@ -176,11 +176,7 @@ drm_public void exynos_bo_destroy(struct exynos_bo *bo)
munmap(bo->vaddr, bo->size);
if (bo->handle) {
- struct drm_gem_close req = {
- .handle = bo->handle,
- };
-
- drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(bo->dev->fd, bo->handle);
}
free(bo);
diff --git a/exynos/meson.build b/exynos/meson.build
index 40d66fc1..7d1edfea 100644
--- a/exynos/meson.build
+++ b/exynos/meson.build
@@ -18,7 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-libdrm_exynos = shared_library(
+libdrm_exynos = library(
'drm_exynos',
[files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
c_args : libdrm_c_args,
diff --git a/freedreno/freedreno_bo.c b/freedreno/freedreno_bo.c
index efc5b71f..3cdc9737 100644
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -62,10 +62,7 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
bo = dev->funcs->bo_from_handle(dev, size, handle);
if (!bo) {
- struct drm_gem_close req = {
- .handle = handle,
- };
- drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(dev->fd, handle);
return NULL;
}
bo->dev = fd_device_ref(dev);
@@ -263,13 +260,10 @@ drm_private void bo_del(struct fd_bo *bo)
*/
if (bo->handle) {
- struct drm_gem_close req = {
- .handle = bo->handle,
- };
drmHashDelete(bo->dev->handle_table, bo->handle);
if (bo->name)
drmHashDelete(bo->dev->name_table, bo->name);
- drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(bo->dev->fd, bo->handle);
}
bo->funcs->destroy(bo);
diff --git a/freedreno/meson.build b/freedreno/meson.build
index 63b84fc9..49e66593 100644
--- a/freedreno/meson.build
+++ b/freedreno/meson.build
@@ -39,7 +39,7 @@ if with_freedreno_kgsl
)
endif
-libdrm_freedreno = shared_library(
+libdrm_freedreno = library(
'drm_freedreno',
[files_freedreno, config_file],
c_args : libdrm_c_args,
diff --git a/gen_prebuilt_intermediates.sh b/gen_prebuilt_intermediates.sh
new file mode 100755
index 00000000..aad97ac3
--- /dev/null
+++ b/gen_prebuilt_intermediates.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+python3 gen_table_fourcc.py include/drm/drm_fourcc.h generated_static_table_fourcc.h
diff --git a/gen_table_fourcc.py b/gen_table_fourcc.py
new file mode 100644
index 00000000..4236fd79
--- /dev/null
+++ b/gen_table_fourcc.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+
+# Copyright 2021 Collabora, Ltd.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial
+# portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# Helper script that reads drm_fourcc.h and writes a static table with the
+# simpler format token modifiers
+
+import sys
+import re
+
+filename = sys.argv[1]
+towrite = sys.argv[2]
+
+fm_re = {
+ 'intel': r'^#define I915_FORMAT_MOD_(\w+)',
+ 'others': r'^#define DRM_FORMAT_MOD_((?:ARM|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
+ 'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
+}
+
+def print_fm_intel(f, f_mod):
+ f.write(' {{ DRM_MODIFIER_INTEL({}, {}) }},\n'.format(f_mod, f_mod))
+
+# generic write func
+def print_fm(f, vendor, mod, f_name):
+ f.write(' {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name))
+
+with open(filename, "r") as f:
+ data = f.read()
+ for k, v in fm_re.items():
+ fm_re[k] = re.findall(v, data, flags=re.M)
+
+with open(towrite, "w") as f:
+ f.write('''\
+/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
+ that script instead of adding here entries manually! */
+static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
+''')
+ f.write(' { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },\n')
+ f.write(' { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
+
+ for entry in fm_re['intel']:
+ print_fm_intel(f, entry)
+
+ for entry in fm_re['others']:
+ (vendor, mod) = entry.split('_', 1)
+ if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
+ continue
+ print_fm(f, vendor, mod, mod)
+
+ f.write('''\
+};
+''')
+
+ f.write('''\
+static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
+''')
+
+ for entry in fm_re['vendors']:
+ f.write(" {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
+
+ f.write('''\
+};
+''')
diff --git a/generated_static_table_fourcc.h b/generated_static_table_fourcc.h
new file mode 100644
index 00000000..a8d014d0
--- /dev/null
+++ b/generated_static_table_fourcc.h
@@ -0,0 +1,49 @@
+/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
+ that script instead of adding here entries manually! */
+static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
+ { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },
+ { DRM_MODIFIER_LINEAR(NONE, LINEAR) },
+ { DRM_MODIFIER_INTEL(X_TILED, X_TILED) },
+ { DRM_MODIFIER_INTEL(Y_TILED, Y_TILED) },
+ { DRM_MODIFIER_INTEL(Yf_TILED, Yf_TILED) },
+ { DRM_MODIFIER_INTEL(Y_TILED_CCS, Y_TILED_CCS) },
+ { DRM_MODIFIER_INTEL(Yf_TILED_CCS, Yf_TILED_CCS) },
+ { DRM_MODIFIER_INTEL(Y_TILED_GEN12_RC_CCS, Y_TILED_GEN12_RC_CCS) },
+ { DRM_MODIFIER_INTEL(Y_TILED_GEN12_MC_CCS, Y_TILED_GEN12_MC_CCS) },
+ { DRM_MODIFIER_INTEL(Y_TILED_GEN12_RC_CCS_CC, Y_TILED_GEN12_RC_CCS_CC) },
+ { DRM_MODIFIER(SAMSUNG, 64_32_TILE, 64_32_TILE) },
+ { DRM_MODIFIER(SAMSUNG, 16_16_TILE, 16_16_TILE) },
+ { DRM_MODIFIER(QCOM, COMPRESSED, COMPRESSED) },
+ { DRM_MODIFIER(VIVANTE, TILED, TILED) },
+ { DRM_MODIFIER(VIVANTE, SUPER_TILED, SUPER_TILED) },
+ { DRM_MODIFIER(VIVANTE, SPLIT_TILED, SPLIT_TILED) },
+ { DRM_MODIFIER(VIVANTE, SPLIT_SUPER_TILED, SPLIT_SUPER_TILED) },
+ { DRM_MODIFIER(NVIDIA, TEGRA_TILED, TEGRA_TILED) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_ONE_GOB, 16BX2_BLOCK_ONE_GOB) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_TWO_GOB, 16BX2_BLOCK_TWO_GOB) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_FOUR_GOB, 16BX2_BLOCK_FOUR_GOB) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_EIGHT_GOB, 16BX2_BLOCK_EIGHT_GOB) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_SIXTEEN_GOB, 16BX2_BLOCK_SIXTEEN_GOB) },
+ { DRM_MODIFIER(NVIDIA, 16BX2_BLOCK_THIRTYTWO_GOB, 16BX2_BLOCK_THIRTYTWO_GOB) },
+ { DRM_MODIFIER(BROADCOM, VC4_T_TILED, VC4_T_TILED) },
+ { DRM_MODIFIER(BROADCOM, SAND32, SAND32) },
+ { DRM_MODIFIER(BROADCOM, SAND64, SAND64) },
+ { DRM_MODIFIER(BROADCOM, SAND128, SAND128) },
+ { DRM_MODIFIER(BROADCOM, SAND256, SAND256) },
+ { DRM_MODIFIER(BROADCOM, UIF, UIF) },
+ { DRM_MODIFIER(ARM, 16X16_BLOCK_U_INTERLEAVED, 16X16_BLOCK_U_INTERLEAVED) },
+ { DRM_MODIFIER(ALLWINNER, TILED, TILED) },
+};
+static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
+ { DRM_FORMAT_MOD_VENDOR_NONE, "NONE" },
+ { DRM_FORMAT_MOD_VENDOR_INTEL, "INTEL" },
+ { DRM_FORMAT_MOD_VENDOR_AMD, "AMD" },
+ { DRM_FORMAT_MOD_VENDOR_NVIDIA, "NVIDIA" },
+ { DRM_FORMAT_MOD_VENDOR_SAMSUNG, "SAMSUNG" },
+ { DRM_FORMAT_MOD_VENDOR_QCOM, "QCOM" },
+ { DRM_FORMAT_MOD_VENDOR_VIVANTE, "VIVANTE" },
+ { DRM_FORMAT_MOD_VENDOR_BROADCOM, "BROADCOM" },
+ { DRM_FORMAT_MOD_VENDOR_ARM, "ARM" },
+ { DRM_FORMAT_MOD_VENDOR_ALLWINNER, "ALLWINNER" },
+ { DRM_FORMAT_MOD_VENDOR_AMLOGIC, "AMLOGIC" },
+};
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 7fb9c09e..0cbd1540 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -116,8 +116,6 @@ extern "C" {
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
-/* Flag that create shadow bo(GTT) while allocating vram bo */
-#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
/* Flag that BO is always valid in this VM */
@@ -138,6 +136,10 @@ extern "C" {
* accessing it with various hw blocks
*/
#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE (1 << 11)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -755,6 +757,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_SIZE 0x1
/* Subquery id: Query vbios image */
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+ /* Subquery id: Query vbios info */
+ #define AMDGPU_INFO_VBIOS_INFO 0x3
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
/* Query sensor related information */
@@ -782,6 +786,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
@@ -878,6 +888,10 @@ struct drm_amdgpu_info {
struct {
__u32 type;
} sensor_info;
+
+ struct {
+ __u32 type;
+ } video_cap;
};
};
@@ -938,6 +952,15 @@ struct drm_amdgpu_info_firmware {
__u32 feature;
};
+struct drm_amdgpu_info_vbios {
+ __u8 name[64];
+ __u8 vbios_pn[64];
+ __u32 version;
+ __u32 pad;
+ __u8 vbios_ver_str[32];
+ __u8 date[32];
+};
+
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2 2
@@ -1074,6 +1097,30 @@ struct drm_amdgpu_info_vce_clock_table {
__u32 pad;
};
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7
+#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8
+
+struct drm_amdgpu_info_video_codec_info {
+ __u32 valid;
+ __u32 max_width;
+ __u32 max_height;
+ __u32 max_pixels_per_frame;
+ __u32 max_level;
+ __u32 pad;
+};
+
+struct drm_amdgpu_info_video_caps {
+ struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
+};
+
/*
* Supported GPU families
*/
@@ -1087,6 +1134,7 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_RV 142 /* Raven */
#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#define AMDGPU_FAMILY_VGH 144 /* Van Gogh */
+#define AMDGPU_FAMILY_YC 146 /* Yellow Carp */
#if defined(__cplusplus)
}
diff --git a/include/drm/drm.h b/include/drm/drm.h
index c7fd2a35..398c396f 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
* Header for the Direct Rendering Manager
*
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
*/
/*
@@ -79,7 +78,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/**
+/*
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -95,7 +94,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/**
+/*
* Drawable information.
*/
struct drm_drawable_info {
@@ -103,7 +102,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/**
+/*
* Texture region,
*/
struct drm_tex_region {
@@ -114,7 +113,7 @@ struct drm_tex_region {
unsigned int age;
};
-/**
+/*
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -126,7 +125,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/**
+/*
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -143,7 +142,7 @@ struct drm_version {
char *desc; /**< User-space buffer to hold desc */
};
-/**
+/*
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -162,7 +161,7 @@ struct drm_block {
int unused;
};
-/**
+/*
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -177,7 +176,7 @@ struct drm_control {
int irq;
};
-/**
+/*
* Type of memory to map.
*/
enum drm_map_type {
@@ -189,7 +188,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/**
+/*
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -208,7 +207,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/**
+/*
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -225,7 +224,7 @@ struct drm_map {
/* Private data */
};
-/**
+/*
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -257,7 +256,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/**
+/*
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -268,7 +267,7 @@ struct drm_stats {
} data[15];
};
-/**
+/*
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -283,7 +282,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/**
+/*
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -293,7 +292,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/**
+/*
* DMA flags
*
* \warning
@@ -322,7 +321,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/**
+/*
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -345,7 +344,7 @@ struct drm_buf_desc {
*/
};
-/**
+/*
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -353,7 +352,7 @@ struct drm_buf_info {
struct drm_buf_desc *list;
};
-/**
+/*
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -361,7 +360,7 @@ struct drm_buf_free {
int *list;
};
-/**
+/*
* Buffer information
*
* \sa drm_buf_map.
@@ -373,7 +372,7 @@ struct drm_buf_pub {
void *address; /**< Address of buffer */
};
-/**
+/*
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -386,7 +385,7 @@ struct drm_buf_map {
struct drm_buf_pub *list; /**< Buffer information */
};
-/**
+/*
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -411,7 +410,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/**
+/*
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -421,7 +420,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/**
+/*
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -429,14 +428,14 @@ struct drm_ctx_res {
struct drm_ctx *contexts;
};
-/**
+/*
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/**
+/*
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -450,14 +449,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/**
+/*
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/**
+/*
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -499,7 +498,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/**
+/*
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -512,7 +511,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/**
+/*
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -522,7 +521,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/**
+/*
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -531,7 +530,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/**
+/*
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -543,7 +542,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/**
+/*
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -553,7 +552,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/**
+/*
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -574,7 +573,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/**
+/*
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -582,7 +581,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/**
+/*
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -592,14 +591,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -608,7 +607,7 @@ struct drm_gem_flink {
__u32 name;
};
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -620,33 +619,150 @@ struct drm_gem_open {
__u64 size;
};
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a CRTC index in the high bits of
+ * &drm_wait_vblank_request.type.
+ *
+ * Starting kernel version 2.6.39, this capability is always set to 1.
+ */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers, it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting kernel version 4.15, this capability is always set to 1.
+ */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting kernel version 4.12, this capability is always set to 1.
+ */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
@@ -655,9 +771,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -666,13 +785,25 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -680,6 +811,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -687,12 +822,15 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
@@ -944,7 +1082,7 @@ extern "C" {
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
-/**
+/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -955,7 +1093,7 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/**
+/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index ed0258c6..957c7be2 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -168,6 +168,13 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
@@ -474,7 +481,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consists out of four 256 byte units, which are also laid
+ * Each group therefore consits out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -528,6 +535,25 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
+ * Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
+ * compression.
+ *
+ * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -874,9 +900,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
/*
* The top 4 bits (out of the 56 bits alloted for specifying vendor specific
- * modifiers) denote the category for modifiers. Currently we have only two
- * categories of modifiers ie AFBC and MISC. We can have a maximum of sixteen
- * different categories.
+ * modifiers) denote the category for modifiers. Currently we have three
+ * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
+ * sixteen different categories.
*/
#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
@@ -992,6 +1018,109 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AFBC_FORMAT_MOD_USM (1ULL << 12)
/*
+ * Arm Fixed-Rate Compression (AFRC) modifiers
+ *
+ * AFRC is a proprietary fixed rate image compression protocol and format,
+ * designed to provide guaranteed bandwidth and memory footprint
+ * reductions in graphics and media use-cases.
+ *
+ * AFRC buffers consist of one or more planes, with the same components
+ * and meaning as an uncompressed buffer using the same pixel format.
+ *
+ * Within each plane, the pixel/luma/chroma values are grouped into
+ * "coding unit" blocks which are individually compressed to a
+ * fixed size (in bytes). All coding units within a given plane of a buffer
+ * store the same number of values, and have the same compressed size.
+ *
+ * The coding unit size is configurable, allowing different rates of compression.
+ *
+ * The start of each AFRC buffer plane must be aligned to an alignment granule which
+ * depends on the coding unit size.
+ *
+ * Coding Unit Size Plane Alignment
+ * ---------------- ---------------
+ * 16 bytes 1024 bytes
+ * 24 bytes 512 bytes
+ * 32 bytes 2048 bytes
+ *
+ * Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
+ * to a multiple of the paging tile dimensions.
+ * The dimensions of each paging tile depend on whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Layout Paging Tile Width Paging Tile Height
+ * ------ ----------------- ------------------
+ * SCAN 16 coding units 4 coding units
+ * ROT 8 coding units 8 coding units
+ *
+ * The dimensions of each coding unit depend on the number of components
+ * in the compressed plane and whether the buffer is optimised for
+ * scanline (SCAN layout) or rotated (ROT layout) access.
+ *
+ * Number of Components in Plane Layout Coding Unit Width Coding Unit Height
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 SCAN 16 samples 4 samples
+ * Example: 16x4 luma samples in a 'Y' plane
+ * 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 1 ROT 8 samples 8 samples
+ * Example: 8x8 luma samples in a 'Y' plane
+ * 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 2 DONT CARE 8 samples 4 samples
+ * Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
+ * ----------------------------- --------- ----------------- ------------------
+ * 3 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer without alpha
+ * ----------------------------- --------- ----------------- ------------------
+ * 4 DONT CARE 4 samples 4 samples
+ * Example: 4x4 pixels in an RGB buffer with alpha
+ */
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
+
+#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
+
+/*
+ * AFRC coding unit size modifier.
+ *
+ * Indicates the number of bytes used to store each compressed coding unit for
+ * one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
+ * is the same for both Cb and Cr, which may be stored in separate planes.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
+ * each compressed coding unit in the first plane of the buffer. For RGBA buffers
+ * this is the only plane, while for semi-planar and fully-planar YUV buffers,
+ * this corresponds to the luma plane.
+ *
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
+ * each compressed coding unit in the second and third planes in the buffer.
+ * For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
+ *
+ * For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
+ * and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
+ * For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
+ * AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
+ */
+#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
+#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
+#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
+
+#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
+#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
+
+/*
+ * AFRC scanline memory layout.
+ *
+ * Indicates if the buffer uses the scanline-optimised layout
+ * for an AFRC encoded buffer, otherwise, it uses the rotation-optimised layout.
+ * The memory layout is the same for all planes.
+ */
+#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
+
+/*
* Arm 16x16 Block U-Interleaved modifier
*
* This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
@@ -1036,9 +1165,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Not all combinations are valid, and different SoCs may support different
* combinations of layout and options.
*/
-#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_layout_mask 0xff
#define __fourcc_mod_amlogic_options_shift 8
-#define __fourcc_mod_amlogic_options_mask 0xf
+#define __fourcc_mod_amlogic_options_mask 0xff
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
fourcc_mod_code(AMLOGIC, \
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 96416e6d..9b6722d4 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -218,6 +218,27 @@ extern "C" {
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+/**
+ * struct drm_mode_modeinfo - Display mode information.
+ * @clock: pixel clock in kHz
+ * @hdisplay: horizontal display size
+ * @hsync_start: horizontal sync start
+ * @hsync_end: horizontal sync end
+ * @htotal: horizontal total size
+ * @hskew: horizontal skew
+ * @vdisplay: vertical display size
+ * @vsync_start: vertical sync start
+ * @vsync_end: vertical sync end
+ * @vtotal: vertical total size
+ * @vscan: vertical scan
+ * @vrefresh: approximate vertical refresh rate in Hz
+ * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines
+ * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines
+ * @name: string describing the mode resolution
+ *
+ * This is the user-space API display mode information structure. For the
+ * kernel version see struct drm_display_mode.
+ */
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
@@ -367,28 +388,95 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_DPI 17
#define DRM_MODE_CONNECTOR_WRITEBACK 18
#define DRM_MODE_CONNECTOR_SPI 19
+#define DRM_MODE_CONNECTOR_USB 20
+/**
+ * struct drm_mode_get_connector - Get connector metadata.
+ *
+ * User-space can perform a GETCONNECTOR ioctl to retrieve information about a
+ * connector. User-space is expected to retrieve encoders, modes and properties
+ * by performing this ioctl at least twice: the first time to retrieve the
+ * number of elements, the second time to retrieve the elements themselves.
+ *
+ * To retrieve the number of elements, set @count_props and @count_encoders to
+ * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct
+ * drm_mode_modeinfo element.
+ *
+ * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr,
+ * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and
+ * @count_encoders to their capacity.
+ *
+ * Performing the ioctl only twice may be racy: the number of elements may have
+ * changed with a hotplug event in-between the two ioctls. User-space is
+ * expected to retry the last ioctl until the number of elements stabilizes.
+ * The kernel won't fill any array which doesn't have the expected length.
+ *
+ * **Force-probing a connector**
+ *
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
+ *
+ * User-space needs to force-probe connectors to ensure their metadata is
+ * up-to-date at startup and after receiving a hot-plug event. User-space
+ * may perform a forced-probe when the user explicitly requests it. User-space
+ * shouldn't perform a forced-probe in other situations.
+ */
struct drm_mode_get_connector {
-
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
__u64 encoders_ptr;
+ /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */
__u64 modes_ptr;
+ /** @props_ptr: Pointer to ``__u32`` array of property IDs. */
__u64 props_ptr;
+ /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */
__u64 prop_values_ptr;
+ /** @count_modes: Number of modes. */
__u32 count_modes;
+ /** @count_props: Number of properties. */
__u32 count_props;
+ /** @count_encoders: Number of encoders. */
__u32 count_encoders;
- __u32 encoder_id; /**< Current Encoder */
- __u32 connector_id; /**< Id */
+ /** @encoder_id: Object ID of the current encoder. */
+ __u32 encoder_id;
+ /** @connector_id: Object ID of the connector. */
+ __u32 connector_id;
+ /**
+ * @connector_type: Type of the connector.
+ *
+ * See DRM_MODE_CONNECTOR_* defines.
+ */
__u32 connector_type;
+ /**
+ * @connector_type_id: Type-specific connector number.
+ *
+ * This is not an object ID. This is a per-type connector number. Each
+ * (type, type_id) combination is unique across all connectors of a DRM
+ * device.
+ */
__u32 connector_type_id;
+ /**
+ * @connection: Status of the connector.
+ *
+ * See enum drm_connector_status.
+ */
__u32 connection;
- __u32 mm_width; /**< width in millimeters */
- __u32 mm_height; /**< height in millimeters */
+ /** @mm_width: Width of the connected sink in millimeters. */
+ __u32 mm_width;
+ /** @mm_height: Height of the connected sink in millimeters. */
+ __u32 mm_height;
+ /**
+ * @subpixel: Subpixel order of the connected sink.
+ *
+ * See enum subpixel_order.
+ */
__u32 subpixel;
+ /** @pad: Padding, must be zero. */
__u32 pad;
};
@@ -417,7 +505,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
- * without being aware that this could be triggering a lengthy modeset.
+ * witout being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@@ -904,25 +992,24 @@ struct drm_format_modifier {
};
/**
- * struct drm_mode_create_blob - Create New block property
- * @data: Pointer to data to copy.
- * @length: Length of data to copy.
- * @blob_id: new property ID.
+ * struct drm_mode_create_blob - Create New blob property
+ *
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
- /** Pointer to data to copy. */
+ /** @data: Pointer to data to copy. */
__u64 data;
- /** Length of data to copy. */
+ /** @length: Length of data to copy. */
__u32 length;
- /** Return: new property ID. */
+ /** @blob_id: Return: new property ID. */
__u32 blob_id;
};
/**
* struct drm_mode_destroy_blob - Destroy user blob
* @blob_id: blob_id to destroy
+ *
* Destroy a user-created blob property.
*
* User-space can release blobs as soon as they do not need to refer to them by
@@ -937,36 +1024,32 @@ struct drm_mode_destroy_blob {
/**
* struct drm_mode_create_lease - Create lease
- * @object_ids: Pointer to array of object ids.
- * @object_count: Number of object ids.
- * @flags: flags for new FD.
- * @lessee_id: unique identifier for lessee.
- * @fd: file descriptor to new drm_master file.
+ *
* Lease mode resources, creating another drm_master.
*/
struct drm_mode_create_lease {
- /** Pointer to array of object ids (__u32) */
+ /** @object_ids: Pointer to array of object ids (__u32) */
__u64 object_ids;
- /** Number of object ids */
+ /** @object_count: Number of object ids */
__u32 object_count;
- /** flags for new FD (O_CLOEXEC, etc) */
+ /** @flags: flags for new FD (O_CLOEXEC, etc) */
__u32 flags;
- /** Return: unique identifier for lessee. */
+ /** @lessee_id: Return: unique identifier for lessee. */
__u32 lessee_id;
- /** Return: file descriptor to new drm_master file */
+ /** @fd: Return: file descriptor to new drm_master file */
__u32 fd;
};
/**
* struct drm_mode_list_lessees - List lessees
- * @count_lessees: Number of lessees.
- * @pad: pad.
- * @lessees_ptr: Pointer to lessess.
- * List lesses from a drm_master
+ *
+ * List lesses from a drm_master.
*/
struct drm_mode_list_lessees {
- /** Number of lessees.
+ /**
+ * @count_lessees: Number of lessees.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -974,23 +1057,26 @@ struct drm_mode_list_lessees {
* the size and then the data.
*/
__u32 count_lessees;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to lessees.
- * pointer to __u64 array of lessee ids
+ /**
+ * @lessees_ptr: Pointer to lessees.
+ *
+ * Pointer to __u64 array of lessee ids
*/
__u64 lessees_ptr;
};
/**
* struct drm_mode_get_lease - Get Lease
- * @count_objects: Number of leased objects.
- * @pad: pad.
- * @objects_ptr: Pointer to objects.
- * Get leased objects
+ *
+ * Get leased objects.
*/
struct drm_mode_get_lease {
- /** Number of leased objects.
+ /**
+ * @count_objects: Number of leased objects.
+ *
* On input, provides length of the array.
* On output, provides total number. No
* more than the input number will be written
@@ -998,22 +1084,22 @@ struct drm_mode_get_lease {
* the size and then the data.
*/
__u32 count_objects;
+ /** @pad: Padding. */
__u32 pad;
- /** Pointer to objects.
- * pointer to __u32 array of object ids
+ /**
+ * @objects_ptr: Pointer to objects.
+ *
+ * Pointer to __u32 array of object ids.
*/
__u64 objects_ptr;
};
/**
* struct drm_mode_revoke_lease - Revoke lease
- * @lessee_id: Unique ID of lessee.
- * Revoke lease
*/
struct drm_mode_revoke_lease {
- /** Unique ID of lessee
- */
+ /** @lessee_id: Unique ID of lessee */
__u32 lessee_id;
};
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index f06a789f..a13e20cc 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -46,12 +46,16 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -67,10 +71,17 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 pad;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
struct drm_virtgpu_getparam {
__u64 param;
@@ -100,7 +111,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
struct drm_virtgpu_3d_box {
@@ -117,6 +128,8 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -124,6 +137,8 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -140,6 +155,47 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -175,6 +231,14 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/intel/i915_pciids.h b/intel/i915_pciids.h
index ebd0dd1c..c00ac546 100644
--- a/intel/i915_pciids.h
+++ b/intel/i915_pciids.h
@@ -632,17 +632,38 @@
INTEL_VGA_DEVICE(0x4905, info), \
INTEL_VGA_DEVICE(0x4906, info), \
INTEL_VGA_DEVICE(0x4907, info), \
- INTEL_VGA_DEVICE(0x4908, info)
+ INTEL_VGA_DEVICE(0x4908, info), \
+ INTEL_VGA_DEVICE(0x4909, info)
/* ADL-S */
#define INTEL_ADLS_IDS(info) \
INTEL_VGA_DEVICE(0x4680, info), \
- INTEL_VGA_DEVICE(0x4681, info), \
INTEL_VGA_DEVICE(0x4682, info), \
- INTEL_VGA_DEVICE(0x4683, info), \
+ INTEL_VGA_DEVICE(0x4688, info), \
+ INTEL_VGA_DEVICE(0x468A, info), \
INTEL_VGA_DEVICE(0x4690, info), \
- INTEL_VGA_DEVICE(0x4691, info), \
INTEL_VGA_DEVICE(0x4692, info), \
INTEL_VGA_DEVICE(0x4693, info)
+/* ADL-P */
+#define INTEL_ADLP_IDS(info) \
+ INTEL_VGA_DEVICE(0x46A0, info), \
+ INTEL_VGA_DEVICE(0x46A1, info), \
+ INTEL_VGA_DEVICE(0x46A2, info), \
+ INTEL_VGA_DEVICE(0x46A3, info), \
+ INTEL_VGA_DEVICE(0x46A6, info), \
+ INTEL_VGA_DEVICE(0x46A8, info), \
+ INTEL_VGA_DEVICE(0x46AA, info), \
+ INTEL_VGA_DEVICE(0x462A, info), \
+ INTEL_VGA_DEVICE(0x4626, info), \
+ INTEL_VGA_DEVICE(0x4628, info), \
+ INTEL_VGA_DEVICE(0x46B0, info), \
+ INTEL_VGA_DEVICE(0x46B1, info), \
+ INTEL_VGA_DEVICE(0x46B2, info), \
+ INTEL_VGA_DEVICE(0x46B3, info), \
+ INTEL_VGA_DEVICE(0x46C0, info), \
+ INTEL_VGA_DEVICE(0x46C1, info), \
+ INTEL_VGA_DEVICE(0x46C2, info), \
+ INTEL_VGA_DEVICE(0x46C3, info)
+
#endif /* _I915_PCIIDS_H */
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 023af61f..b28ea74d 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -114,7 +114,6 @@ typedef struct _drm_intel_bufmgr_gem {
pthread_mutex_t lock;
- struct drm_i915_gem_exec_object *exec_objects;
struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
int exec_size;
@@ -480,44 +479,6 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
* access flags.
*/
static void
-drm_intel_add_validate_buffer(drm_intel_bo *bo)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- int index;
-
- if (bo_gem->validate_index != -1)
- return;
-
- /* Extend the array of validation entries as necessary. */
- if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
- int new_size = bufmgr_gem->exec_size * 2;
-
- if (new_size == 0)
- new_size = 5;
-
- bufmgr_gem->exec_objects =
- realloc(bufmgr_gem->exec_objects,
- sizeof(*bufmgr_gem->exec_objects) * new_size);
- bufmgr_gem->exec_bos =
- realloc(bufmgr_gem->exec_bos,
- sizeof(*bufmgr_gem->exec_bos) * new_size);
- bufmgr_gem->exec_size = new_size;
- }
-
- index = bufmgr_gem->exec_count;
- bo_gem->validate_index = index;
- /* Fill in array entry */
- bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
- bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
- bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
- bufmgr_gem->exec_objects[index].alignment = bo->align;
- bufmgr_gem->exec_objects[index].offset = 0;
- bufmgr_gem->exec_bos[index] = bo;
- bufmgr_gem->exec_count++;
-}
-
-static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -1191,7 +1152,6 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- struct drm_gem_close close;
int ret;
DRMLISTDEL(&bo_gem->vma_list);
@@ -1215,11 +1175,9 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
/* Close this object */
- memclear(close);
- close.handle = bo_gem->gem_handle;
- ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ ret = drmCloseBufferHandle(bufmgr_gem->fd, bo_gem->gem_handle);
if (ret != 0) {
- DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+ DBG("drmCloseBufferHandle %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
free(bo);
@@ -1732,6 +1690,82 @@ drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
return drm_intel_gem_bo_unmap(bo);
}
+static bool is_cache_coherent(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_caching arg = {};
+
+ arg.handle = bo_gem->gem_handle;
+ if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
+ assert(false);
+ return arg.caching != I915_CACHING_NONE;
+}
+
+static void set_domain(drm_intel_bo *bo, uint32_t read, uint32_t write)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain arg = {};
+
+ arg.handle = bo_gem->gem_handle;
+ arg.read_domains = read;
+ arg.write_domain = write;
+ if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg))
+ assert(false);
+}
+
+static int mmap_write(drm_intel_bo *bo, unsigned long offset,
+ unsigned long length, const void *buf)
+{
+ void *map = NULL;
+
+ if (!length)
+ return 0;
+
+ if (is_cache_coherent(bo)) {
+ map = drm_intel_gem_bo_map__cpu(bo);
+ if (map)
+ set_domain(bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ }
+ if (!map) {
+ map = drm_intel_gem_bo_map__wc(bo);
+ if (map)
+ set_domain(bo, I915_GEM_DOMAIN_WC, I915_GEM_DOMAIN_WC);
+ }
+
+ assert(map);
+ memcpy((char *)map + offset, buf, length);
+ drm_intel_gem_bo_unmap(bo);
+ return 0;
+}
+
+static int mmap_read(drm_intel_bo *bo, unsigned long offset,
+ unsigned long length, void *buf)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ void *map = NULL;
+
+ if (!length)
+ return 0;
+
+ if (bufmgr_gem->has_llc || is_cache_coherent(bo)) {
+ map = drm_intel_gem_bo_map__cpu(bo);
+ if (map)
+ set_domain(bo, I915_GEM_DOMAIN_CPU, 0);
+ }
+ if (!map) {
+ map = drm_intel_gem_bo_map__wc(bo);
+ if (map)
+ set_domain(bo, I915_GEM_DOMAIN_WC, 0);
+ }
+
+ assert(map);
+ memcpy(buf, (char *)map + offset, length);
+ drm_intel_gem_bo_unmap(bo);
+ return 0;
+}
+
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
@@ -1752,14 +1786,20 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PWRITE,
&pwrite);
- if (ret != 0) {
+ if (ret)
ret = -errno;
+
+ if (ret != 0 && ret != -EOPNOTSUPP) {
DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
+ return ret;
}
- return ret;
+ if (ret == -EOPNOTSUPP)
+ mmap_write(bo, offset, size, data);
+
+ return 0;
}
static int
@@ -1807,14 +1847,20 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PREAD,
&pread);
- if (ret != 0) {
+ if (ret)
ret = -errno;
+
+ if (ret != 0 && ret != -EOPNOTSUPP) {
DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
+ return ret;
}
- return ret;
+ if (ret == -EOPNOTSUPP)
+ mmap_read(bo, offset, size, data);
+
+ return 0;
}
/** Waits for all GPU rendering with the object to have completed. */
@@ -1914,11 +1960,9 @@ static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
- struct drm_gem_close close_bo;
int i, ret;
free(bufmgr_gem->exec2_objects);
- free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
pthread_mutex_destroy(&bufmgr_gem->lock);
@@ -1940,9 +1984,8 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
/* Release userptr bo kept hanging around for optimisation. */
if (bufmgr_gem->userptr_active.ptr) {
- memclear(close_bo);
- close_bo.handle = bufmgr_gem->userptr_active.handle;
- ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+ ret = drmCloseBufferHandle(bufmgr_gem->fd,
+ bufmgr_gem->userptr_active.handle);
free(bufmgr_gem->userptr_active.ptr);
if (ret)
fprintf(stderr,
@@ -2178,31 +2221,6 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
* index values into the validation list.
*/
static void
-drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
-{
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- int i;
-
- if (bo_gem->relocs == NULL)
- return;
-
- for (i = 0; i < bo_gem->reloc_count; i++) {
- drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
-
- if (target_bo == bo)
- continue;
-
- drm_intel_gem_bo_mark_mmaps_incoherent(bo);
-
- /* Continue walking the tree depth-first. */
- drm_intel_gem_bo_process_reloc(target_bo);
-
- /* Add the target to the validate list */
- drm_intel_add_validate_buffer(target_bo);
- }
-}
-
-static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
@@ -2242,30 +2260,6 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
}
}
-
-static void
-drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
-{
- int i;
-
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
- drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
- /* Update the buffer offset */
- if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
- DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
- bo_gem->gem_handle, bo_gem->name,
- upper_32_bits(bo->offset64),
- lower_32_bits(bo->offset64),
- upper_32_bits(bufmgr_gem->exec_objects[i].offset),
- lower_32_bits(bufmgr_gem->exec_objects[i].offset));
- bo->offset64 = bufmgr_gem->exec_objects[i].offset;
- bo->offset = bufmgr_gem->exec_objects[i].offset;
- }
- }
-}
-
static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
@@ -2302,73 +2296,6 @@ drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
}
static int
-drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
- drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
-{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
- struct drm_i915_gem_execbuffer execbuf;
- int ret, i;
-
- if (to_bo_gem(bo)->has_error)
- return -ENOMEM;
-
- pthread_mutex_lock(&bufmgr_gem->lock);
- /* Update indices and set up the validate list. */
- drm_intel_gem_bo_process_reloc(bo);
-
- /* Add the batch buffer to the validation list. There are no
- * relocations pointing to it.
- */
- drm_intel_add_validate_buffer(bo);
-
- memclear(execbuf);
- execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
- execbuf.buffer_count = bufmgr_gem->exec_count;
- execbuf.batch_start_offset = 0;
- execbuf.batch_len = used;
- execbuf.cliprects_ptr = (uintptr_t) cliprects;
- execbuf.num_cliprects = num_cliprects;
- execbuf.DR1 = 0;
- execbuf.DR4 = DR4;
-
- ret = drmIoctl(bufmgr_gem->fd,
- DRM_IOCTL_I915_GEM_EXECBUFFER,
- &execbuf);
- if (ret != 0) {
- ret = -errno;
- if (errno == ENOSPC) {
- DBG("Execbuffer fails to pin. "
- "Estimate: %u. Actual: %u. Available: %u\n",
- drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
- bufmgr_gem->
- exec_count),
- drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
- bufmgr_gem->
- exec_count),
- (unsigned int)bufmgr_gem->gtt_size);
- }
- }
- drm_intel_update_buffer_offsets(bufmgr_gem);
-
- if (bufmgr_gem->bufmgr.debug)
- drm_intel_gem_dump_validation_list(bufmgr_gem);
-
- for (i = 0; i < bufmgr_gem->exec_count; i++) {
- drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
-
- bo_gem->idle = false;
-
- /* Disconnect the buffer from the validate list */
- bo_gem->validate_index = -1;
- bufmgr_gem->exec_bos[i] = NULL;
- }
- bufmgr_gem->exec_count = 0;
- pthread_mutex_unlock(&bufmgr_gem->lock);
-
- return ret;
-}
-
-static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
int in_fence, int *out_fence,
@@ -2845,9 +2772,7 @@ drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-
- if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
- bufmgr_gem->fenced_relocs = true;
+ bufmgr_gem->fenced_relocs = true;
}
/**
@@ -3612,7 +3537,6 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
int ret, tmp;
- bool exec2 = false;
pthread_mutex_lock(&bufmgr_list_mutex);
@@ -3686,8 +3610,12 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
gp.param = I915_PARAM_HAS_EXECBUF2;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
- if (!ret)
- exec2 = true;
+ if (ret) {
+ fprintf(stderr, "i915 does not support EXECBUFER2\n");
+ free(bufmgr_gem);
+ bufmgr_gem = NULL;
+ goto exit;
+ }
gp.param = I915_PARAM_HAS_BSD;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@@ -3790,12 +3718,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
- /* Use the new one if available */
- if (exec2) {
- bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
- bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
- } else
- bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
+ bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
diff --git a/intel/intel_chipset.c b/intel/intel_chipset.c
index fda3de1a..f0da6d81 100644
--- a/intel/intel_chipset.c
+++ b/intel/intel_chipset.c
@@ -35,6 +35,7 @@ static const struct pci_device {
uint16_t gen;
} pciids[] = {
/* Keep ids sorted by gen; latest gen first */
+ INTEL_ADLP_IDS(12),
INTEL_ADLS_IDS(12),
INTEL_RKL_IDS(12),
INTEL_DG1_IDS(12),
diff --git a/intel/intel_decode.c b/intel/intel_decode.c
index e0a51664..be6f7798 100644
--- a/intel/intel_decode.c
+++ b/intel/intel_decode.c
@@ -3815,32 +3815,35 @@ drm_public struct drm_intel_decode *
drm_intel_decode_context_alloc(uint32_t devid)
{
struct drm_intel_decode *ctx;
+ int gen = 0;
- ctx = calloc(1, sizeof(struct drm_intel_decode));
- if (!ctx)
- return NULL;
-
- ctx->devid = devid;
- ctx->out = stdout;
-
- if (intel_get_genx(devid, &ctx->gen))
+ if (intel_get_genx(devid, &gen))
;
else if (IS_GEN8(devid))
- ctx->gen = 8;
+ gen = 8;
else if (IS_GEN7(devid))
- ctx->gen = 7;
+ gen = 7;
else if (IS_GEN6(devid))
- ctx->gen = 6;
+ gen = 6;
else if (IS_GEN5(devid))
- ctx->gen = 5;
+ gen = 5;
else if (IS_GEN4(devid))
- ctx->gen = 4;
+ gen = 4;
else if (IS_9XX(devid))
- ctx->gen = 3;
- else {
- assert(IS_GEN2(devid));
- ctx->gen = 2;
- }
+ gen = 3;
+ else if (IS_GEN2(devid))
+ gen = 2;
+
+ if (!gen)
+ return NULL;
+
+ ctx = calloc(1, sizeof(struct drm_intel_decode));
+ if (!ctx)
+ return NULL;
+
+ ctx->devid = devid;
+ ctx->gen = gen;
+ ctx->out = stdout;
return ctx;
}
diff --git a/intel/meson.build b/intel/meson.build
index 4d3f1ebd..5fa06c28 100644
--- a/intel/meson.build
+++ b/intel/meson.build
@@ -18,7 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-libdrm_intel = shared_library(
+libdrm_intel = library(
'drm_intel',
[
files(
diff --git a/libdrm_macros.h b/libdrm_macros.h
index 0dca8273..5caaa801 100644
--- a/libdrm_macros.h
+++ b/libdrm_macros.h
@@ -45,7 +45,7 @@
#include <sys/mman.h>
-#if defined(ANDROID) && !defined(__LP64__)
+#if defined(__BIONIC__) && !defined(__LP64__)
#include <errno.h> /* for EINVAL */
static inline void *drm_mmap(void *addr, size_t length, int prot, int flags,
diff --git a/libkms/meson.build b/libkms/meson.build
index 216be4df..8d17bb2e 100644
--- a/libkms/meson.build
+++ b/libkms/meson.build
@@ -41,7 +41,7 @@ if with_exynos
libkms_include += include_directories('../exynos')
endif
-libkms = shared_library(
+libkms = library(
'kms',
[files_libkms, config_file],
c_args : libdrm_c_args,
diff --git a/libkms/vmwgfx.c b/libkms/vmwgfx.c
index f0e40be7..1984399c 100644
--- a/libkms/vmwgfx.c
+++ b/libkms/vmwgfx.c
@@ -25,6 +25,9 @@
*
**************************************************************************/
+#ifdef __FreeBSD__
+#define _WANT_KERNEL_ERRNO
+#endif
#include <errno.h>
#include <stdlib.h>
diff --git a/man/drm-memory.7.rst b/man/drm-memory.7.rst
index c272c994..7d09eeb1 100644
--- a/man/drm-memory.7.rst
+++ b/man/drm-memory.7.rst
@@ -169,19 +169,10 @@ rendering, cursors and CPU-access. See the libgbm library for more information
or look at the driver-dependent man-pages (for example **drm-intel**\ (7) or
**drm-radeon**\ (7)).
-GEM-buffers can be closed with the ``DRM_IOCTL_GEM_CLOSE`` ioctl. It takes as
-argument a structure of type ``struct drm_gem_close``:
-
-::
-
- struct drm_gem_close {
- __u32 handle;
- __u32 pad;
- };
-
-The *handle* field is the GEM-handle to be closed. The *pad* field is unused
-padding. It must be zeroed. After this call the GEM handle cannot be used by
-this process anymore and may be reused for new GEM objects by the GEM API.
+GEM-buffers can be closed with **drmCloseBufferHandle**\ (3). It takes as
+argument the GEM-handle to be closed. After this call the GEM handle cannot be
+used by this process anymore and may be reused for new GEM objects by the GEM
+API.
If you want to share GEM-objects between different processes, you can create a
name for them and pass this name to other processes which can then open this
diff --git a/meson.build b/meson.build
index 07d2e086..5824da8a 100644
--- a/meson.build
+++ b/meson.build
@@ -21,9 +21,9 @@
project(
'libdrm',
['c'],
- version : '2.4.104',
+ version : '2.4.109',
license : 'MIT',
- meson_version : '>= 0.43',
+ meson_version : '>= 0.46',
default_options : ['buildtype=debugoptimized', 'c_std=gnu99'],
)
@@ -44,6 +44,8 @@ dep_threads = dependency('threads')
cc = meson.get_compiler('c')
+android = cc.compiles('''int func() { return __ANDROID__; }''')
+
symbols_check = find_program('symbols-check.py')
prog_nm = find_program('nm')
@@ -51,6 +53,11 @@ prog_nm = find_program('nm')
intel_atomics = false
lib_atomics = false
+python3 = import('python').find_installation()
+format_mod_static_table = custom_target('format_mod_static_table',
+ output : 'generated_static_table_fourcc.h', input: 'include/drm/drm_fourcc.h',
+ command : [python3, files('gen_table_fourcc.py'), '@INPUT@', '@OUTPUT@'])
+
dep_atomic_ops = dependency('atomic_ops', required : false)
if cc.links('''
int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
@@ -166,7 +173,7 @@ endif
with_libkms = false
_libkms = get_option('libkms')
if _libkms != 'false'
- with_libkms = _libkms == 'true' or ['linux', 'freebsd', 'dragonfly'].contains(host_machine.system())
+ with_libkms = _libkms == 'true' or (['linux', 'freebsd', 'dragonfly'].contains(host_machine.system()) and not android)
endif
# Among others FreeBSD does not have a separate dl library.
@@ -294,20 +301,29 @@ add_project_arguments('-include', '@0@'.format(config_file), language : 'c')
inc_root = include_directories('.')
inc_drm = include_directories('include/drm')
-libdrm = shared_library(
- 'drm',
- [files(
- 'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
- 'xf86drmMode.c'
- ),
- config_file,
- ],
- c_args : libdrm_c_args,
- dependencies : [dep_valgrind, dep_rt, dep_m],
- include_directories : inc_drm,
- version : '2.4.0',
- install : true,
-)
+libdrm_files = [files(
+ 'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
+ 'xf86drmMode.c'
+ ),
+ config_file, format_mod_static_table
+]
+
+if android
+ libdrm = library('drm', libdrm_files,
+ c_args : libdrm_c_args,
+ dependencies : [dep_valgrind, dep_rt, dep_m],
+ include_directories : inc_drm,
+ install : true,
+ )
+else
+ libdrm = library('drm', libdrm_files,
+ c_args : libdrm_c_args,
+ dependencies : [dep_valgrind, dep_rt, dep_m],
+ include_directories : inc_drm,
+ install : true,
+ version: '2.4.0'
+ )
+endif
test(
'core-symbols-check',
diff --git a/nouveau/meson.build b/nouveau/meson.build
index 9bd58fca..af45336c 100644
--- a/nouveau/meson.build
+++ b/nouveau/meson.build
@@ -19,7 +19,7 @@
# SOFTWARE.
-libdrm_nouveau = shared_library(
+libdrm_nouveau = library(
'drm_nouveau',
[files( 'nouveau.c', 'pushbuf.c', 'bufctx.c', 'abi16.c'), config_file],
c_args : libdrm_c_args,
diff --git a/nouveau/nouveau.c b/nouveau/nouveau.c
index f18d1426..7b4efded 100644
--- a/nouveau/nouveau.c
+++ b/nouveau/nouveau.c
@@ -46,19 +46,35 @@
#include "nvif/ioctl.h"
#include "nvif/unpack.h"
-#ifdef DEBUG
+drm_private FILE *nouveau_out = NULL;
drm_private uint32_t nouveau_debug = 0;
static void
-debug_init(char *args)
+debug_init(void)
{
- if (args) {
- int n = strtol(args, NULL, 0);
+ static bool once = false;
+ char *debug, *out;
+
+ if (once)
+ return;
+ once = true;
+
+ debug = getenv("NOUVEAU_LIBDRM_DEBUG");
+ if (debug) {
+ int n = strtol(debug, NULL, 0);
if (n >= 0)
nouveau_debug = n;
+
+ }
+
+ nouveau_out = stderr;
+ out = getenv("NOUVEAU_LIBDRM_OUT");
+ if (out) {
+ FILE *fout = fopen(out, "w");
+ if (fout)
+ nouveau_out = fout;
}
}
-#endif
static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
@@ -327,9 +343,7 @@ nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
struct nouveau_drm *drm;
drmVersionPtr ver;
-#ifdef DEBUG
- debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));
-#endif
+ debug_init();
if (!(drm = calloc(1, sizeof(*drm))))
return -ENOMEM;
@@ -593,7 +607,6 @@ nouveau_bo_del(struct nouveau_bo *bo)
struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
- struct drm_gem_close req = { .handle = bo->handle };
if (nvbo->head.next) {
pthread_mutex_lock(&nvdev->lock);
@@ -607,11 +620,11 @@ nouveau_bo_del(struct nouveau_bo *bo)
* might cause the bo to be closed accidentally while
* re-importing.
*/
- drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(drm->fd, bo->handle);
}
pthread_mutex_unlock(&nvdev->lock);
} else {
- drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(drm->fd, bo->handle);
}
if (bo->map)
drm_munmap(bo->map, bo->size);
diff --git a/nouveau/private.h b/nouveau/private.h
index 034a958e..b81d4b1e 100644
--- a/nouveau/private.h
+++ b/nouveau/private.h
@@ -1,6 +1,8 @@
#ifndef __NOUVEAU_LIBDRM_PRIVATE_H__
#define __NOUVEAU_LIBDRM_PRIVATE_H__
+#include <stdio.h>
+
#include <libdrm_macros.h>
#include <xf86drm.h>
#include <xf86atomic.h>
@@ -9,18 +11,19 @@
#include "nouveau.h"
-#ifdef DEBUG
+/*
+ * 0x00000001 dump all pushbuffers
+ * 0x00000002 submit pushbuffers synchronously
+ * 0x80000000 if compiled with SIMULATE return -EINVAL for all pb submissions
+ */
drm_private extern uint32_t nouveau_debug;
+drm_private extern FILE *nouveau_out;
#define dbg_on(lvl) (nouveau_debug & (1 << lvl))
#define dbg(lvl, fmt, args...) do { \
if (dbg_on((lvl))) \
- fprintf(stderr, "nouveau: "fmt, ##args); \
+ fprintf(nouveau_out, "nouveau: "fmt, ##args); \
} while(0)
-#else
-#define dbg_on(lvl) (0)
-#define dbg(lvl, fmt, args...)
-#endif
-#define err(fmt, args...) fprintf(stderr, "nouveau: "fmt, ##args)
+#define err(fmt, args...) fprintf(nouveau_out, "nouveau: "fmt, ##args)
struct nouveau_client_kref {
struct drm_nouveau_gem_pushbuf_bo *kref;
diff --git a/nouveau/pushbuf.c b/nouveau/pushbuf.c
index e5f73f0d..5fadd7a9 100644
--- a/nouveau/pushbuf.c
+++ b/nouveau/pushbuf.c
@@ -29,6 +29,7 @@
#include <string.h>
#include <assert.h>
#include <errno.h>
+#include <inttypes.h>
#include <xf86drm.h>
#include <xf86atomic.h>
@@ -274,9 +275,10 @@ pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
kref = krec->buffer;
for (i = 0; i < krec->nr_buffer; i++, kref++) {
- err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
+ bo = (void *)(uintptr_t)kref->user_priv;
+ err("ch%d: buf %08x %08x %08x %08x %08x %p 0x%"PRIx64" 0x%"PRIx64"\n", chid, i,
kref->handle, kref->valid_domains,
- kref->read_domains, kref->write_domains);
+ kref->read_domains, kref->write_domains, bo->map, bo->offset, bo->size);
}
krel = krec->reloc;
@@ -292,11 +294,14 @@ pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
kref = krec->buffer + kpsh->bo_index;
bo = (void *)(unsigned long)kref->user_priv;
bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
- end = bgn + (kpsh->length /4);
+ end = bgn + ((kpsh->length & 0x7fffff) /4);
- err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
+ err("ch%d: psh %s%08x %010llx %010llx\n", chid,
+ bo->map ? "" : "(unmapped) ", kpsh->bo_index,
(unsigned long long)kpsh->offset,
(unsigned long long)(kpsh->offset + kpsh->length));
+ if (!bo->map)
+ continue;
while (bgn < end)
err("\t0x%08x\n", *bgn++);
}
@@ -336,6 +341,8 @@ pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
req.suffix0 = nvpb->suffix0;
req.suffix1 = nvpb->suffix1;
req.vram_available = 0; /* for valgrind */
+ if (dbg_on(1))
+ req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
req.gart_available = 0;
if (dbg_on(0))
diff --git a/omap/meson.build b/omap/meson.build
index 53330b61..bfd59f05 100644
--- a/omap/meson.build
+++ b/omap/meson.build
@@ -18,7 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-libdrm_omap = shared_library(
+libdrm_omap = library(
'drm_omap',
[files('omap_drm.c'), config_file],
include_directories : [inc_root, inc_drm],
diff --git a/omap/omap_drm.c b/omap/omap_drm.c
index ffacea69..aa273660 100644
--- a/omap/omap_drm.c
+++ b/omap/omap_drm.c
@@ -174,10 +174,7 @@ static struct omap_bo * bo_from_handle(struct omap_device *dev,
{
struct omap_bo *bo = calloc(sizeof(*bo), 1);
if (!bo) {
- struct drm_gem_close req = {
- .handle = handle,
- };
- drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(dev->fd, handle);
return NULL;
}
bo->dev = omap_device_ref(dev);
@@ -365,12 +362,9 @@ drm_public void omap_bo_del(struct omap_bo *bo)
}
if (bo->handle) {
- struct drm_gem_close req = {
- .handle = bo->handle,
- };
pthread_mutex_lock(&table_lock);
drmHashDelete(bo->dev->handle_table, bo->handle);
- drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ drmCloseBufferHandle(bo->dev->fd, bo->handle);
pthread_mutex_unlock(&table_lock);
}
diff --git a/radeon/meson.build b/radeon/meson.build
index ca128329..31fe9cd0 100644
--- a/radeon/meson.build
+++ b/radeon/meson.build
@@ -19,7 +19,7 @@
# SOFTWARE.
-libdrm_radeon = shared_library(
+libdrm_radeon = library(
'drm_radeon',
[
files(
diff --git a/radeon/radeon_bo.h b/radeon/radeon_bo.h
index 37478a0d..6e20c6cb 100644
--- a/radeon/radeon_bo.h
+++ b/radeon/radeon_bo.h
@@ -48,7 +48,6 @@ struct radeon_bo {
uint32_t size;
};
-struct radeon_bo_manager;
void radeon_bo_debug(struct radeon_bo *bo, const char *op);
diff --git a/radeon/radeon_bo_gem.c b/radeon/radeon_bo_gem.c
index 86f7c007..bbe72ce0 100644
--- a/radeon/radeon_bo_gem.c
+++ b/radeon/radeon_bo_gem.c
@@ -125,7 +125,6 @@ static void bo_ref(struct radeon_bo_int *boi)
static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
- struct drm_gem_close args;
if (boi->cref) {
return (struct radeon_bo *)boi;
@@ -134,12 +133,8 @@ static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
drm_munmap(bo_gem->priv_ptr, boi->size);
}
- /* Zero out args to make valgrind happy */
- memset(&args, 0, sizeof(args));
-
/* close object */
- args.handle = boi->handle;
- drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_CLOSE, &args);
+ drmCloseBufferHandle(boi->bom->fd, boi->handle);
memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
free(bo_gem);
return NULL;
diff --git a/tegra/meson.build b/tegra/meson.build
index 88613b9c..edddf72b 100644
--- a/tegra/meson.build
+++ b/tegra/meson.build
@@ -18,7 +18,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-libdrm_tegra = shared_library(
+libdrm_tegra = library(
'drm_tegra',
[files('tegra.c'), config_file],
include_directories : [inc_root, inc_drm],
diff --git a/tegra/tegra.c b/tegra/tegra.c
index cf00a3ca..420b171c 100644
--- a/tegra/tegra.c
+++ b/tegra/tegra.c
@@ -38,15 +38,11 @@
static void drm_tegra_bo_free(struct drm_tegra_bo *bo)
{
struct drm_tegra *drm = bo->drm;
- struct drm_gem_close args;
if (bo->map)
munmap(bo->map, bo->size);
- memset(&args, 0, sizeof(args));
- args.handle = bo->handle;
-
- drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &args);
+ drmCloseBufferHandle(drm->fd, bo->handle);
free(bo);
}
diff --git a/tests/amdgpu/amdgpu_stress.c b/tests/amdgpu/amdgpu_stress.c
new file mode 100644
index 00000000..5c5c88c5
--- /dev/null
+++ b/tests/amdgpu/amdgpu_stress.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include "drm.h"
+#include "xf86drmMode.h"
+#include "xf86drm.h"
+#include "amdgpu.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+
+#define MAX_CARDS_SUPPORTED 4
+#define NUM_BUFFER_OBJECTS 1024
+
+#define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
+ (((sub_op) & 0xFF) << 8) | \
+ (((op) & 0xFF) << 0))
+
+#define SDMA_OPCODE_COPY 1
+# define SDMA_COPY_SUB_OPCODE_LINEAR 0
+
+
+#define SDMA_PACKET_SI(op, b, t, s, cnt) ((((op) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((cnt) & 0xFFFFF) << 0))
+#define SDMA_OPCODE_COPY_SI 3
+
+
+/** Help string for command line parameters */
+static const char usage[] =
+ "Usage: %s [-?h] [-b v|g|vg size] "
+ "[-c from to size count]\n"
+ "where:\n"
+ " b - Allocate a BO in VRAM, GTT or VRAM|GTT of size bytes.\n"
+ " This flag can be used multiple times. The first bo will\n"
+ " have id `1`, then second id `2`, ...\n"
+ " c - Copy size bytes from BO (bo_id1) to BO (bo_id2), count times\n"
+ " h - Display this help\n"
+ "\n"
+ "Sizes can be postfixes with k, m or g for kilo, mega and gigabyte scaling\n";
+
+/** Specified options strings for getopt */
+static const char options[] = "?hb:c:";
+
+/* Open AMD devices.
+ * Returns the fd of the first device it could open.
+ */
+static int amdgpu_open_device(void)
+{
+ drmDevicePtr devices[MAX_CARDS_SUPPORTED];
+ unsigned int i;
+ int drm_count;
+
+ drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
+ if (drm_count < 0) {
+ fprintf(stderr, "drmGetDevices2() returned an error %d\n",
+ drm_count);
+ return drm_count;
+ }
+
+ for (i = 0; i < drm_count; i++) {
+ drmVersionPtr version;
+ int fd;
+
+ /* If this is not PCI device, skip*/
+ if (devices[i]->bustype != DRM_BUS_PCI)
+ continue;
+
+ /* If this is not AMD GPU vender ID, skip*/
+ if (devices[i]->deviceinfo.pci->vendor_id != 0x1002)
+ continue;
+
+ if (!(devices[i]->available_nodes & 1 << DRM_NODE_RENDER))
+ continue;
+
+ fd = open(devices[i]->nodes[DRM_NODE_RENDER], O_RDWR | O_CLOEXEC);
+
+ /* This node is not available. */
+ if (fd < 0) continue;
+
+ version = drmGetVersion(fd);
+ if (!version) {
+ fprintf(stderr,
+ "Warning: Cannot get version for %s."
+ "Error is %s\n",
+ devices[i]->nodes[DRM_NODE_RENDER],
+ strerror(errno));
+ close(fd);
+ continue;
+ }
+
+ if (strcmp(version->name, "amdgpu")) {
+ /* This is not AMDGPU driver, skip.*/
+ drmFreeVersion(version);
+ close(fd);
+ continue;
+ }
+
+ drmFreeVersion(version);
+ drmFreeDevices(devices, drm_count);
+ return fd;
+ }
+
+ return -1;
+}
+
+amdgpu_device_handle device_handle;
+amdgpu_context_handle context_handle;
+
+amdgpu_bo_handle resources[NUM_BUFFER_OBJECTS];
+uint64_t virtual[NUM_BUFFER_OBJECTS];
+unsigned int num_buffers;
+uint32_t *pm4;
+
+int alloc_bo(uint32_t domain, uint64_t size)
+{
+ struct amdgpu_bo_alloc_request request = {};
+ amdgpu_bo_handle bo;
+ amdgpu_va_handle va;
+ uint64_t addr;
+ int r;
+
+ if (num_buffers >= NUM_BUFFER_OBJECTS)
+ return -ENOSPC;
+
+ request.alloc_size = size;
+ request.phys_alignment = 0;
+ request.preferred_heap = domain;
+ request.flags = 0;
+ r = amdgpu_bo_alloc(device_handle, &request, &bo);
+ if (r)
+ return r;
+
+ r = amdgpu_va_range_alloc(device_handle, amdgpu_gpu_va_range_general,
+ size, 0, 0, &addr, &va, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_va_op_raw(device_handle, bo, 0, size, addr,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE, AMDGPU_VA_OP_MAP);
+ if (r)
+ return r;
+
+ resources[num_buffers] = bo;
+ virtual[num_buffers] = addr;
+ fprintf(stdout, "Allocated BO number %u at 0x%lx, domain 0x%x, size %lu\n",
+ num_buffers++, addr, domain, size);
+ return 0;
+}
+
+int submit_ib(uint32_t from, uint32_t to, uint64_t size, uint32_t count)
+{
+ struct amdgpu_cs_request ibs_request;
+ struct amdgpu_cs_fence fence_status;
+ struct amdgpu_cs_ib_info ib_info;
+ uint64_t copied = size, delta;
+ struct timespec start, stop;
+
+ uint64_t src = virtual[from];
+ uint64_t dst = virtual[to];
+ uint32_t expired;
+ int i, r;
+
+ i = 0;
+ while (size) {
+ uint64_t bytes = size < 0x40000 ? size : 0x40000;
+
+ if (device_handle->info.family_id == AMDGPU_FAMILY_SI) {
+ pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0,
+ bytes);
+ pm4[i++] = 0xffffffff & dst;
+ pm4[i++] = 0xffffffff & src;
+ pm4[i++] = (0xffffffff00000000 & dst) >> 32;
+ pm4[i++] = (0xffffffff00000000 & src) >> 32;
+ } else {
+ pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_COPY_SUB_OPCODE_LINEAR,
+ 0);
+ if ( device_handle->info.family_id >= AMDGPU_FAMILY_AI)
+ pm4[i++] = bytes - 1;
+ else
+ pm4[i++] = bytes;
+ pm4[i++] = 0;
+ pm4[i++] = 0xffffffff & src;
+ pm4[i++] = (0xffffffff00000000 & src) >> 32;
+ pm4[i++] = 0xffffffff & dst;
+ pm4[i++] = (0xffffffff00000000 & dst) >> 32;
+ }
+
+ size -= bytes;
+ src += bytes;
+ dst += bytes;
+ }
+
+ memset(&ib_info, 0, sizeof(ib_info));
+ ib_info.ib_mc_address = virtual[0];
+ ib_info.size = i;
+
+ memset(&ibs_request, 0, sizeof(ibs_request));
+ ibs_request.ip_type = AMDGPU_HW_IP_DMA;
+ ibs_request.ring = 0;
+ ibs_request.number_of_ibs = 1;
+ ibs_request.ibs = &ib_info;
+ ibs_request.fence_info.handle = NULL;
+
+ r = clock_gettime(CLOCK_MONOTONIC, &start);
+ if (r)
+ return errno;
+
+ r = amdgpu_bo_list_create(device_handle, num_buffers, resources, NULL,
+ &ibs_request.resources);
+ if (r)
+ return r;
+
+ for (i = 0; i < count; ++i) {
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_bo_list_destroy(ibs_request.resources);
+ if (r)
+ return r;
+
+ memset(&fence_status, 0, sizeof(fence_status));
+ fence_status.ip_type = ibs_request.ip_type;
+ fence_status.ip_instance = 0;
+ fence_status.ring = ibs_request.ring;
+ fence_status.context = context_handle;
+ fence_status.fence = ibs_request.seq_no;
+ r = amdgpu_cs_query_fence_status(&fence_status,
+ AMDGPU_TIMEOUT_INFINITE,
+ 0, &expired);
+ if (r)
+ return r;
+
+ r = clock_gettime(CLOCK_MONOTONIC, &stop);
+ if (r)
+ return errno;
+
+ delta = stop.tv_nsec + stop.tv_sec * 1000000000UL;
+ delta -= start.tv_nsec + start.tv_sec * 1000000000UL;
+
+ fprintf(stdout, "Submitted %u IBs to copy from %u(%lx) to %u(%lx) %lu bytes took %lu usec\n",
+ count, from, virtual[from], to, virtual[to], copied, delta / 1000);
+ return 0;
+}
+
+void next_arg(int argc, char **argv, const char *msg)
+{
+ optarg = argv[optind++];
+ if (optind > argc || optarg[0] == '-') {
+ fprintf(stderr, "%s\n", msg);
+ exit(EXIT_FAILURE);
+ }
+}
+
+uint64_t parse_size(void)
+{
+ uint64_t size;
+ char ext[2];
+
+ ext[0] = 0;
+ if (sscanf(optarg, "%li%1[kmgKMG]", &size, ext) < 1) {
+ fprintf(stderr, "Can't parse size arg: %s\n", optarg);
+ exit(EXIT_FAILURE);
+ }
+ switch (ext[0]) {
+ case 'k':
+ case 'K':
+ size *= 1024;
+ break;
+ case 'm':
+ case 'M':
+ size *= 1024 * 1024;
+ break;
+ case 'g':
+ case 'G':
+ size *= 1024 * 1024 * 1024;
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+int main(int argc, char **argv)
+{
+ uint32_t major_version, minor_version;
+ uint32_t domain, from, to, count;
+ uint64_t size;
+ int fd, r, c;
+
+ fd = amdgpu_open_device();
+ if (fd < 0) {
+ perror("Cannot open AMDGPU device");
+ exit(EXIT_FAILURE);
+ }
+
+ r = amdgpu_device_initialize(fd, &major_version, &minor_version, &device_handle);
+ if (r) {
+ fprintf(stderr, "amdgpu_device_initialize returned %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+
+ r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+ if (r) {
+ fprintf(stderr, "amdgpu_cs_ctx_create returned %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+
+ if (argc == 1) {
+ fprintf(stderr, usage, argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ r = alloc_bo(AMDGPU_GEM_DOMAIN_GTT, 2ULL * 1024 * 1024);
+ if (r) {
+ fprintf(stderr, "Buffer allocation failed with %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+
+ r = amdgpu_bo_cpu_map(resources[0], (void **)&pm4);
+ if (r) {
+ fprintf(stderr, "Buffer mapping failed with %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+
+ opterr = 0;
+ while ((c = getopt(argc, argv, options)) != -1) {
+ switch (c) {
+ case 'b':
+ if (!strcmp(optarg, "v"))
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ else if (!strcmp(optarg, "g"))
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ else if (!strcmp(optarg, "vg"))
+ domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
+ else {
+ fprintf(stderr, "Invalid domain: %s\n", optarg);
+ exit(EXIT_FAILURE);
+ }
+ next_arg(argc, argv, "Missing buffer size");
+ size = parse_size();
+ if (size < getpagesize()) {
+ fprintf(stderr, "Buffer size to small %lu\n", size);
+ exit(EXIT_FAILURE);
+ }
+ r = alloc_bo(domain, size);
+ if (r) {
+ fprintf(stderr, "Buffer allocation failed with %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case 'c':
+ if (sscanf(optarg, "%u", &from) != 1) {
+ fprintf(stderr, "Can't parse from buffer: %s\n", optarg);
+ exit(EXIT_FAILURE);
+ }
+ next_arg(argc, argv, "Missing to buffer");
+ if (sscanf(optarg, "%u", &to) != 1) {
+ fprintf(stderr, "Can't parse to buffer: %s\n", optarg);
+ exit(EXIT_FAILURE);
+ }
+ next_arg(argc, argv, "Missing size");
+ size = parse_size();
+ next_arg(argc, argv, "Missing count");
+ count = parse_size();
+ r = submit_ib(from, to, size, count);
+ if (r) {
+ fprintf(stderr, "IB submission failed with %d\n", r);
+ exit(EXIT_FAILURE);
+ }
+ break;
+ case '?':
+ case 'h':
+ fprintf(stderr, usage, argv[0]);
+ exit(EXIT_SUCCESS);
+ default:
+ fprintf(stderr, usage, argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
index 8c867678..c4224c91 100644
--- a/tests/amdgpu/amdgpu_test.c
+++ b/tests/amdgpu/amdgpu_test.c
@@ -37,6 +37,18 @@
#include <sys/time.h>
#include <stdarg.h>
#include <stdint.h>
+#ifdef __linux__
+#include <linux/limits.h>
+#elif __FreeBSD__
+/* SPECNAMELEN in FreeBSD is defined here: */
+#include <sys/param.h>
+#endif
+#ifdef MAJOR_IN_MKDEV
+#include <sys/mkdev.h>
+#endif
+#ifdef MAJOR_IN_SYSMACROS
+#include <sys/sysmacros.h>
+#endif
#include "drm.h"
#include "xf86drmMode.h"
@@ -59,6 +71,7 @@
#define RAS_TESTS_STR "RAS Tests"
#define SYNCOBJ_TIMELINE_TESTS_STR "SYNCOBJ TIMELINE Tests"
#define SECURITY_TESTS_STR "Security Tests"
+#define HOTUNPLUG_TESTS_STR "Hotunplug Tests"
/**
* Open handles for amdgpu devices
@@ -137,6 +150,12 @@ static CU_SuiteInfo suites[] = {
.pCleanupFunc = suite_security_tests_clean,
.pTests = security_tests,
},
+ {
+ .pName = HOTUNPLUG_TESTS_STR,
+ .pInitFunc = suite_hotunplug_tests_init,
+ .pCleanupFunc = suite_hotunplug_tests_clean,
+ .pTests = hotunplug_tests,
+ },
CU_SUITE_INFO_NULL,
};
@@ -198,6 +217,10 @@ static Suites_Active_Status suites_active_stat[] = {
.pName = SECURITY_TESTS_STR,
.pActive = suite_security_tests_enable,
},
+ {
+ .pName = HOTUNPLUG_TESTS_STR,
+ .pActive = suite_hotunplug_tests_enable,
+ },
};
@@ -339,12 +362,13 @@ static int amdgpu_open_devices(int open_render_node)
/* Close AMD devices.
*/
-static void amdgpu_close_devices()
+void amdgpu_close_devices()
{
int i;
for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
- if (drm_amdgpu[i] >=0)
+ if (drm_amdgpu[i] >=0) {
close(drm_amdgpu[i]);
+ }
}
/* Print AMD devices information */
@@ -430,7 +454,8 @@ static void amdgpu_disable_suites()
{
amdgpu_device_handle device_handle;
uint32_t major_version, minor_version, family_id;
- int i;
+ drmDevicePtr devices[MAX_CARDS_SUPPORTED];
+ int i, drm_count;
int size = sizeof(suites_active_stat) / sizeof(suites_active_stat[0]);
if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
@@ -442,6 +467,8 @@ static void amdgpu_disable_suites()
if (amdgpu_device_deinitialize(device_handle))
return;
+ drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
+
/* Set active status for suites based on their policies */
for (i = 0; i < size; ++i)
if (amdgpu_set_suite_active(suites_active_stat[i].pName,
@@ -496,9 +523,6 @@ static void amdgpu_disable_suites()
"gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
- if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))
- fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
-
if (amdgpu_set_test_active(BASIC_TESTS_STR, "bo eviction Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
@@ -524,6 +548,84 @@ static void amdgpu_disable_suites()
//if (family_id < AMDGPU_FAMILY_AI || family_id > AMDGPU_FAMILY_RV)
if (amdgpu_set_test_active(BASIC_TESTS_STR, "GPU reset Test", CU_FALSE))
fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
+
+ /* You need at least 2 devices for this */
+ if (drm_count < 2)
+ if (amdgpu_set_test_active(HOTUNPLUG_TESTS_STR, "Unplug with exported fence", CU_FALSE))
+ fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
+}
+
+int test_device_index;
+
+int amdgpu_open_device_on_test_index(int render_node)
+{
+ int i;
+
+ if (amdgpu_open_devices(open_render_node) <= 0) {
+ perror("Cannot open AMDGPU device");
+ return -1;
+ }
+
+ if (test_device_index >= 0) {
+ /* Most tests run on device of drm_amdgpu[0].
+ * Swap the chosen device to drm_amdgpu[0].
+ */
+ i = drm_amdgpu[0];
+ drm_amdgpu[0] = drm_amdgpu[test_device_index];
+ drm_amdgpu[test_device_index] = i;
+ }
+
+ return 0;
+
+
+}
+
+
+static bool amdgpu_node_is_drm(int maj, int min)
+{
+#ifdef __linux__
+ char path[64];
+ struct stat sbuf;
+
+ snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device/drm",
+ maj, min);
+ return stat(path, &sbuf) == 0;
+#elif defined(__FreeBSD__)
+ char name[SPECNAMELEN];
+
+ if (!devname_r(makedev(maj, min), S_IFCHR, name, sizeof(name)))
+ return 0;
+ /* Handle drm/ and dri/ as both are present in different FreeBSD versions.
+ * FreeBSD on amd64/i386/powerpc external kernel modules create a node
+ * in /dev/drm/ and links in /dev/dri, while a WIP in-kernel driver creates
+ * only device nodes in /dev/dri/ */
+ return (!strncmp(name, "drm/", 4) || !strncmp(name, "dri/", 4));
+#else
+ return maj == DRM_MAJOR;
+#endif
+}
+
+char *amdgpu_get_device_from_fd(int fd)
+{
+#ifdef __linux__
+ struct stat sbuf;
+ char path[PATH_MAX + 1];
+ unsigned int maj, min;
+
+ if (fstat(fd, &sbuf))
+ return NULL;
+
+ maj = major(sbuf.st_rdev);
+ min = minor(sbuf.st_rdev);
+
+ if (!amdgpu_node_is_drm(maj, min) || !S_ISCHR(sbuf.st_mode))
+ return NULL;
+
+ snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
+ return strdup(path);
+#else
+ return NULL;
+#endif
}
/* The main() function for setting up and running the tests.
@@ -541,7 +643,6 @@ int main(int argc, char **argv)
int display_devices = 0;/* By default not to display devices' info */
CU_pSuite pSuite = NULL;
CU_pTest pTest = NULL;
- int test_device_index;
int display_list = 0;
int force_run = 0;
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index 98cec698..cc12756d 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -273,6 +273,29 @@ amdgpu_command_submission_write_linear_helper_with_secure(amdgpu_device_handle
unsigned ip_type,
bool secure);
+
+
+/**
+ * Initialize hotunplug test suite
+ */
+int suite_hotunplug_tests_init();
+
+/**
+ * Deinitialize hotunplug test suite
+ */
+int suite_hotunplug_tests_clean();
+
+/**
+ * Decide if the suite is enabled by default or not.
+ */
+CU_BOOL suite_hotunplug_tests_enable(void);
+
+/**
+ * Tests in uvd enc test suite
+ */
+extern CU_TestInfo hotunplug_tests[];
+
+
/**
* Helper functions
*/
@@ -449,13 +472,18 @@ static inline CU_ErrorCode amdgpu_set_test_active(const char *suite_name,
return r;
}
-static inline bool asic_is_arcturus(uint32_t asic_id)
+
+static inline bool asic_is_gfx_pipe_removed(uint32_t family_id, uint32_t chip_id, uint32_t chip_rev)
{
- switch(asic_id) {
- /* Arcturus asic DID */
- case 0x738C:
- case 0x7388:
- case 0x738E:
+
+ if (family_id != AMDGPU_FAMILY_AI)
+ return false;
+
+ switch (chip_id - chip_rev) {
+ /* Arcturus */
+ case 0x32:
+ /* Aldebaran */
+ case 0x3c:
return true;
default:
return false;
@@ -471,4 +499,8 @@ void amdgpu_test_exec_cs_helper_raw(amdgpu_device_handle device_handle,
struct amdgpu_cs_request *ibs_request,
bool secure);
+void amdgpu_close_devices();
+int amdgpu_open_device_on_test_index(int render_node);
+char *amdgpu_get_device_from_fd(int fd);
+
#endif /* #ifdef _AMDGPU_TEST_H_ */
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index dc9ed947..0180f9ce 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -46,6 +46,8 @@ static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static uint32_t family_id;
+static uint32_t chip_id;
+static uint32_t chip_rev;
static void amdgpu_query_info_test(void);
static void amdgpu_command_submission_gfx(void);
@@ -341,9 +343,10 @@ enum cs_type {
};
static const uint32_t bufferclear_cs_shader_gfx9[] = {
- 0xD1FD0000, 0x04010C08, 0x7E020204, 0x7E040205,
- 0x7E060206, 0x7E080207, 0xE01C2000, 0x80000100,
- 0xBF810000
+ 0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+ 0x7e020280, 0x7e040204, 0x7e060205, 0x7e080206,
+ 0x7e0a0207, 0xe01c2000, 0x80000200, 0xbf8c0000,
+ 0xbf810000
};
static const uint32_t bufferclear_cs_shader_registers_gfx9[][2] = {
@@ -357,8 +360,9 @@ static const uint32_t bufferclear_cs_shader_registers_gfx9[][2] = {
static const uint32_t bufferclear_cs_shader_registers_num_gfx9 = 5;
static const uint32_t buffercopy_cs_shader_gfx9[] = {
- 0xD1FD0000, 0x04010C08, 0xE00C2000, 0x80000100,
- 0xBF8C0F70, 0xE01C2000, 0x80010100, 0xBF810000
+ 0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+ 0x7e020280, 0xe00c2000, 0x80000200, 0xbf8c0f70,
+ 0xe01c2000, 0x80010200, 0xbf810000
};
static const uint32_t preamblecache_gfx9[] = {
@@ -617,19 +621,21 @@ int amdgpu_bo_alloc_and_map_raw(amdgpu_device_handle dev, unsigned size,
CU_BOOL suite_basic_tests_enable(void)
{
- uint32_t asic_id;
if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
&minor_version, &device_handle))
return CU_FALSE;
- asic_id = device_handle->info.asic_id;
+
+ family_id = device_handle->info.family_id;
+ chip_id = device_handle->info.chip_external_rev;
+ chip_rev = device_handle->info.chip_rev;
if (amdgpu_device_deinitialize(device_handle))
return CU_FALSE;
- /* disable gfx engine basic test cases for Arturus due to no CPG */
- if (asic_is_arcturus(asic_id)) {
+ /* disable gfx engine basic test cases for some asics have no CPG */
+ if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
if (amdgpu_set_test_active("Basic Tests",
"Command submission Test (GFX)",
CU_FALSE))
@@ -1066,6 +1072,14 @@ static void amdgpu_semaphore_test(void)
amdgpu_bo_list_handle bo_list[2];
amdgpu_va_handle va_handle[2];
int r, i;
+ struct amdgpu_gpu_info gpu_info = {0};
+ unsigned gc_ip_type;
+
+ r = amdgpu_query_gpu_info(device_handle, &gpu_info);
+ CU_ASSERT_EQUAL(r, 0);
+
+ gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
+ AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
if (family_id == AMDGPU_FAMILY_SI) {
sdma_nop = SDMA_PACKET_SI(SDMA_NOP_SI, 0, 0, 0, 0);
@@ -1108,14 +1122,14 @@ static void amdgpu_semaphore_test(void)
r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
+ r = amdgpu_cs_wait_semaphore(context_handle[0], gc_ip_type, 0, 0, sem);
CU_ASSERT_EQUAL(r, 0);
ptr = ib_result_cpu[1];
ptr[0] = gfx_nop;
ib_info[1].ib_mc_address = ib_result_mc_address[1];
ib_info[1].size = 1;
- ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request[1].ip_type = gc_ip_type;
ibs_request[1].number_of_ibs = 1;
ibs_request[1].ibs = &ib_info[1];
ibs_request[1].resources = bo_list[1];
@@ -1125,7 +1139,7 @@ static void amdgpu_semaphore_test(void)
CU_ASSERT_EQUAL(r, 0);
fence_status.context = context_handle[0];
- fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.ip_type = gc_ip_type;
fence_status.ip_instance = 0;
fence_status.fence = ibs_request[1].seq_no;
r = amdgpu_cs_query_fence_status(&fence_status,
@@ -1139,24 +1153,24 @@ static void amdgpu_semaphore_test(void)
ib_info[0].ib_mc_address = ib_result_mc_address[0];
ib_info[0].size = 1;
- ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request[0].ip_type = gc_ip_type;
ibs_request[0].number_of_ibs = 1;
ibs_request[0].ibs = &ib_info[0];
ibs_request[0].resources = bo_list[0];
ibs_request[0].fence_info.handle = NULL;
r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
+ r = amdgpu_cs_signal_semaphore(context_handle[0], gc_ip_type, 0, 0, sem);
CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
+ r = amdgpu_cs_wait_semaphore(context_handle[1], gc_ip_type, 0, 0, sem);
CU_ASSERT_EQUAL(r, 0);
ptr = ib_result_cpu[1];
ptr[0] = gfx_nop;
ib_info[1].ib_mc_address = ib_result_mc_address[1];
ib_info[1].size = 1;
- ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request[1].ip_type = gc_ip_type;
ibs_request[1].number_of_ibs = 1;
ibs_request[1].ibs = &ib_info[1];
ibs_request[1].resources = bo_list[1];
@@ -1166,7 +1180,7 @@ static void amdgpu_semaphore_test(void)
CU_ASSERT_EQUAL(r, 0);
fence_status.context = context_handle[1];
- fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.ip_type = gc_ip_type;
fence_status.ip_instance = 0;
fence_status.fence = ibs_request[1].seq_no;
r = amdgpu_cs_query_fence_status(&fence_status,
diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
index 4c11665a..8fc7fe26 100644
--- a/tests/amdgpu/bo_tests.c
+++ b/tests/amdgpu/bo_tests.c
@@ -168,7 +168,7 @@ static void amdgpu_bo_metadata(void)
struct amdgpu_bo_info info = {0};
int r;
- meta.size_metadata = 1;
+ meta.size_metadata = 4;
meta.umd_metadata[0] = 0xdeadbeef;
r = amdgpu_bo_set_metadata(buffer_handle, &meta);
@@ -177,7 +177,7 @@ static void amdgpu_bo_metadata(void)
r = amdgpu_bo_query_info(buffer_handle, &info);
CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(info.metadata.size_metadata, 1);
+ CU_ASSERT_EQUAL(info.metadata.size_metadata, 4);
CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
}
diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
index 10124c15..f5096781 100644
--- a/tests/amdgpu/cs_tests.c
+++ b/tests/amdgpu/cs_tests.c
@@ -64,21 +64,20 @@ CU_TestInfo cs_tests[] = {
CU_BOOL suite_cs_tests_enable(void)
{
- uint32_t asic_id;
-
if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
&minor_version, &device_handle))
return CU_FALSE;
family_id = device_handle->info.family_id;
- asic_id = device_handle->info.asic_id;
+ chip_id = device_handle->info.chip_external_rev;
+ chip_rev = device_handle->info.chip_rev;
if (amdgpu_device_deinitialize(device_handle))
return CU_FALSE;
if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
- asic_is_arcturus(asic_id)) {
+ asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
printf("\n\nThe ASIC NOT support UVD, suite disabled\n");
return CU_FALSE;
}
diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
index 248cc339..07a39446 100644
--- a/tests/amdgpu/deadlock_tests.c
+++ b/tests/amdgpu/deadlock_tests.c
@@ -106,6 +106,10 @@ static uint32_t minor_version;
static pthread_t stress_thread;
static uint32_t *ptr;
+static uint32_t family_id;
+static uint32_t chip_rev;
+static uint32_t chip_id;
+
int use_uc_mtype = 0;
static void amdgpu_deadlock_helper(unsigned ip_type);
@@ -124,25 +128,27 @@ static void amdgpu_draw_hang_slow_gfx(void);
CU_BOOL suite_deadlock_tests_enable(void)
{
CU_BOOL enable = CU_TRUE;
- uint32_t asic_id;
if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
&minor_version, &device_handle))
return CU_FALSE;
+ family_id = device_handle->info.family_id;
+ chip_id = device_handle->info.chip_external_rev;
+ chip_rev = device_handle->info.chip_rev;
+
/*
* Only enable for ASICs supporting GPU reset and for which it's enabled
* by default (currently GFX8/9 dGPUS)
*/
- if (device_handle->info.family_id != AMDGPU_FAMILY_VI &&
- device_handle->info.family_id != AMDGPU_FAMILY_AI &&
- device_handle->info.family_id != AMDGPU_FAMILY_CI) {
+ if (family_id != AMDGPU_FAMILY_VI &&
+ family_id != AMDGPU_FAMILY_AI &&
+ family_id != AMDGPU_FAMILY_CI) {
printf("\n\nGPU reset is not enabled for the ASIC, deadlock suite disabled\n");
enable = CU_FALSE;
}
- asic_id = device_handle->info.asic_id;
- if (asic_is_arcturus(asic_id)) {
+ if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
if (amdgpu_set_test_active("Deadlock Tests",
"gfx ring block test (set amdgpu.lockup_timeout=50)",
CU_FALSE))
diff --git a/tests/amdgpu/hotunplug_tests.c b/tests/amdgpu/hotunplug_tests.c
new file mode 100644
index 00000000..23ea1407
--- /dev/null
+++ b/tests/amdgpu/hotunplug_tests.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#if HAVE_ALLOCA_H
+# include <alloca.h>
+#endif
+
+#include "CUnit/Basic.h"
+
+#include "amdgpu_test.h"
+#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
+#include "xf86drm.h"
+#include <pthread.h>
+
+#define GFX_COMPUTE_NOP 0xffff1000
+
+static amdgpu_device_handle device_handle;
+static uint32_t major_version;
+static uint32_t minor_version;
+static char *sysfs_remove = NULL;
+static bool do_cs;
+
+CU_BOOL suite_hotunplug_tests_enable(void)
+{
+ CU_BOOL enable = CU_TRUE;
+ drmDevicePtr device;
+
+ if (drmGetDevice2(drm_amdgpu[0], DRM_DEVICE_GET_PCI_REVISION, &device)) {
+ printf("\n\nGPU Failed to get DRM device PCI info!\n");
+ return CU_FALSE;
+ }
+
+ if (device->bustype != DRM_BUS_PCI) {
+ printf("\n\nGPU device is not on PCI bus!\n");
+ amdgpu_device_deinitialize(device_handle);
+ return CU_FALSE;
+ }
+
+ /* Disable until the hot-unplug support in kernel gets into drm-next */
+ if (major_version < 0xff)
+ enable = false;
+
+ if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
+ &minor_version, &device_handle))
+ return CU_FALSE;
+
+ /* TODO: Once the DRM version for the unplug feature is ready, compare against it here */
+
+ if (amdgpu_device_deinitialize(device_handle))
+ return CU_FALSE;
+
+ return enable;
+}
+
+int suite_hotunplug_tests_init(void)
+{
+ /* We need to open/close device at each test manually */
+ amdgpu_close_devices();
+
+ return CUE_SUCCESS;
+}
+
+int suite_hotunplug_tests_clean(void)
+{
+
+
+ return CUE_SUCCESS;
+}
+
+static int amdgpu_hotunplug_trigger(const char *pathname)
+{
+ int fd, len;
+
+ fd = open(pathname, O_WRONLY);
+ if (fd < 0)
+ return -errno;
+
+ len = write(fd, "1", 1);
+ close(fd);
+
+ return len;
+}
+
+static int amdgpu_hotunplug_setup_test()
+{
+ int r;
+ char *tmp_str;
+
+ if (amdgpu_open_device_on_test_index(open_render_node) < 0) {
+ printf("\n\n Failed to reopen device file!\n");
+ return CUE_SINIT_FAILED;
+
+
+
+ }
+
+ r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
+ &minor_version, &device_handle);
+
+ if (r) {
+ if ((r == -EACCES) && (errno == EACCES))
+ printf("\n\nError:%s. "
+ "Hint:Try to run this test program as root.",
+ strerror(errno));
+ return CUE_SINIT_FAILED;
+ }
+
+ tmp_str = amdgpu_get_device_from_fd(drm_amdgpu[0]);
+ if (!tmp_str){
+ printf("\n\n Device path not found!\n");
+ return CUE_SINIT_FAILED;
+ }
+
+ sysfs_remove = realloc(tmp_str, strlen(tmp_str) * 2);
+ strcat(sysfs_remove, "/remove");
+
+ return 0;
+}
+
+static int amdgpu_hotunplug_teardown_test()
+{
+ if (amdgpu_device_deinitialize(device_handle))
+ return CUE_SCLEAN_FAILED;
+
+ amdgpu_close_devices();
+
+ if (sysfs_remove)
+ free(sysfs_remove);
+
+ return 0;
+}
+
+static inline int amdgpu_hotunplug_remove()
+{
+ return amdgpu_hotunplug_trigger(sysfs_remove);
+}
+
+static inline int amdgpu_hotunplug_rescan()
+{
+ return amdgpu_hotunplug_trigger("/sys/bus/pci/rescan");
+}
+
+static int amdgpu_cs_sync(amdgpu_context_handle context,
+ unsigned int ip_type,
+ int ring,
+ unsigned int seqno)
+{
+ struct amdgpu_cs_fence fence = {
+ .context = context,
+ .ip_type = ip_type,
+ .ring = ring,
+ .fence = seqno,
+ };
+ uint32_t expired;
+
+ return amdgpu_cs_query_fence_status(&fence,
+ AMDGPU_TIMEOUT_INFINITE,
+ 0, &expired);
+}
+
+static void *amdgpu_nop_cs()
+{
+ amdgpu_bo_handle ib_result_handle;
+ void *ib_result_cpu;
+ uint64_t ib_result_mc_address;
+ uint32_t *ptr;
+ int i, r;
+ amdgpu_bo_list_handle bo_list;
+ amdgpu_va_handle va_handle;
+ amdgpu_context_handle context;
+ struct amdgpu_cs_request ibs_request;
+ struct amdgpu_cs_ib_info ib_info;
+
+ r = amdgpu_cs_ctx_create(device_handle, &context);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ &ib_result_handle, &ib_result_cpu,
+ &ib_result_mc_address, &va_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ ptr = ib_result_cpu;
+ for (i = 0; i < 16; ++i)
+ ptr[i] = GFX_COMPUTE_NOP;
+
+ r = amdgpu_bo_list_create(device_handle, 1, &ib_result_handle, NULL, &bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+ ib_info.ib_mc_address = ib_result_mc_address;
+ ib_info.size = 16;
+
+ memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+ ibs_request.ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request.ring = 0;
+ ibs_request.number_of_ibs = 1;
+ ibs_request.ibs = &ib_info;
+ ibs_request.resources = bo_list;
+
+ while (do_cs)
+ amdgpu_cs_submit(context, 0, &ibs_request, 1);
+
+ amdgpu_cs_sync(context, AMDGPU_HW_IP_GFX, 0, ibs_request.seq_no);
+ amdgpu_bo_list_destroy(bo_list);
+ amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+ ib_result_mc_address, 4096);
+
+ amdgpu_cs_ctx_free(context);
+
+ return (void *)0;
+}
+
+static pthread_t* amdgpu_create_cs_thread()
+{
+ int r;
+ pthread_t *thread = malloc(sizeof(*thread));
+ if (!thread)
+ return NULL;
+
+ do_cs = true;
+
+ r = pthread_create(thread, NULL, amdgpu_nop_cs, NULL);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Give the thread enough time to start */
+ usleep(100000);
+ return thread;
+}
+
+static void amdgpu_destroy_cs_thread(pthread_t *thread)
+{
+ void *status;
+
+ do_cs = false;
+
+ pthread_join(*thread, &status);
+ CU_ASSERT_EQUAL(status, 0);
+
+ free(thread);
+}
+
+
+static void amdgpu_hotunplug_test(bool with_cs)
+{
+ int r;
+ pthread_t *thread = NULL;
+
+ r = amdgpu_hotunplug_setup_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ if (with_cs) {
+ thread = amdgpu_create_cs_thread();
+ CU_ASSERT_NOT_EQUAL(thread, NULL);
+ }
+
+ r = amdgpu_hotunplug_remove();
+ CU_ASSERT_EQUAL(r > 0, 1);
+
+ if (with_cs)
+ amdgpu_destroy_cs_thread(thread);
+
+ r = amdgpu_hotunplug_teardown_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ r = amdgpu_hotunplug_rescan();
+ CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+static void amdgpu_hotunplug_simple(void)
+{
+ amdgpu_hotunplug_test(false);
+}
+
+static void amdgpu_hotunplug_with_cs(void)
+{
+ amdgpu_hotunplug_test(true);
+}
+
+static void amdgpu_hotunplug_with_exported_bo(void)
+{
+ int r;
+ uint32_t dma_buf_fd;
+ unsigned int *ptr;
+ amdgpu_bo_handle bo_handle;
+
+ struct amdgpu_bo_alloc_request request = {
+ .alloc_size = 4096,
+ .phys_alignment = 4096,
+ .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
+ .flags = 0,
+ };
+
+ r = amdgpu_hotunplug_setup_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ amdgpu_bo_alloc(device_handle, &request, &bo_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_export(bo_handle, amdgpu_bo_handle_type_dma_buf_fd, &dma_buf_fd);
+ CU_ASSERT_EQUAL(r, 0);
+
+ ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+ CU_ASSERT_NOT_EQUAL(ptr, MAP_FAILED);
+
+ r = amdgpu_hotunplug_remove();
+ CU_ASSERT_EQUAL(r > 0, 1);
+
+ amdgpu_bo_free(bo_handle);
+
+ r = amdgpu_hotunplug_teardown_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ *ptr = 0xdeafbeef;
+
+ munmap(ptr, 4096);
+ close (dma_buf_fd);
+
+ r = amdgpu_hotunplug_rescan();
+ CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+static void amdgpu_hotunplug_with_exported_fence(void)
+{
+ amdgpu_bo_handle ib_result_handle;
+ void *ib_result_cpu;
+ uint64_t ib_result_mc_address;
+ uint32_t *ptr, sync_obj_handle, sync_obj_handle2;
+ int i, r;
+ amdgpu_bo_list_handle bo_list;
+ amdgpu_va_handle va_handle;
+ uint32_t major2, minor2;
+ amdgpu_device_handle device2;
+ amdgpu_context_handle context;
+ struct amdgpu_cs_request ibs_request;
+ struct amdgpu_cs_ib_info ib_info;
+ struct amdgpu_cs_fence fence_status = {0};
+ int shared_fd;
+
+ r = amdgpu_hotunplug_setup_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ r = amdgpu_device_initialize(drm_amdgpu[1], &major2, &minor2, &device2);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_ctx_create(device_handle, &context);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ &ib_result_handle, &ib_result_cpu,
+ &ib_result_mc_address, &va_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ ptr = ib_result_cpu;
+ for (i = 0; i < 16; ++i)
+ ptr[i] = GFX_COMPUTE_NOP;
+
+ r = amdgpu_bo_list_create(device_handle, 1, &ib_result_handle, NULL, &bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+ ib_info.ib_mc_address = ib_result_mc_address;
+ ib_info.size = 16;
+
+ memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+ ibs_request.ip_type = AMDGPU_HW_IP_GFX;
+ ibs_request.ring = 0;
+ ibs_request.number_of_ibs = 1;
+ ibs_request.ibs = &ib_info;
+ ibs_request.resources = bo_list;
+
+ CU_ASSERT_EQUAL(amdgpu_cs_submit(context, 0, &ibs_request, 1), 0);
+
+ fence_status.context = context;
+ fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.ip_instance = 0;
+ fence_status.fence = ibs_request.seq_no;
+
+ CU_ASSERT_EQUAL(amdgpu_cs_fence_to_handle(device_handle, &fence_status,
+ AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ,
+ &sync_obj_handle),
+ 0);
+
+ CU_ASSERT_EQUAL(amdgpu_cs_export_syncobj(device_handle, sync_obj_handle, &shared_fd), 0);
+
+ CU_ASSERT_EQUAL(amdgpu_cs_import_syncobj(device2, shared_fd, &sync_obj_handle2), 0);
+
+ CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device_handle, sync_obj_handle), 0);
+
+ CU_ASSERT_EQUAL(amdgpu_bo_list_destroy(bo_list), 0);
+ CU_ASSERT_EQUAL(amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+ ib_result_mc_address, 4096), 0);
+ CU_ASSERT_EQUAL(amdgpu_cs_ctx_free(context), 0);
+
+ r = amdgpu_hotunplug_remove();
+ CU_ASSERT_EQUAL(r > 0, 1);
+
+ CU_ASSERT_EQUAL(amdgpu_cs_syncobj_wait(device2, &sync_obj_handle2, 1, 100000000, 0, NULL), 0);
+
+ CU_ASSERT_EQUAL(amdgpu_cs_destroy_syncobj(device2, sync_obj_handle2), 0);
+
+ amdgpu_device_deinitialize(device2);
+
+ r = amdgpu_hotunplug_teardown_test();
+ CU_ASSERT_EQUAL(r , 0);
+
+ r = amdgpu_hotunplug_rescan();
+ CU_ASSERT_EQUAL(r > 0, 1);
+}
+
+
+CU_TestInfo hotunplug_tests[] = {
+ { "Unplug card and rescan the bus to plug it back", amdgpu_hotunplug_simple },
+ { "Same as first test but with command submission", amdgpu_hotunplug_with_cs },
+ { "Unplug with exported bo", amdgpu_hotunplug_with_exported_bo },
+ { "Unplug with exported fence", amdgpu_hotunplug_with_exported_fence },
+ CU_TEST_INFO_NULL,
+};
diff --git a/tests/amdgpu/meson.build b/tests/amdgpu/meson.build
index eb16a50c..3a3b7601 100644
--- a/tests/amdgpu/meson.build
+++ b/tests/amdgpu/meson.build
@@ -25,6 +25,7 @@ if dep_cunit.found()
'amdgpu_test.c', 'basic_tests.c', 'bo_tests.c', 'cs_tests.c',
'vce_tests.c', 'uvd_enc_tests.c', 'vcn_tests.c', 'deadlock_tests.c',
'vm_tests.c', 'ras_tests.c', 'syncobj_tests.c', 'security_tests.c',
+ 'hotunplug_tests.c'
),
dependencies : [dep_cunit, dep_threads, dep_atomic_ops],
include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],
@@ -32,3 +33,14 @@ if dep_cunit.found()
install : with_install_tests,
)
endif
+
+amdgpu_stress = executable(
+ 'amdgpu_stress',
+ files(
+ 'amdgpu_stress.c'
+ ),
+ dependencies : [dep_threads, dep_atomic_ops],
+ include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],
+ link_with : [libdrm, libdrm_amdgpu],
+ install : with_install_tests,
+)
diff --git a/tests/amdgpu/security_tests.c b/tests/amdgpu/security_tests.c
index eed695a3..280e862d 100644
--- a/tests/amdgpu/security_tests.c
+++ b/tests/amdgpu/security_tests.c
@@ -315,7 +315,7 @@ static void amdgpu_secure_bounce(void)
SECURE_BUFFER_SIZE,
page_size,
AMDGPU_GEM_DOMAIN_VRAM,
- 0 /* AMDGPU_GEM_CREATE_ENCRYPTED */,
+ AMDGPU_GEM_CREATE_ENCRYPTED,
&bob);
if (res) {
PRINT_ERROR(res);
@@ -323,9 +323,9 @@ static void amdgpu_secure_bounce(void)
goto Out_free_Alice;
}
- /* sDMA clear copy from Alice to Bob.
+ /* sDMA TMZ copy from Alice to Bob.
*/
- amdgpu_bo_lcopy(&sb_ctx, &bob, &alice, SECURE_BUFFER_SIZE, 0);
+ amdgpu_bo_lcopy(&sb_ctx, &bob, &alice, SECURE_BUFFER_SIZE, 1);
/* Move Bob to the GTT domain.
*/
@@ -336,9 +336,9 @@ static void amdgpu_secure_bounce(void)
goto Out_free_all;
}
- /* sDMA clear copy from Bob to Alice.
+ /* sDMA TMZ copy from Bob to Alice.
*/
- amdgpu_bo_lcopy(&sb_ctx, &alice, &bob, SECURE_BUFFER_SIZE, 0);
+ amdgpu_bo_lcopy(&sb_ctx, &alice, &bob, SECURE_BUFFER_SIZE, 1);
/* Verify the contents of Alice.
*/
@@ -432,7 +432,8 @@ CU_BOOL suite_security_tests_enable(void)
&minor_version, &device_handle))
return CU_FALSE;
- if (device_handle->info.family_id != AMDGPU_FAMILY_RV) {
+
+ if (!(device_handle->dev_info.ids_flags & AMDGPU_IDS_FLAGS_TMZ)) {
printf("\n\nDon't support TMZ (trust memory zone), security suite disabled\n");
enable = CU_FALSE;
}
diff --git a/tests/amdgpu/syncobj_tests.c b/tests/amdgpu/syncobj_tests.c
index 3a7b38eb..690bea01 100644
--- a/tests/amdgpu/syncobj_tests.c
+++ b/tests/amdgpu/syncobj_tests.c
@@ -33,6 +33,10 @@ static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
+static uint32_t family_id;
+static uint32_t chip_id;
+static uint32_t chip_rev;
+
static void amdgpu_syncobj_timeline_test(void);
CU_BOOL suite_syncobj_timeline_tests_enable(void)
@@ -100,6 +104,18 @@ static int syncobj_command_submission_helper(uint32_t syncobj_handle, bool
int i, r;
uint64_t seq_no;
static uint32_t *ptr;
+ struct amdgpu_gpu_info gpu_info = {0};
+ unsigned gc_ip_type;
+
+ r = amdgpu_query_gpu_info(device_handle, &gpu_info);
+ CU_ASSERT_EQUAL(r, 0);
+
+ family_id = device_handle->info.family_id;
+ chip_id = device_handle->info.chip_external_rev;
+ chip_rev = device_handle->info.chip_rev;
+
+ gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
+ AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
r = amdgpu_cs_ctx_create(device_handle, &context_handle);
CU_ASSERT_EQUAL(r, 0);
@@ -125,11 +141,11 @@ static int syncobj_command_submission_helper(uint32_t syncobj_handle, bool
chunk_data.ib_data._pad = 0;
chunk_data.ib_data.va_start = ib_result_mc_address;
chunk_data.ib_data.ib_bytes = 16 * 4;
- chunk_data.ib_data.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX :
+ chunk_data.ib_data.ip_type = wait_or_signal ? gc_ip_type :
AMDGPU_HW_IP_DMA;
chunk_data.ib_data.ip_instance = 0;
chunk_data.ib_data.ring = 0;
- chunk_data.ib_data.flags = 0;
+ chunk_data.ib_data.flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
chunks[1].chunk_id = wait_or_signal ?
AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT :
@@ -151,7 +167,7 @@ static int syncobj_command_submission_helper(uint32_t syncobj_handle, bool
memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
fence_status.context = context_handle;
- fence_status.ip_type = wait_or_signal ? AMDGPU_HW_IP_GFX:
+ fence_status.ip_type = wait_or_signal ? gc_ip_type :
AMDGPU_HW_IP_DMA;
fence_status.ip_instance = 0;
fence_status.ring = 0;
diff --git a/tests/amdgpu/vce_tests.c b/tests/amdgpu/vce_tests.c
index 5434e444..4e925cae 100644
--- a/tests/amdgpu/vce_tests.c
+++ b/tests/amdgpu/vce_tests.c
@@ -116,7 +116,7 @@ CU_BOOL suite_vce_tests_enable(void)
return CU_FALSE;
if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
- asic_is_arcturus(asic_id)) {
+ asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
printf("\n\nThe ASIC NOT support VCE, suite disabled\n");
return CU_FALSE;
}
diff --git a/tests/amdgpu/vcn_tests.c b/tests/amdgpu/vcn_tests.c
index 1ca66297..628b4910 100644
--- a/tests/amdgpu/vcn_tests.c
+++ b/tests/amdgpu/vcn_tests.c
@@ -114,7 +114,7 @@ CU_BOOL suite_vcn_tests_enable(void)
if (r != 0 || !info.available_rings ||
(family_id < AMDGPU_FAMILY_RV &&
(family_id == AMDGPU_FAMILY_AI &&
- chip_id != (chip_rev + 0x32)))) { /* Arcturus */
+ (chip_id - chip_rev) < 0x32))) { /* Arcturus */
printf("\n\nThe ASIC NOT support VCN, suite disabled\n");
return CU_FALSE;
}
diff --git a/tests/amdgpu/vm_tests.c b/tests/amdgpu/vm_tests.c
index 95011ea0..b94999c5 100644
--- a/tests/amdgpu/vm_tests.c
+++ b/tests/amdgpu/vm_tests.c
@@ -30,6 +30,9 @@
static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
+static uint32_t family_id;
+static uint32_t chip_id;
+static uint32_t chip_rev;
static void amdgpu_vmid_reserve_test(void);
static void amdgpu_vm_unaligned_map(void);
@@ -110,7 +113,11 @@ static void amdgpu_vmid_reserve_test(void)
r = amdgpu_query_gpu_info(device_handle, &gpu_info);
CU_ASSERT_EQUAL(r, 0);
- gc_ip_type = (asic_is_arcturus(gpu_info.asic_id)) ?
+ family_id = device_handle->info.family_id;
+ chip_id = device_handle->info.chip_external_rev;
+ chip_rev = device_handle->info.chip_rev;
+
+ gc_ip_type = (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) ?
AMDGPU_HW_IP_COMPUTE : AMDGPU_HW_IP_GFX;
r = amdgpu_cs_ctx_create(device_handle, &context_handle);
diff --git a/tests/modeprint/modeprint.c b/tests/modeprint/modeprint.c
index ad727e12..f424f19d 100644
--- a/tests/modeprint/modeprint.c
+++ b/tests/modeprint/modeprint.c
@@ -113,7 +113,7 @@ static int printProperty(int fd, drmModeResPtr res, drmModePropertyPtr props, ui
} else {
for (j = 0; j < props->count_enums; j++) {
- printf("\t\t%lld = %s\n", props->enums[j].value, props->enums[j].name);
+ printf("\t\t%" PRId64 " = %s\n", props->enums[j].value, props->enums[j].name);
if (props->enums[j].value == value)
name = props->enums[j].name;
}
diff --git a/tests/modetest/modetest.c b/tests/modetest/modetest.c
index fc75383a..5fd22f79 100644
--- a/tests/modetest/modetest.c
+++ b/tests/modetest/modetest.c
@@ -265,61 +265,44 @@ static void dump_blob(struct device *dev, uint32_t blob_id)
static const char *modifier_to_string(uint64_t modifier)
{
- switch (modifier) {
- case DRM_FORMAT_MOD_INVALID:
- return "INVALID";
- case DRM_FORMAT_MOD_LINEAR:
- return "LINEAR";
- case I915_FORMAT_MOD_X_TILED:
- return "X_TILED";
- case I915_FORMAT_MOD_Y_TILED:
- return "Y_TILED";
- case I915_FORMAT_MOD_Yf_TILED:
- return "Yf_TILED";
- case I915_FORMAT_MOD_Y_TILED_CCS:
- return "Y_TILED_CCS";
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- return "Yf_TILED_CCS";
- case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
- return "SAMSUNG_64_32_TILE";
- case DRM_FORMAT_MOD_VIVANTE_TILED:
- return "VIVANTE_TILED";
- case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
- return "VIVANTE_SUPER_TILED";
- case DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED:
- return "VIVANTE_SPLIT_TILED";
- case DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED:
- return "VIVANTE_SPLIT_SUPER_TILED";
- case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
- return "NVIDIA_TEGRA_TILED";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
- return "NVIDIA_16BX2_BLOCK(0)";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
- return "NVIDIA_16BX2_BLOCK(1)";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
- return "NVIDIA_16BX2_BLOCK(2)";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
- return "NVIDIA_16BX2_BLOCK(3)";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
- return "NVIDIA_16BX2_BLOCK(4)";
- case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
- return "NVIDIA_16BX2_BLOCK(5)";
- case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
- return "MOD_BROADCOM_VC4_T_TILED";
- case DRM_FORMAT_MOD_QCOM_COMPRESSED:
- return "QCOM_COMPRESSED";
- default:
- return "(UNKNOWN MODIFIER)";
+ static char mod_string[4096];
+
+ char *modifier_name = drmGetFormatModifierName(modifier);
+ char *vendor_name = drmGetFormatModifierVendor(modifier);
+ memset(mod_string, 0x00, sizeof(mod_string));
+
+ if (!modifier_name) {
+ if (vendor_name)
+ snprintf(mod_string, sizeof(mod_string), "%s_%s",
+ vendor_name, "UNKNOWN_MODIFIER");
+ else
+ snprintf(mod_string, sizeof(mod_string), "%s_%s",
+ "UNKNOWN_VENDOR", "UNKNOWN_MODIFIER");
+ /* safe, as free is no-op for NULL */
+ free(vendor_name);
+ return mod_string;
+ }
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ snprintf(mod_string, sizeof(mod_string), "%s", modifier_name);
+ free(modifier_name);
+ free(vendor_name);
+ return mod_string;
}
+
+ snprintf(mod_string, sizeof(mod_string), "%s_%s",
+ vendor_name, modifier_name);
+
+ free(modifier_name);
+ free(vendor_name);
+ return mod_string;
}
static void dump_in_formats(struct device *dev, uint32_t blob_id)
{
- uint32_t i, j;
+ drmModeFormatModifierIterator iter = {0};
drmModePropertyBlobPtr blob;
- struct drm_format_modifier_blob *header;
- uint32_t *formats;
- struct drm_format_modifier *modifiers;
+ uint32_t fmt = 0;
printf("\t\tin_formats blob decoded:\n");
blob = drmModeGetPropertyBlob(dev->fd, blob_id);
@@ -328,23 +311,19 @@ static void dump_in_formats(struct device *dev, uint32_t blob_id)
return;
}
- header = blob->data;
- formats = (uint32_t *) ((char *) header + header->formats_offset);
- modifiers = (struct drm_format_modifier *)
- ((char *) header + header->modifiers_offset);
-
- for (i = 0; i < header->count_formats; i++) {
- printf("\t\t\t");
- dump_fourcc(formats[i]);
- printf(": ");
- for (j = 0; j < header->count_modifiers; j++) {
- uint64_t mask = 1ULL << i;
- if (modifiers[j].formats & mask)
- printf(" %s", modifier_to_string(modifiers[j].modifier));
+ while (drmModeFormatModifierBlobIterNext(blob, &iter)) {
+ if (!fmt || fmt != iter.fmt) {
+ printf("%s\t\t\t", !fmt ? "" : "\n");
+ fmt = iter.fmt;
+ dump_fourcc(fmt);
+ printf(": ");
}
- printf("\n");
+
+ printf(" %s", modifier_to_string(iter.mod));
}
+ printf("\n");
+
drmModeFreePropertyBlob(blob);
}
@@ -396,7 +375,7 @@ static void dump_prop(struct device *dev, drmModePropertyPtr prop,
if (drm_property_type_is(prop, DRM_MODE_PROP_ENUM)) {
printf("\t\tenums:");
for (i = 0; i < prop->count_enums; i++)
- printf(" %s=%llu", prop->enums[i].name,
+ printf(" %s=%"PRIu64, prop->enums[i].name,
prop->enums[i].value);
printf("\n");
} else if (drm_property_type_is(prop, DRM_MODE_PROP_BITMASK)) {
@@ -1725,13 +1704,21 @@ static void set_planes(struct device *dev, struct plane_arg *p, unsigned int cou
static void set_cursors(struct device *dev, struct pipe_arg *pipes, unsigned int count)
{
uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0};
+ uint32_t cw = 64;
+ uint32_t ch = 64;
struct bo *bo;
+ uint64_t value;
unsigned int i;
int ret;
- /* maybe make cursor width/height configurable some day */
- uint32_t cw = 64;
- uint32_t ch = 64;
+ ret = drmGetCap(dev->fd, DRM_CAP_CURSOR_WIDTH, &value);
+ if (!ret)
+ cw = value;
+
+ ret = drmGetCap(dev->fd, DRM_CAP_CURSOR_HEIGHT, &value);
+ if (!ret)
+ ch = value;
+
/* create cursor bo.. just using PATTERN_PLAIN as it has
* translucent alpha
diff --git a/tests/proptest/proptest.c b/tests/proptest/proptest.c
index 5abbf029..0ab0907d 100644
--- a/tests/proptest/proptest.c
+++ b/tests/proptest/proptest.c
@@ -126,7 +126,7 @@ dump_prop(uint32_t prop_id, uint64_t value)
if (drm_property_type_is(prop, DRM_MODE_PROP_ENUM)) {
printf("\t\tenums:");
for (i = 0; i < prop->count_enums; i++)
- printf(" %s=%llu", prop->enums[i].name,
+ printf(" %s=%"PRIu64, prop->enums[i].name,
prop->enums[i].value);
printf("\n");
} else if (drm_property_type_is(prop, DRM_MODE_PROP_BITMASK)) {
diff --git a/xf86drm.c b/xf86drm.c
index edfeb347..5933e4bc 100644
--- a/xf86drm.c
+++ b/xf86drm.c
@@ -61,6 +61,7 @@
#include <sys/sysctl.h>
#endif
#include <math.h>
+#include <inttypes.h>
#if defined(__FreeBSD__)
#include <sys/param.h>
@@ -76,6 +77,7 @@
#include "xf86drm.h"
#include "libdrm_macros.h"
+#include "drm_fourcc.h"
#include "util_math.h"
@@ -124,6 +126,426 @@ static drmServerInfoPtr drm_server_info;
static bool drmNodeIsDRM(int maj, int min);
static char *drmGetMinorNameForFD(int fd, int type);
+#define DRM_MODIFIER(v, f, f_name) \
+ .modifier = DRM_FORMAT_MOD_##v ## _ ##f, \
+ .modifier_name = #f_name
+
+#define DRM_MODIFIER_INVALID(v, f_name) \
+ .modifier = DRM_FORMAT_MOD_INVALID, .modifier_name = #f_name
+
+#define DRM_MODIFIER_LINEAR(v, f_name) \
+ .modifier = DRM_FORMAT_MOD_LINEAR, .modifier_name = #f_name
+
+/* Intel is abit special as the format doesn't follow other vendors naming
+ * scheme */
+#define DRM_MODIFIER_INTEL(f, f_name) \
+ .modifier = I915_FORMAT_MOD_##f, .modifier_name = #f_name
+
+struct drmFormatModifierInfo {
+ uint64_t modifier;
+ const char *modifier_name;
+};
+
+struct drmFormatModifierVendorInfo {
+ uint8_t vendor;
+ const char *vendor_name;
+};
+
+#include "generated_static_table_fourcc.h"
+
+struct drmVendorInfo {
+ uint8_t vendor;
+ char *(*vendor_cb)(uint64_t modifier);
+};
+
+struct drmFormatVendorModifierInfo {
+ uint64_t modifier;
+ const char *modifier_name;
+};
+
+static char *
+drmGetFormatModifierNameFromArm(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromNvidia(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromAmd(uint64_t modifier);
+
+static char *
+drmGetFormatModifierNameFromAmlogic(uint64_t modifier);
+
+static const struct drmVendorInfo modifier_format_vendor_table[] = {
+ { DRM_FORMAT_MOD_VENDOR_ARM, drmGetFormatModifierNameFromArm },
+ { DRM_FORMAT_MOD_VENDOR_NVIDIA, drmGetFormatModifierNameFromNvidia },
+ { DRM_FORMAT_MOD_VENDOR_AMD, drmGetFormatModifierNameFromAmd },
+ { DRM_FORMAT_MOD_VENDOR_AMLOGIC, drmGetFormatModifierNameFromAmlogic },
+};
+
+#ifndef AFBC_FORMAT_MOD_MODE_VALUE_MASK
+#define AFBC_FORMAT_MOD_MODE_VALUE_MASK 0x000fffffffffffffULL
+#endif
+
+static const struct drmFormatVendorModifierInfo arm_mode_value_table[] = {
+ { AFBC_FORMAT_MOD_YTR, "YTR" },
+ { AFBC_FORMAT_MOD_SPLIT, "SPLIT" },
+ { AFBC_FORMAT_MOD_SPARSE, "SPARSE" },
+ { AFBC_FORMAT_MOD_CBR, "CBR" },
+ { AFBC_FORMAT_MOD_TILED, "TILED" },
+ { AFBC_FORMAT_MOD_SC, "SC" },
+ { AFBC_FORMAT_MOD_DB, "DB" },
+ { AFBC_FORMAT_MOD_BCH, "BCH" },
+ { AFBC_FORMAT_MOD_USM, "USM" },
+};
+
+static bool is_x_t_amd_gfx9_tile(uint64_t tile)
+{
+ switch (tile) {
+ case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
+ case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
+ case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+drmGetAfbcFormatModifierNameFromArm(uint64_t modifier, FILE *fp)
+{
+ uint64_t mode_value = modifier & AFBC_FORMAT_MOD_MODE_VALUE_MASK;
+ uint64_t block_size = mode_value & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
+
+ const char *block = NULL;
+ const char *mode = NULL;
+ bool did_print_mode = false;
+
+ /* add block, can only have a (single) block */
+ switch (block_size) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ block = "16x16";
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+ block = "32x8";
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+ block = "64x4";
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+ block = "32x8_64x4";
+ break;
+ }
+
+ if (!block) {
+ return false;
+ }
+
+ fprintf(fp, "BLOCK_SIZE=%s,", block);
+
+ /* add mode */
+ for (unsigned int i = 0; i < ARRAY_SIZE(arm_mode_value_table); i++) {
+ if (arm_mode_value_table[i].modifier & mode_value) {
+ mode = arm_mode_value_table[i].modifier_name;
+ if (!did_print_mode) {
+ fprintf(fp, "MODE=%s", mode);
+ did_print_mode = true;
+ } else {
+ fprintf(fp, "|%s", mode);
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+drmGetAfrcFormatModifierNameFromArm(uint64_t modifier, FILE *fp)
+{
+ for (unsigned int i = 0; i < 2; ++i) {
+ uint64_t coding_unit_block =
+ (modifier >> (i * 4)) & AFRC_FORMAT_MOD_CU_SIZE_MASK;
+ const char *coding_unit_size = NULL;
+
+ switch (coding_unit_block) {
+ case AFRC_FORMAT_MOD_CU_SIZE_16:
+ coding_unit_size = "CU_16";
+ break;
+ case AFRC_FORMAT_MOD_CU_SIZE_24:
+ coding_unit_size = "CU_24";
+ break;
+ case AFRC_FORMAT_MOD_CU_SIZE_32:
+ coding_unit_size = "CU_32";
+ break;
+ }
+
+ if (!coding_unit_size) {
+ if (i == 0) {
+ return false;
+ }
+ break;
+ }
+
+ if (i == 0) {
+ fprintf(fp, "P0=%s,", coding_unit_size);
+ } else {
+ fprintf(fp, "P12=%s,", coding_unit_size);
+ }
+ }
+
+ bool scan_layout =
+ (modifier & AFRC_FORMAT_MOD_LAYOUT_SCAN) == AFRC_FORMAT_MOD_LAYOUT_SCAN;
+ if (scan_layout) {
+ fprintf(fp, "SCAN");
+ } else {
+ fprintf(fp, "ROT");
+ }
+ return true;
+}
+
+static char *
+drmGetFormatModifierNameFromArm(uint64_t modifier)
+{
+ uint64_t type = (modifier >> 52) & 0xf;
+
+ FILE *fp;
+ size_t size = 0;
+ char *modifier_name = NULL;
+ bool result = false;
+
+ fp = open_memstream(&modifier_name, &size);
+ if (!fp)
+ return NULL;
+
+ switch (type) {
+ case DRM_FORMAT_MOD_ARM_TYPE_AFBC:
+ result = drmGetAfbcFormatModifierNameFromArm(modifier, fp);
+ break;
+ case DRM_FORMAT_MOD_ARM_TYPE_AFRC:
+ result = drmGetAfrcFormatModifierNameFromArm(modifier, fp);
+ break;
+ /* misc type is already handled by the static table */
+ case DRM_FORMAT_MOD_ARM_TYPE_MISC:
+ default:
+ result = false;
+ break;
+ }
+
+ fclose(fp);
+ if (!result) {
+ free(modifier_name);
+ return NULL;
+ }
+
+ return modifier_name;
+}
+
+static char *
+drmGetFormatModifierNameFromNvidia(uint64_t modifier)
+{
+ uint64_t height, kind, gen, sector, compression;
+
+ height = modifier & 0xf;
+ kind = (modifier >> 12) & 0xff;
+
+ gen = (modifier >> 20) & 0x3;
+ sector = (modifier >> 22) & 0x1;
+ compression = (modifier >> 23) & 0x7;
+
+ /* just in case there could other simpler modifiers, not yet added, avoid
+ * testing against TEGRA_TILE */
+ if ((modifier & 0x10) == 0x10) {
+ char *mod_nvidia;
+ asprintf(&mod_nvidia, "BLOCK_LINEAR_2D,HEIGHT=%"PRIu64",KIND=%"PRIu64","
+ "GEN=%"PRIu64",SECTOR=%"PRIu64",COMPRESSION=%"PRIu64"", height,
+ kind, gen, sector, compression);
+ return mod_nvidia;
+ }
+
+ return NULL;
+}
+
+static void
+drmGetFormatModifierNameFromAmdDcc(uint64_t modifier, FILE *fp)
+{
+ uint64_t dcc_max_compressed_block =
+ AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
+ uint64_t dcc_retile = AMD_FMT_MOD_GET(DCC_RETILE, modifier);
+
+ const char *dcc_max_compressed_block_str = NULL;
+
+ fprintf(fp, ",DCC");
+
+ if (dcc_retile)
+ fprintf(fp, ",DCC_RETILE");
+
+ if (!dcc_retile && AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier))
+ fprintf(fp, ",DCC_PIPE_ALIGN");
+
+ if (AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier))
+ fprintf(fp, ",DCC_INDEPENDENT_64B");
+
+ if (AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier))
+ fprintf(fp, ",DCC_INDEPENDENT_128B");
+
+ switch (dcc_max_compressed_block) {
+ case AMD_FMT_MOD_DCC_BLOCK_64B:
+ dcc_max_compressed_block_str = "64B";
+ break;
+ case AMD_FMT_MOD_DCC_BLOCK_128B:
+ dcc_max_compressed_block_str = "128B";
+ break;
+ case AMD_FMT_MOD_DCC_BLOCK_256B:
+ dcc_max_compressed_block_str = "256B";
+ break;
+ }
+
+ if (dcc_max_compressed_block_str)
+ fprintf(fp, ",DCC_MAX_COMPRESSED_BLOCK=%s",
+ dcc_max_compressed_block_str);
+
+ if (AMD_FMT_MOD_GET(DCC_CONSTANT_ENCODE, modifier))
+ fprintf(fp, ",DCC_CONSTANT_ENCODE");
+}
+
+static void
+drmGetFormatModifierNameFromAmdTile(uint64_t modifier, FILE *fp)
+{
+ uint64_t pipe_xor_bits, bank_xor_bits, packers, rb;
+ uint64_t pipe, pipe_align, dcc, dcc_retile, tile_version;
+
+ pipe_align = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+ pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+ dcc = AMD_FMT_MOD_GET(DCC, modifier);
+ dcc_retile = AMD_FMT_MOD_GET(DCC_RETILE, modifier);
+ tile_version = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+ fprintf(fp, ",PIPE_XOR_BITS=%"PRIu64, pipe_xor_bits);
+
+ if (tile_version == AMD_FMT_MOD_TILE_VER_GFX9) {
+ bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
+ fprintf(fp, ",BANK_XOR_BITS=%"PRIu64, bank_xor_bits);
+ }
+
+ if (tile_version == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ packers = AMD_FMT_MOD_GET(PACKERS, modifier);
+ fprintf(fp, ",PACKERS=%"PRIu64, packers);
+ }
+
+ if (dcc && tile_version == AMD_FMT_MOD_TILE_VER_GFX9) {
+ rb = AMD_FMT_MOD_GET(RB, modifier);
+ fprintf(fp, ",RB=%"PRIu64, rb);
+ }
+
+ if (dcc && tile_version == AMD_FMT_MOD_TILE_VER_GFX9 &&
+ (dcc_retile || pipe_align)) {
+ pipe = AMD_FMT_MOD_GET(PIPE, modifier);
+ fprintf(fp, ",PIPE_%"PRIu64, pipe);
+ }
+}
+
+static char *
+drmGetFormatModifierNameFromAmd(uint64_t modifier)
+{
+ uint64_t tile, tile_version, dcc;
+ FILE *fp;
+ char *mod_amd = NULL;
+ size_t size = 0;
+
+ const char *str_tile = NULL;
+ const char *str_tile_version = NULL;
+
+ tile = AMD_FMT_MOD_GET(TILE, modifier);
+ tile_version = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+ dcc = AMD_FMT_MOD_GET(DCC, modifier);
+
+ fp = open_memstream(&mod_amd, &size);
+ if (!fp)
+ return NULL;
+
+ /* add tile */
+ switch (tile_version) {
+ case AMD_FMT_MOD_TILE_VER_GFX9:
+ str_tile_version = "GFX9";
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ str_tile_version = "GFX10";
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
+ str_tile_version = "GFX10_RBPLUS";
+ break;
+ }
+
+ if (str_tile_version) {
+ fprintf(fp, "%s", str_tile_version);
+ } else {
+ fclose(fp);
+ free(mod_amd);
+ return NULL;
+ }
+
+ /* add tile str */
+ switch (tile) {
+ case AMD_FMT_MOD_TILE_GFX9_64K_S:
+ str_tile = "GFX9_64K_S";
+ break;
+ case AMD_FMT_MOD_TILE_GFX9_64K_D:
+ str_tile = "GFX9_64K_D";
+ break;
+ case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
+ str_tile = "GFX9_64K_S_X";
+ break;
+ case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
+ str_tile = "GFX9_64K_D_X";
+ break;
+ case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
+ str_tile = "GFX9_64K_R_X";
+ break;
+ }
+
+ if (str_tile)
+ fprintf(fp, ",%s", str_tile);
+
+ if (dcc)
+ drmGetFormatModifierNameFromAmdDcc(modifier, fp);
+
+ if (tile_version >= AMD_FMT_MOD_TILE_VER_GFX9 && is_x_t_amd_gfx9_tile(tile))
+ drmGetFormatModifierNameFromAmdTile(modifier, fp);
+
+ fclose(fp);
+ return mod_amd;
+}
+
+static char *
+drmGetFormatModifierNameFromAmlogic(uint64_t modifier)
+{
+ uint64_t layout = modifier & 0xff;
+ uint64_t options = (modifier >> 8) & 0xff;
+ char *mod_amlogic = NULL;
+
+ const char *layout_str;
+ const char *opts_str;
+
+ switch (layout) {
+ case AMLOGIC_FBC_LAYOUT_BASIC:
+ layout_str = "BASIC";
+ break;
+ case AMLOGIC_FBC_LAYOUT_SCATTER:
+ layout_str = "SCATTER";
+ break;
+ default:
+ layout_str = "INVALID_LAYOUT";
+ break;
+ }
+
+ if (options & AMLOGIC_FBC_OPTION_MEM_SAVING)
+ opts_str = "MEM_SAVING";
+ else
+ opts_str = "0";
+
+ asprintf(&mod_amlogic, "FBC,LAYOUT=%s,OPTIONS=%s", layout_str, opts_str);
+ return mod_amlogic;
+}
+
static unsigned log2_int(unsigned x)
{
unsigned l;
@@ -1137,8 +1559,8 @@ drm_public int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType t
memclear(map);
map.offset = offset;
map.size = size;
- map.type = type;
- map.flags = flags;
+ map.type = (enum drm_map_type)type;
+ map.flags = (enum drm_map_flags)flags;
if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
@@ -1182,7 +1604,7 @@ drm_public int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
memclear(request);
request.count = count;
request.size = size;
- request.flags = flags;
+ request.flags = (int)flags;
request.agp_start = agp_offset;
if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
@@ -1466,7 +1888,7 @@ drm_public int drmDMA(int fd, drmDMAReqPtr request)
dma.send_count = request->send_count;
dma.send_indices = request->send_list;
dma.send_sizes = request->send_sizes;
- dma.flags = request->flags;
+ dma.flags = (enum drm_dma_flags)request->flags;
dma.request_count = request->request_count;
dma.request_size = request->request_size;
dma.request_indices = request->request_list;
@@ -2403,8 +2825,8 @@ drm_public int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
return -errno;
*offset = map.offset;
*size = map.size;
- *type = map.type;
- *flags = map.flags;
+ *type = (drmMapType)map.type;
+ *flags = (drmMapFlags)map.flags;
*handle = (unsigned long)map.handle;
*mtrr = map.mtrr;
return 0;
@@ -2913,6 +3335,15 @@ drm_public int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle)
return 0;
}
+drm_public int drmCloseBufferHandle(int fd, uint32_t handle)
+{
+ struct drm_gem_close args;
+
+ memclear(args);
+ args.handle = handle;
+ return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
+}
+
static char *drmGetMinorNameForFD(int fd, int type)
{
#ifdef __linux__
@@ -2946,8 +3377,9 @@ static char *drmGetMinorNameForFD(int fd, int type)
while ((ent = readdir(sysdir))) {
if (strncmp(ent->d_name, name, len) == 0) {
- snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s",
- ent->d_name);
+ if (snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s",
+ ent->d_name) < 0)
+ return NULL;
closedir(sysdir);
return strdup(dev_name);
@@ -3358,7 +3790,9 @@ static int parse_separate_sysfs_files(int maj, int min,
get_pci_path(maj, min, pci_path);
for (unsigned i = ignore_revision ? 1 : 0; i < ARRAY_SIZE(attrs); i++) {
- snprintf(path, PATH_MAX, "%s/%s", pci_path, attrs[i]);
+ if (snprintf(path, PATH_MAX, "%s/%s", pci_path, attrs[i]) < 0)
+ return -errno;
+
fp = fopen(path, "r");
if (!fp)
return -errno;
@@ -3388,7 +3822,9 @@ static int parse_config_sysfs_file(int maj, int min,
get_pci_path(maj, min, pci_path);
- snprintf(path, PATH_MAX, "%s/config", pci_path);
+ if (snprintf(path, PATH_MAX, "%s/config", pci_path) < 0)
+ return -errno;
+
fd = open(path, O_RDONLY);
if (fd < 0)
return -errno;
@@ -3640,6 +4076,7 @@ free_device:
static int drm_usb_dev_path(int maj, int min, char *path, size_t len)
{
char *value, *tmp_path, *slash;
+ bool usb_device, usb_interface;
snprintf(path, len, "/sys/dev/char/%d:%d/device", maj, min);
@@ -3647,9 +4084,13 @@ static int drm_usb_dev_path(int maj, int min, char *path, size_t len)
if (!value)
return -ENOENT;
- if (strcmp(value, "usb_device") == 0)
+ usb_device = strcmp(value, "usb_device") == 0;
+ usb_interface = strcmp(value, "usb_interface") == 0;
+ free(value);
+
+ if (usb_device)
return 0;
- if (strcmp(value, "usb_interface") != 0)
+ if (!usb_interface)
return -ENOTSUP;
/* The parent of a usb_interface is a usb_device */
@@ -4061,19 +4502,16 @@ drm_device_has_rdev(drmDevicePtr device, dev_t find_rdev)
#define MAX_DRM_NODES 256
/**
- * Get information about the opened drm device
+ * Get information about a device from its dev_t identifier
*
- * \param fd file descriptor of the drm device
+ * \param find_rdev dev_t identifier of the device
* \param flags feature/behaviour bitmask
* \param device the address of a drmDevicePtr where the information
* will be allocated in stored
*
* \return zero on success, negative error code otherwise.
- *
- * \note Unlike drmGetDevice it does not retrieve the pci device revision field
- * unless the DRM_DEVICE_GET_PCI_REVISION \p flag is set.
*/
-drm_public int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
+drm_public int drmGetDeviceFromDevId(dev_t find_rdev, uint32_t flags, drmDevicePtr *device)
{
#ifdef __OpenBSD__
/*
@@ -4082,22 +4520,18 @@ drm_public int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
* Avoid stat'ing all of /dev needlessly by implementing this custom path.
*/
drmDevicePtr d;
- struct stat sbuf;
char node[PATH_MAX + 1];
const char *dev_name;
int node_type, subsystem_type;
int maj, min, n, ret;
- if (fd == -1 || device == NULL)
+ if (device == NULL)
return -EINVAL;
- if (fstat(fd, &sbuf))
- return -errno;
-
- maj = major(sbuf.st_rdev);
- min = minor(sbuf.st_rdev);
+ maj = major(find_rdev);
+ min = minor(find_rdev);
- if (!drmNodeIsDRM(maj, min) || !S_ISCHR(sbuf.st_mode))
+ if (!drmNodeIsDRM(maj, min))
return -EINVAL;
node_type = drmGetMinorType(maj, min);
@@ -4130,26 +4564,20 @@ drm_public int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
drmDevicePtr d;
DIR *sysdir;
struct dirent *dent;
- struct stat sbuf;
int subsystem_type;
int maj, min;
int ret, i, node_count;
- dev_t find_rdev;
if (drm_device_validate_flags(flags))
return -EINVAL;
- if (fd == -1 || device == NULL)
+ if (device == NULL)
return -EINVAL;
- if (fstat(fd, &sbuf))
- return -errno;
-
- find_rdev = sbuf.st_rdev;
- maj = major(sbuf.st_rdev);
- min = minor(sbuf.st_rdev);
+ maj = major(find_rdev);
+ min = minor(find_rdev);
- if (!drmNodeIsDRM(maj, min) || !S_ISCHR(sbuf.st_mode))
+ if (!drmNodeIsDRM(maj, min))
return -EINVAL;
subsystem_type = drmParseSubsystemType(maj, min);
@@ -4202,6 +4630,35 @@ drm_public int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
* Get information about the opened drm device
*
* \param fd file descriptor of the drm device
+ * \param flags feature/behaviour bitmask
+ * \param device the address of a drmDevicePtr where the information
+ * will be allocated in stored
+ *
+ * \return zero on success, negative error code otherwise.
+ *
+ * \note Unlike drmGetDevice it does not retrieve the pci device revision field
+ * unless the DRM_DEVICE_GET_PCI_REVISION \p flag is set.
+ */
+drm_public int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
+{
+ struct stat sbuf;
+
+ if (fd == -1)
+ return -EINVAL;
+
+ if (fstat(fd, &sbuf))
+ return -errno;
+
+ if (!S_ISCHR(sbuf.st_mode))
+ return -EINVAL;
+
+ return drmGetDeviceFromDevId(sbuf.st_rdev, flags, device);
+}
+
+/**
+ * Get information about the opened drm device
+ *
+ * \param fd file descriptor of the drm device
* \param device the address of a drmDevicePtr where the information
* will be allocated in stored
*
@@ -4585,3 +5042,66 @@ drm_public int drmSyncobjTransfer(int fd,
return ret;
}
+
+static char *
+drmGetFormatModifierFromSimpleTokens(uint64_t modifier)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_format_modifier_table); i++) {
+ if (drm_format_modifier_table[i].modifier == modifier)
+ return strdup(drm_format_modifier_table[i].modifier_name);
+ }
+
+ return NULL;
+}
+
+/** Retrieves a human-readable representation of a vendor (as a string) from
+ * the format token modifier
+ *
+ * \param modifier the format modifier token
+ * \return a char pointer to the human-readable form of the vendor. Caller is
+ * responsible for freeing it.
+ */
+drm_public char *
+drmGetFormatModifierVendor(uint64_t modifier)
+{
+ unsigned int i;
+ uint8_t vendor = fourcc_mod_get_vendor(modifier);
+
+ for (i = 0; i < ARRAY_SIZE(drm_format_modifier_vendor_table); i++) {
+ if (drm_format_modifier_vendor_table[i].vendor == vendor)
+ return strdup(drm_format_modifier_vendor_table[i].vendor_name);
+ }
+
+ return NULL;
+}
+
+/** Retrieves a human-readable representation string from a format token
+ * modifier
+ *
+ * If the dedicated function was not able to extract a valid name or searching
+ * the format modifier was not in the table, this function would return NULL.
+ *
+ * \param modifier the token format
+ * \return a malloc'ed string representation of the modifier. Caller is
+ * responsible for freeing the string returned.
+ *
+ */
+drm_public char *
+drmGetFormatModifierName(uint64_t modifier)
+{
+ uint8_t vendorid = fourcc_mod_get_vendor(modifier);
+ char *modifier_found = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modifier_format_vendor_table); i++) {
+ if (modifier_format_vendor_table[i].vendor == vendorid)
+ modifier_found = modifier_format_vendor_table[i].vendor_cb(modifier);
+ }
+
+ if (!modifier_found)
+ return drmGetFormatModifierFromSimpleTokens(modifier);
+
+ return modifier_found;
+}
diff --git a/xf86drm.h b/xf86drm.h
index 9fc06ab8..1631396a 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -834,6 +834,8 @@ extern int drmGetNodeTypeFromFd(int fd);
extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
+extern int drmCloseBufferHandle(int fd, uint32_t handle);
+
extern char *drmGetPrimaryDeviceNameFromFd(int fd);
extern char *drmGetRenderDeviceNameFromFd(int fd);
@@ -915,6 +917,8 @@ extern void drmFreeDevices(drmDevicePtr devices[], int count);
extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device);
extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
+extern int drmGetDeviceFromDevId(dev_t dev_id, uint32_t flags, drmDevicePtr *device);
+
extern int drmDevicesEqual(drmDevicePtr a, drmDevicePtr b);
extern int drmSyncobjCreate(int fd, uint32_t flags, uint32_t *handle);
@@ -944,6 +948,17 @@ extern int drmSyncobjTransfer(int fd,
uint32_t src_handle, uint64_t src_point,
uint32_t flags);
+extern char *
+drmGetFormatModifierVendor(uint64_t modifier);
+
+extern char *
+drmGetFormatModifierName(uint64_t modifier);
+
+#ifndef fourcc_mod_get_vendor
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+#endif
+
#if defined(__cplusplus)
}
#endif
diff --git a/xf86drmMode.c b/xf86drmMode.c
index c3920b91..84d3c771 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -33,11 +33,15 @@
*
*/
+#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#if HAVE_SYS_SYSCTL_H
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#endif
#include <sys/sysctl.h>
#endif
#include <stdio.h>
@@ -47,6 +51,7 @@
#include "xf86drmMode.h"
#include "xf86drm.h"
#include <drm.h>
+#include <drm_fourcc.h>
#include <string.h>
#include <dirent.h>
#include <unistd.h>
@@ -724,6 +729,112 @@ err_allocs:
return r;
}
+static inline const uint32_t *
+get_formats_ptr(const struct drm_format_modifier_blob *blob)
+{
+ return (const uint32_t *)(((uint8_t *)blob) + blob->formats_offset);
+}
+
+static inline const struct drm_format_modifier *
+get_modifiers_ptr(const struct drm_format_modifier_blob *blob)
+{
+ return (const struct drm_format_modifier *)(((uint8_t *)blob) +
+ blob->modifiers_offset);
+}
+
+static bool _drmModeFormatModifierGetNext(const drmModePropertyBlobRes *blob,
+ drmModeFormatModifierIterator *iter)
+{
+ const struct drm_format_modifier *blob_modifiers, *mod;
+ const struct drm_format_modifier_blob *fmt_mod_blob;
+ const uint32_t *blob_formats;
+
+ assert(blob && iter);
+
+ fmt_mod_blob = blob->data;
+ blob_modifiers = get_modifiers_ptr(fmt_mod_blob);
+ blob_formats = get_formats_ptr(fmt_mod_blob);
+
+ /* fmt_idx and mod_idx designate the number of processed formats
+ * and modifiers.
+ */
+ if (iter->fmt_idx >= fmt_mod_blob->count_formats ||
+ iter->mod_idx >= fmt_mod_blob->count_modifiers)
+ return false;
+
+ iter->fmt = blob_formats[iter->fmt_idx];
+ iter->mod = DRM_FORMAT_MOD_INVALID;
+
+ /* From the latest valid found, get the next valid modifier */
+ while (iter->mod_idx < fmt_mod_blob->count_modifiers) {
+ mod = &blob_modifiers[iter->mod_idx++];
+
+ /* Check if the format that fmt_idx designates, belongs to
+ * this modifier 64-bit window selected via mod->offset.
+ */
+ if (iter->fmt_idx < mod->offset ||
+ iter->fmt_idx >= mod->offset + 64)
+ continue;
+ if (!(mod->formats & (1 << (iter->fmt_idx - mod->offset))))
+ continue;
+
+ iter->mod = mod->modifier;
+ break;
+ }
+
+ if (iter->mod_idx == fmt_mod_blob->count_modifiers) {
+ iter->mod_idx = 0;
+ iter->fmt_idx++;
+ }
+
+ /* Since mod_idx reset, in order for the caller to iterate over
+ * the last modifier of the last format, always return true here
+ * and early return from the next call.
+ */
+ return true;
+}
+
+/**
+ * Iterate over formats first and then over modifiers. On each call, iter->fmt
+ * is retained until all associated modifiers are returned. Then, either update
+ * iter->fmt with the next format, or exit if there aren't any left.
+ *
+ * NOTE: clients should not make any assumption on mod_idx and fmt_idx values
+ *
+ * @blob: valid kernel blob holding formats and modifiers
+ * @iter: input and output iterator data. Iter data must be initialised to zero
+ * @return: false, on error or there aren't any further formats or modifiers left.
+ * true, on success and there are more formats or modifiers.
+ */
+drm_public bool drmModeFormatModifierBlobIterNext(const drmModePropertyBlobRes *blob,
+ drmModeFormatModifierIterator *iter)
+{
+ drmModeFormatModifierIterator tmp;
+ bool has_fmt;
+
+ if (!blob || !iter)
+ return false;
+
+ tmp.fmt_idx = iter->fmt_idx;
+ tmp.mod_idx = iter->mod_idx;
+
+ /* With the current state of things, DRM/KMS drivers are allowed to
+ * construct blobs having formats and no modifiers. Userspace can't
+ * legitimately abort in such cases.
+ *
+ * While waiting for the kernel to perhaps disallow formats with no
+ * modifiers in IN_FORMATS blobs, skip the format altogether.
+ */
+ do {
+ has_fmt = _drmModeFormatModifierGetNext(blob, &tmp);
+ if (has_fmt && tmp.mod != DRM_FORMAT_MOD_INVALID)
+ *iter = tmp;
+
+ } while (has_fmt && tmp.mod == DRM_FORMAT_MOD_INVALID);
+
+ return has_fmt;
+}
+
drm_public void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr)
{
if (!ptr)
diff --git a/xf86drmMode.h b/xf86drmMode.h
index 72696782..19bf91dd 100644
--- a/xf86drmMode.h
+++ b/xf86drmMode.h
@@ -42,6 +42,7 @@ extern "C" {
#include <drm.h>
#include <drm_mode.h>
+#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@@ -142,13 +143,15 @@ typedef struct _drmModeProperty {
uint32_t *blob_ids; /* store the blob IDs */
} drmModePropertyRes, *drmModePropertyPtr;
-static __inline int drm_property_type_is(drmModePropertyPtr property,
+static inline uint32_t drmModeGetPropertyType(const drmModePropertyRes *prop)
+{
+ return prop->flags & (DRM_MODE_PROP_LEGACY_TYPE | DRM_MODE_PROP_EXTENDED_TYPE);
+}
+
+static inline int drm_property_type_is(const drmModePropertyPtr property,
uint32_t type)
{
- /* instanceof for props.. handles extended type vs original types: */
- if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
- return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
- return property->flags & type;
+ return drmModeGetPropertyType(property) == type;
}
typedef struct _drmModeCrtc {
@@ -229,6 +232,12 @@ typedef struct _drmModeObjectProperties {
uint64_t *prop_values;
} drmModeObjectProperties, *drmModeObjectPropertiesPtr;
+typedef struct _drmModeFormatModifierIterator {
+ uint32_t fmt_idx, mod_idx;
+ uint32_t fmt;
+ uint64_t mod;
+} drmModeFormatModifierIterator;
+
typedef struct _drmModePlane {
uint32_t count_formats;
uint32_t *formats;
@@ -386,6 +395,8 @@ extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
extern void drmModeFreeProperty(drmModePropertyPtr ptr);
extern drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id);
+extern bool drmModeFormatModifierBlobIterNext(const drmModePropertyBlobRes *blob,
+ drmModeFormatModifierIterator *iter);
extern void drmModeFreePropertyBlob(drmModePropertyBlobPtr ptr);
extern int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property_id,
uint64_t value);