From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Iouri Tarassov <iourit@linux.microsoft.com>
Date: Mon, 31 Jan 2022 16:41:28 -0800
Subject: drivers: hv: dxgkrnl: Sharing of sync objects

Implement creation of shared sync objects and the ioctl for sharing
dxgsyncobject objects between processes in the virtual machine.

Sync objects are shared using file descriptor (FD) handles.
The name "NT handle" is used to be compatible with the Windows
implementation.

An FD handle is created by the LX_DXSHAREOBJECTS ioctl. The created FD
handle can be sent to another process using any Linux API.

To use a shared sync object in other ioctls, the object needs to be
opened using its FD handle. A sync object is opened by the
LX_DXOPENSYNCOBJECTFROMNTHANDLE2 ioctl, which returns a d3dkmthandle
value.

Signed-off-by: Iouri Tarassov <iourit@linux.microsoft.com>
[kms: Forward port to v6.1]
Signed-off-by: Kelsey Steele <kelseysteele@microsoft.com>
---
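
Editor's note (below the "---", so not part of the commit): a minimal
userspace sketch of the consumer side of this interface. It is a
hypothetical helper, assuming an already-open /dev/dxg FD, the
LX_DXOPENSYNCOBJECTFROMNTHANDLE2 define and structures from the uapi
header this series installs as <misc/d3dkmthk.h>, and a d3dkmthandle
"device" obtained earlier from device creation. The shared FD itself
would arrive from the sharing process over any Linux mechanism, for
example an SCM_RIGHTS control message.

    /* Hypothetical sketch: turn a shared sync object FD into a local
     * d3dkmthandle usable in subsequent ioctls.
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <misc/d3dkmthk.h>

    static int open_shared_syncobj(int dxg_fd, int shared_fd,
                                   struct d3dkmthandle device,
                                   struct d3dkmthandle *out)
    {
            struct d3dkmt_opensyncobjectfromnthandle2 args;

            memset(&args, 0, sizeof(args));
            /* On Linux the "NT handle" is simply the file descriptor */
            args.nt_handle = (__u64)shared_fd;
            args.device = device;

            if (ioctl(dxg_fd, LX_DXOPENSYNCOBJECTFROMNTHANDLE2, &args) < 0)
                    return -1;

            /* For monitored fences the kernel also fills in
             * args.monitored_fence.fence_value_cpu_va/gpu_va.
             */
            *out = args.sync_object;
            return 0;
    }
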
 drivers/hv/dxgkrnl/dxgadapter.c | 181 ++-
 drivers/hv/dxgkrnl/dxgkrnl.h    |  96 ++
 drivers/hv/dxgkrnl/dxgmodule.c  |   1 +
 drivers/hv/dxgkrnl/dxgprocess.c |   4 +
 drivers/hv/dxgkrnl/dxgvmbus.c   | 221 ++++
 drivers/hv/dxgkrnl/dxgvmbus.h   |  35 +
 drivers/hv/dxgkrnl/ioctl.c      | 556 +++++++++-
 include/uapi/misc/d3dkmthk.h    |  93 ++
 8 files changed, 1181 insertions(+), 6 deletions(-)
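
Editor's note (still above the first "diff --git", so ignored by git am):
a hypothetical userspace sketch for the hwqueue half of the patch, which
creates a hardware queue on an existing context and queues a wait on one
sync object. The structures and ioctl numbers match the uapi additions
below; "dxg_fd", "context" and "syncobj" are assumed to come from earlier
calls, and error handling is reduced to the minimum.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <misc/d3dkmthk.h>

    static int wait_on_hwqueue(int dxg_fd, struct d3dkmthandle context,
                               struct d3dkmthandle syncobj,
                               __u64 fence_value)
    {
            struct d3dkmt_createhwqueue create;
            struct d3dkmt_submitwaitforsyncobjectstohwqueue wait;

            memset(&create, 0, sizeof(create));
            create.context = context;       /* the queue lives on a context */
            if (ioctl(dxg_fd, LX_DXCREATEHWQUEUE, &create) < 0)
                    return -1;
            /* On success the kernel filled in create.queue, the progress
             * fence handle and the fence CPU/GPU addresses.
             */

            memset(&wait, 0, sizeof(wait));
            wait.hwqueue = create.queue;
            wait.object_count = 1;
            /* user pointers are carried as __u64 in the uapi structs */
            wait.objects = (__u64)(uintptr_t)&syncobj;
            wait.fence_values = (__u64)(uintptr_t)&fence_value;
            return ioctl(dxg_fd, LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE,
                         &wait);
    }
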
diff --git a/drivers/hv/dxgkrnl/dxgadapter.c b/drivers/hv/dxgkrnl/dxgadapter.c
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgadapter.c
+++ b/drivers/hv/dxgkrnl/dxgadapter.c
@@ -171,6 +171,26 @@ void dxgadapter_remove_shared_resource(struct dxgadapter *adapter,
up_write(&adapter->shared_resource_list_lock);
}

+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ list_add_tail(&object->adapter_shared_syncobj_list_entry,
+ &adapter->adapter_shared_syncobj_list_head);
+ up_write(&adapter->shared_resource_list_lock);
+}
+
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *object)
+{
+ down_write(&adapter->shared_resource_list_lock);
+ if (object->adapter_shared_syncobj_list_entry.next) {
+ list_del(&object->adapter_shared_syncobj_list_entry);
+ object->adapter_shared_syncobj_list_entry.next = NULL;
+ }
+ up_write(&adapter->shared_resource_list_lock);
+}
+
void dxgadapter_add_syncobj(struct dxgadapter *adapter,
struct dxgsyncobject *object)
{
@@ -622,7 +642,7 @@ void dxgresource_destroy(struct dxgresource *resource)
dxgallocation_destroy(alloc);
}
dxgdevice_remove_resource(device, resource);
- shared_resource = resource->shared_owner;
+ shared_resource = resource->shared_owner;
if (shared_resource) {
dxgsharedresource_remove_resource(shared_resource,
resource);
@@ -736,6 +756,9 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *device)
*/
void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context)
{
+ struct dxghwqueue *hwqueue;
+ struct dxghwqueue *tmp;
+
DXG_TRACE("Destroying context %p", context);
context->object_state = DXGOBJECTSTATE_DESTROYED;
if (context->device) {
@@ -747,6 +770,10 @@ void dxgcontext_destroy(struct dxgprocess *process, struct dxgcontext *context)
dxgdevice_remove_context(context->device, context);
kref_put(&context->device->device_kref, dxgdevice_release);
}
+ list_for_each_entry_safe(hwqueue, tmp, &context->hwqueue_list_head,
+ hwqueue_list_entry) {
+ dxghwqueue_destroy(process, hwqueue);
+ }
kref_put(&context->context_kref, dxgcontext_release);
}

@@ -773,6 +800,38 @@ void dxgcontext_release(struct kref *refcount)
kfree(context);
}

+int dxgcontext_add_hwqueue(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ int ret = 0;
+
+ down_write(&context->hwqueue_list_lock);
+ if (dxgcontext_is_active(context))
+ list_add_tail(&hwqueue->hwqueue_list_entry,
+ &context->hwqueue_list_head);
+ else
+ ret = -ENODEV;
+ up_write(&context->hwqueue_list_lock);
+ return ret;
+}
+
+void dxgcontext_remove_hwqueue(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ if (hwqueue->hwqueue_list_entry.next) {
+ list_del(&hwqueue->hwqueue_list_entry);
+ hwqueue->hwqueue_list_entry.next = NULL;
+ }
+}
+
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *context,
+ struct dxghwqueue *hwqueue)
+{
+ down_write(&context->hwqueue_list_lock);
+ dxgcontext_remove_hwqueue(context, hwqueue);
+ up_write(&context->hwqueue_list_lock);
+}
+
struct dxgallocation *dxgallocation_create(struct dxgprocess *process)
{
struct dxgallocation *alloc;
@@ -958,6 +1017,63 @@ void dxgprocess_adapter_remove_device(struct dxgdevice *device)
mutex_unlock(&device->adapter_info->device_list_mutex);
}

+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+ struct dxgsyncobject *so)
+{
+ struct dxgsharedsyncobject *syncobj;
+
+ syncobj = kzalloc(sizeof(*syncobj), GFP_KERNEL);
+ if (syncobj) {
+ kref_init(&syncobj->ssyncobj_kref);
+ INIT_LIST_HEAD(&syncobj->shared_syncobj_list_head);
+ syncobj->adapter = adapter;
+ syncobj->type = so->type;
+ syncobj->monitored_fence = so->monitored_fence;
+ dxgadapter_add_shared_syncobj(adapter, syncobj);
+ kref_get(&adapter->adapter_kref);
+ init_rwsem(&syncobj->syncobj_list_lock);
+ mutex_init(&syncobj->fd_mutex);
+ }
+ return syncobj;
+}
+
+void dxgsharedsyncobj_release(struct kref *refcount)
+{
+ struct dxgsharedsyncobject *syncobj;
+
+ syncobj = container_of(refcount, struct dxgsharedsyncobject,
+ ssyncobj_kref);
+ DXG_TRACE("Destroying shared sync object %p", syncobj);
+ if (syncobj->adapter) {
+ dxgadapter_remove_shared_syncobj(syncobj->adapter,
+ syncobj);
+ kref_put(&syncobj->adapter->adapter_kref,
+ dxgadapter_release);
+ }
+ kfree(syncobj);
+}
+
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *shared,
+ struct dxgsyncobject *syncobj)
+{
+ DXG_TRACE("Add syncobj 0x%p 0x%p", shared, syncobj);
+ kref_get(&shared->ssyncobj_kref);
+ down_write(&shared->syncobj_list_lock);
+ list_add(&syncobj->shared_syncobj_list_entry,
+ &shared->shared_syncobj_list_head);
+ syncobj->shared_owner = shared;
+ up_write(&shared->syncobj_list_lock);
+}
+
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *shared,
+ struct dxgsyncobject *syncobj)
+{
+ DXG_TRACE("Remove syncobj 0x%p", shared);
+ down_write(&shared->syncobj_list_lock);
+ list_del(&syncobj->shared_syncobj_list_entry);
+ up_write(&shared->syncobj_list_lock);
+}
+
struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
struct dxgdevice *device,
struct dxgadapter *adapter,
@@ -1091,7 +1207,70 @@ void dxgsyncobject_release(struct kref *refcount)
struct dxgsyncobject *syncobj;

syncobj = container_of(refcount, struct dxgsyncobject, syncobj_kref);
+ if (syncobj->shared_owner) {
+ dxgsharedsyncobj_remove_syncobj(syncobj->shared_owner,
+ syncobj);
+ kref_put(&syncobj->shared_owner->ssyncobj_kref,
+ dxgsharedsyncobj_release);
+ }
if (syncobj->host_event)
kfree(syncobj->host_event);
kfree(syncobj);
}
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *context)
+{
+ struct dxgprocess *process = context->device->process;
+ struct dxghwqueue *hwqueue = kzalloc(sizeof(*hwqueue), GFP_KERNEL);
+
+ if (hwqueue) {
+ kref_init(&hwqueue->hwqueue_kref);
+ hwqueue->context = context;
+ hwqueue->process = process;
+ hwqueue->device_handle = context->device->handle;
+ if (dxgcontext_add_hwqueue(context, hwqueue) < 0) {
+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release);
+ hwqueue = NULL;
+ } else {
+ kref_get(&context->context_kref);
+ }
+ }
+ return hwqueue;
+}
+
+void dxghwqueue_destroy(struct dxgprocess *process, struct dxghwqueue *hwqueue)
+{
+ DXG_TRACE("Destroying hwqueue %p", hwqueue);
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ if (hwqueue->handle.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue->handle);
+ hwqueue->handle.v = 0;
+ }
+ if (hwqueue->progress_fence_sync_object.v) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ hwqueue->progress_fence_sync_object);
+ hwqueue->progress_fence_sync_object.v = 0;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (hwqueue->progress_fence_mapped_address) {
+ dxg_unmap_iospace(hwqueue->progress_fence_mapped_address,
+ PAGE_SIZE);
+ hwqueue->progress_fence_mapped_address = NULL;
+ }
+ dxgcontext_remove_hwqueue_safe(hwqueue->context, hwqueue);
+
+ kref_put(&hwqueue->context->context_kref, dxgcontext_release);
+ kref_put(&hwqueue->hwqueue_kref, dxghwqueue_release);
+}
+
+void dxghwqueue_release(struct kref *refcount)
+{
+ struct dxghwqueue *hwqueue;
+
+ hwqueue = container_of(refcount, struct dxghwqueue, hwqueue_kref);
+ kfree(hwqueue);
+}
diff --git a/drivers/hv/dxgkrnl/dxgkrnl.h b/drivers/hv/dxgkrnl/dxgkrnl.h
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgkrnl.h
+++ b/drivers/hv/dxgkrnl/dxgkrnl.h
@@ -40,6 +40,8 @@ struct dxgallocation;
struct dxgresource;
struct dxgsharedresource;
struct dxgsyncobject;
+struct dxgsharedsyncobject;
+struct dxghwqueue;

/*
* Driver private data.
@@ -137,6 +139,18 @@ struct dxghosteventcpu {
* "device" syncobject, because the belong to a device (dxgdevice).
* Device syncobjects are inserted to a list in dxgdevice.
*
+ * A syncobject can be "shared", meaning that it could be opened by many
+ * processes.
+ *
+ * Shared syncobjects are inserted to a list in their owner
+ * (dxgsharedsyncobject).
+ * A syncobject can be shared by using a global handle or by using
+ * "NT security handle".
+ * When global handle sharing is used, the handle is created during object
+ * creation.
+ * When "NT security" is used, the handle for sharing is created by calling
+ * dxgk_share_objects. On Linux "NT handle" is represented by a file
+ * descriptor. FD points to dxgsharedsyncobject.
*/
struct dxgsyncobject {
struct kref syncobj_kref;
@@ -146,6 +160,8 @@ struct dxgsyncobject {
* List entry in dxgadapter for other objects
*/
struct list_head syncobj_list_entry;
+ /* List entry in the dxgsharedsyncobject object for shared syncobjects */
+ struct list_head shared_syncobj_list_entry;
/* Adapter, the syncobject belongs to. NULL for stopped sync obejcts. */
struct dxgadapter *adapter;
/*
@@ -156,6 +172,8 @@ struct dxgsyncobject {
struct dxgprocess *process;
/* Used by D3DDDI_CPU_NOTIFICATION objects */
struct dxghosteventcpu *host_event;
+ /* Owner object for shared syncobjects */
+ struct dxgsharedsyncobject *shared_owner;
/* CPU virtual address of the fence value for "device" syncobjects */
void *mapped_address;
/* Handle in the process handle table */
@@ -187,6 +205,41 @@ struct dxgvgpuchannel {
struct hv_device *hdev;
};

+/*
+ * The object is used as parent of all sync objects, created for a shared
+ * syncobject. When a shared syncobject is created without NT security, the
+ * handle in the global handle table will point to this object.
+ */
+struct dxgsharedsyncobject {
+ struct kref ssyncobj_kref;
+ /* Referenced by file descriptors */
+ int host_shared_handle_nt_reference;
+ /* Corresponding handle in the host global handle table */
+ struct d3dkmthandle host_shared_handle;
+ /*
+ * When the sync object is shared by NT handle, this is the
+ * corresponding handle in the host
+ */
+ struct d3dkmthandle host_shared_handle_nt;
+ /* Protects access to host_shared_handle_nt */
+ struct mutex fd_mutex;
+ struct rw_semaphore syncobj_list_lock;
+ struct list_head shared_syncobj_list_head;
+ struct list_head adapter_shared_syncobj_list_entry;
+ struct dxgadapter *adapter;
+ enum d3dddi_synchronizationobject_type type;
+ u32 monitored_fence:1;
+};
+
+struct dxgsharedsyncobject *dxgsharedsyncobj_create(struct dxgadapter *adapter,
+ struct dxgsyncobject
+ *syncobj);
+void dxgsharedsyncobj_release(struct kref *refcount);
+void dxgsharedsyncobj_add_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+ struct dxgsyncobject *syncobj);
+void dxgsharedsyncobj_remove_syncobj(struct dxgsharedsyncobject *sharedsyncobj,
+ struct dxgsyncobject *syncobj);
+
struct dxgsyncobject *dxgsyncobject_create(struct dxgprocess *process,
struct dxgdevice *device,
struct dxgadapter *adapter,
@@ -375,6 +428,8 @@ struct dxgadapter {
struct list_head adapter_process_list_head;
/* List of all dxgsharedresource objects */
struct list_head shared_resource_list_head;
+ /* List of all dxgsharedsyncobject objects */
+ struct list_head adapter_shared_syncobj_list_head;
/* List of all non-device dxgsyncobject objects */
struct list_head syncobj_list_head;
/* This lock protects shared resource and syncobject lists */
@@ -402,6 +457,10 @@ void dxgadapter_release_lock_shared(struct dxgadapter *adapter);
int dxgadapter_acquire_lock_exclusive(struct dxgadapter *adapter);
void dxgadapter_acquire_lock_forced(struct dxgadapter *adapter);
void dxgadapter_release_lock_exclusive(struct dxgadapter *adapter);
+void dxgadapter_add_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *so);
+void dxgadapter_remove_shared_syncobj(struct dxgadapter *adapter,
+ struct dxgsharedsyncobject *so);
void dxgadapter_add_syncobj(struct dxgadapter *adapter,
struct dxgsyncobject *so);
void dxgadapter_remove_syncobj(struct dxgsyncobject *so);
@@ -487,8 +546,32 @@ struct dxgcontext *dxgcontext_create(struct dxgdevice *dev);
void dxgcontext_destroy(struct dxgprocess *pr, struct dxgcontext *ctx);
void dxgcontext_destroy_safe(struct dxgprocess *pr, struct dxgcontext *ctx);
void dxgcontext_release(struct kref *refcount);
+int dxgcontext_add_hwqueue(struct dxgcontext *ctx,
+ struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue(struct dxgcontext *ctx, struct dxghwqueue *hq);
+void dxgcontext_remove_hwqueue_safe(struct dxgcontext *ctx,
+ struct dxghwqueue *hq);
bool dxgcontext_is_active(struct dxgcontext *ctx);

+/*
+ * The object represents the execution hardware queue of a device.
+ */
+struct dxghwqueue {
+ /* entry in the context hw queue list */
+ struct list_head hwqueue_list_entry;
+ struct kref hwqueue_kref;
+ struct dxgcontext *context;
+ struct dxgprocess *process;
+ struct d3dkmthandle progress_fence_sync_object;
+ struct d3dkmthandle handle;
+ struct d3dkmthandle device_handle;
+ void *progress_fence_mapped_address;
+};
+
+struct dxghwqueue *dxghwqueue_create(struct dxgcontext *ctx);
+void dxghwqueue_destroy(struct dxgprocess *pr, struct dxghwqueue *hq);
+void dxghwqueue_release(struct kref *refcount);
+
/*
* A shared resource object is created to track the list of dxgresource objects,
* which are opened for the same underlying shared resource.
@@ -720,9 +803,22 @@ int dxgvmb_send_wait_sync_object_cpu(struct dxgprocess *process,
d3dkmt_waitforsynchronizationobjectfromcpu
*args,
u64 cpu_event);
+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createhwqueue *args,
+ struct d3dkmt_createhwqueue *__user inargs,
+ struct dxghwqueue *hq);
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle handle);
int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryadapterinfo *args);
+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+ struct dxgvmbuschannel *channel,
+ struct d3dkmt_opensyncobjectfromnthandle2
+ *args,
+ struct dxgsyncobject *syncobj);
int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
struct d3dkmthandle object,
struct d3dkmthandle *shared_handle);
diff --git a/drivers/hv/dxgkrnl/dxgmodule.c b/drivers/hv/dxgkrnl/dxgmodule.c
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgmodule.c
+++ b/drivers/hv/dxgkrnl/dxgmodule.c
@@ -259,6 +259,7 @@ int dxgglobal_create_adapter(struct pci_dev *dev, guid_t *guid,

INIT_LIST_HEAD(&adapter->adapter_process_list_head);
INIT_LIST_HEAD(&adapter->shared_resource_list_head);
+ INIT_LIST_HEAD(&adapter->adapter_shared_syncobj_list_head);
INIT_LIST_HEAD(&adapter->syncobj_list_head);
init_rwsem(&adapter->shared_resource_list_lock);
adapter->pci_dev = dev;
diff --git a/drivers/hv/dxgkrnl/dxgprocess.c b/drivers/hv/dxgkrnl/dxgprocess.c
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgprocess.c
+++ b/drivers/hv/dxgkrnl/dxgprocess.c
@@ -277,6 +277,10 @@ struct dxgdevice *dxgprocess_device_by_object_handle(struct dxgprocess *process,
device_handle =
((struct dxgcontext *)obj)->device_handle;
break;
+ case HMGRENTRY_TYPE_DXGHWQUEUE:
+ device_handle =
+ ((struct dxghwqueue *)obj)->device_handle;
+ break;
default:
DXG_ERR("invalid handle type: %d", t);
break;
diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgvmbus.c
+++ b/drivers/hv/dxgkrnl/dxgvmbus.c
@@ -712,6 +712,69 @@ int dxgvmb_send_destroy_process(struct d3dkmthandle process)
return ret;
}

+int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
+ struct dxgvmbuschannel *channel,
+ struct d3dkmt_opensyncobjectfromnthandle2
+ *args,
+ struct dxgsyncobject *syncobj)
+{
+ struct dxgkvmb_command_opensyncobject *command;
+ struct dxgkvmb_command_opensyncobject_return result = { };
+ int ret;
+ struct dxgvmbusmsg msg;
+
+ ret = init_message(&msg, NULL, process, sizeof(*command));
+ if (ret)
+ return ret;
+ command = (void *)msg.msg;
+
+ command_vm_to_host_init2(&command->hdr, DXGK_VMBCOMMAND_OPENSYNCOBJECT,
+ process->host_handle);
+ command->device = args->device;
+ command->global_sync_object = syncobj->shared_owner->host_shared_handle;
+ command->flags = args->flags;
+ if (syncobj->monitored_fence)
+ command->engine_affinity =
+ args->monitored_fence.engine_affinity;
+
+ ret = dxgglobal_acquire_channel_lock();
+ if (ret < 0)
+ goto cleanup;
+
+ ret = dxgvmb_send_sync_msg(channel, msg.hdr, msg.size,
+ &result, sizeof(result));
+
+ dxgglobal_release_channel_lock();
+
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(result.status);
+ if (ret < 0)
+ goto cleanup;
+
+ args->sync_object = result.sync_object;
+ if (syncobj->monitored_fence) {
+ void *va = dxg_map_iospace(result.guest_cpu_physical_address,
+ PAGE_SIZE, PROT_READ | PROT_WRITE,
+ true);
+ if (va == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ args->monitored_fence.fence_value_cpu_va = va;
+ args->monitored_fence.fence_value_gpu_va =
+ result.gpu_virtual_address;
+ syncobj->mapped_address = va;
+ }
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ DXG_TRACE("err: %d", ret);
+ return ret;
+}
+
int dxgvmb_send_create_nt_shared_object(struct dxgprocess *process,
struct d3dkmthandle object,
struct d3dkmthandle *shared_handle)
@@ -2050,6 +2113,164 @@ int dxgvmb_send_wait_sync_object_gpu(struct dxgprocess *process,
return ret;
}

+int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmt_createhwqueue *args,
+ struct d3dkmt_createhwqueue *__user inargs,
+ struct dxghwqueue *hwqueue)
+{
+ struct dxgkvmb_command_createhwqueue *command = NULL;
+ u32 cmd_size = sizeof(struct dxgkvmb_command_createhwqueue);
+ int ret;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ if (args->priv_drv_data_size > DXG_MAX_VM_BUS_PACKET_SIZE) {
+ DXG_ERR("invalid private driver data size: %d",
+ args->priv_drv_data_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args->priv_drv_data_size)
+ cmd_size += args->priv_drv_data_size - 1;
+
+ ret = init_message(&msg, adapter, process, cmd_size);
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_CREATEHWQUEUE,
+ process->host_handle);
+ command->context = args->context;
+ command->flags = args->flags;
+ command->priv_drv_data_size = args->priv_drv_data_size;
+ if (args->priv_drv_data_size) {
+ ret = copy_from_user(command->priv_drv_data,
+ args->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret) {
+ DXG_ERR("failed to copy private data");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ ret = dxgvmb_send_sync_msg(msg.channel, msg.hdr, msg.size,
+ command, cmd_size);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = ntstatus2int(command->status);
+ if (ret < 0) {
+ DXG_ERR("dxgvmb_send_sync_msg failed: %x",
+ command->status.v);
+ goto cleanup;
+ }
+
+ ret = hmgrtable_assign_handle_safe(&process->handle_table, hwqueue,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ command->hwqueue);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = hmgrtable_assign_handle_safe(&process->handle_table,
+ NULL,
+ HMGRENTRY_TYPE_MONITOREDFENCE,
+ command->hwqueue_progress_fence);
+ if (ret < 0)
+ goto cleanup;
+
+ hwqueue->handle = command->hwqueue;
+ hwqueue->progress_fence_sync_object = command->hwqueue_progress_fence;
+
+ hwqueue->progress_fence_mapped_address =
+ dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva,
+ PAGE_SIZE, PROT_READ | PROT_WRITE, true);
+ if (hwqueue->progress_fence_mapped_address == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = copy_to_user(&inargs->queue, &command->hwqueue,
+ sizeof(struct d3dkmthandle));
+ if (ret < 0) {
+ DXG_ERR("failed to copy hwqueue handle");
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence,
+ &command->hwqueue_progress_fence,
+ sizeof(struct d3dkmthandle));
+ if (ret < 0) {
+ DXG_ERR("failed to copy progress fence");
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence_cpu_va,
+ &hwqueue->progress_fence_mapped_address,
+ sizeof(inargs->queue_progress_fence_cpu_va));
+ if (ret < 0) {
+ DXG_ERR("failed to copy fence cpu va");
+ goto cleanup;
+ }
+ ret = copy_to_user(&inargs->queue_progress_fence_gpu_va,
+ &command->hwqueue_progress_fence_gpuva,
+ sizeof(u64));
+ if (ret < 0) {
+ DXG_ERR("failed to copy fence gpu va");
+ goto cleanup;
+ }
+ if (args->priv_drv_data_size) {
+ ret = copy_to_user(args->priv_drv_data,
+ command->priv_drv_data,
+ args->priv_drv_data_size);
+ if (ret < 0)
+ DXG_ERR("failed to copy private data");
+ }
+
+cleanup:
+ if (ret < 0) {
+ DXG_ERR("failed %x", ret);
+ if (hwqueue->handle.v) {
+ hmgrtable_free_handle_safe(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue->handle);
+ hwqueue->handle.v = 0;
+ }
+ if (command && command->hwqueue.v)
+ dxgvmb_send_destroy_hwqueue(process, adapter,
+ command->hwqueue);
+ }
+ free_message(&msg, process);
+ return ret;
+}
+
+int dxgvmb_send_destroy_hwqueue(struct dxgprocess *process,
+ struct dxgadapter *adapter,
+ struct d3dkmthandle handle)
+{
+ int ret;
+ struct dxgkvmb_command_destroyhwqueue *command;
+ struct dxgvmbusmsg msg = {.hdr = NULL};
+
+ ret = init_message(&msg, adapter, process, sizeof(*command));
+ if (ret)
+ goto cleanup;
+ command = (void *)msg.msg;
+
+ command_vgpu_to_host_init2(&command->hdr,
+ DXGK_VMBCOMMAND_DESTROYHWQUEUE,
+ process->host_handle);
+ command->hwqueue = handle;
+
+ ret = dxgvmb_send_sync_msg_ntstatus(msg.channel, msg.hdr, msg.size);
+
+cleanup:
+ free_message(&msg, process);
+ if (ret)
+ DXG_TRACE("err: %d", ret);
+ return ret;
+}
+
int dxgvmb_send_query_adapter_info(struct dxgprocess *process,
struct dxgadapter *adapter,
struct d3dkmt_queryadapterinfo *args)
diff --git a/drivers/hv/dxgkrnl/dxgvmbus.h b/drivers/hv/dxgkrnl/dxgvmbus.h
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/dxgvmbus.h
+++ b/drivers/hv/dxgkrnl/dxgvmbus.h
@@ -172,6 +172,21 @@ struct dxgkvmb_command_signalguestevent {
bool dereference_event;
};

+struct dxgkvmb_command_opensyncobject {
+ struct dxgkvmb_command_vm_to_host hdr;
+ struct d3dkmthandle device;
+ struct d3dkmthandle global_sync_object;
+ u32 engine_affinity;
+ struct d3dddi_synchronizationobject_flags flags;
+};
+
+struct dxgkvmb_command_opensyncobject_return {
+ struct d3dkmthandle sync_object;
+ struct ntstatus status;
+ u64 gpu_virtual_address;
+ u64 guest_cpu_physical_address;
+};
+
/*
* The command returns struct d3dkmthandle of a shared object for the
* given pre-process object
@@ -508,4 +523,24 @@ struct dxgkvmb_command_waitforsyncobjectfromgpu {
/* struct d3dkmthandle ObjectHandles[object_count] */
};

+/* Returns the same structure */
+struct dxgkvmb_command_createhwqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct ntstatus status;
+ struct d3dkmthandle hwqueue;
+ struct d3dkmthandle hwqueue_progress_fence;
+ void *hwqueue_progress_fence_cpuva;
+ u64 hwqueue_progress_fence_gpuva;
+ struct d3dkmthandle context;
+ struct d3dddi_createhwqueueflags flags;
+ u32 priv_drv_data_size;
+ char priv_drv_data[1];
+};
+
+/* The command returns ntstatus */
+struct dxgkvmb_command_destroyhwqueue {
+ struct dxgkvmb_command_vgpu_to_host hdr;
+ struct d3dkmthandle hwqueue;
+};
+
#endif /* _DXGVMBUS_H */
diff --git a/drivers/hv/dxgkrnl/ioctl.c b/drivers/hv/dxgkrnl/ioctl.c
index 111111111111..222222222222 100644
--- a/drivers/hv/dxgkrnl/ioctl.c
+++ b/drivers/hv/dxgkrnl/ioctl.c
@@ -36,6 +36,33 @@ static char *errorstr(int ret)
}
#endif

+static int dxgsyncobj_release(struct inode *inode, struct file *file)
+{
+ struct dxgsharedsyncobject *syncobj = file->private_data;
+
+ DXG_TRACE("Release syncobj: %p", syncobj);
+ mutex_lock(&syncobj->fd_mutex);
+ kref_get(&syncobj->ssyncobj_kref);
+ syncobj->host_shared_handle_nt_reference--;
+ if (syncobj->host_shared_handle_nt_reference == 0) {
+ if (syncobj->host_shared_handle_nt.v) {
+ dxgvmb_send_destroy_nt_shared_object(
+ syncobj->host_shared_handle_nt);
+ DXG_TRACE("Syncobj host_handle_nt destroyed: %x",
+ syncobj->host_shared_handle_nt.v);
+ syncobj->host_shared_handle_nt.v = 0;
+ }
+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release);
+ }
+ mutex_unlock(&syncobj->fd_mutex);
+ kref_put(&syncobj->ssyncobj_kref, dxgsharedsyncobj_release);
+ return 0;
+}
+
+static const struct file_operations dxg_syncobj_fops = {
+ .release = dxgsyncobj_release,
+};
+
static int dxgsharedresource_release(struct inode *inode, struct file *file)
{
struct dxgsharedresource *resource = file->private_data;
@@ -833,6 +860,156 @@ dxgkio_destroy_context(struct dxgprocess *process, void *__user inargs)
return ret;
}

+static int
+dxgkio_create_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_createhwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgcontext *context = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct dxghwqueue *hwqueue = NULL;
+ int ret;
+ bool device_lock_acquired = false;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ DXG_ERR("failed to copy input args");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_SHARED);
+ context = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGCONTEXT,
+ args.context);
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_SHARED);
+
+ if (context == NULL) {
+ DXG_ERR("Invalid context handle %x", args.context.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hwqueue = dxghwqueue_create(context);
+ if (hwqueue == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_create_hwqueue(process, adapter, &args,
+ inargs, hwqueue);
+
+cleanup:
+
+ if (ret < 0 && hwqueue)
+ dxghwqueue_destroy(process, hwqueue);
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+ return ret;
+}
+
+static int dxgkio_destroy_hwqueue(struct dxgprocess *process,
+ void *__user inargs)
+{
+ struct d3dkmt_destroyhwqueue args;
+ int ret;
+ struct dxgadapter *adapter = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxghwqueue *hwqueue = NULL;
+ struct d3dkmthandle device_handle = {};
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ DXG_ERR("failed to copy input args");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ hwqueue = hmgrtable_get_object_by_type(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ args.queue);
+ if (hwqueue) {
+ hmgrtable_free_handle(&process->handle_table,
+ HMGRENTRY_TYPE_DXGHWQUEUE, args.queue);
+ hwqueue->handle.v = 0;
+ device_handle = hwqueue->device_handle;
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (hwqueue == NULL) {
+ DXG_ERR("invalid hwqueue handle: %x", args.queue.v);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * The call acquires reference on the device. It is safe to access the
+ * adapter, because the device holds reference on it.
+ */
+ device = dxgprocess_device_by_handle(process, device_handle);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_destroy_hwqueue(process, adapter, args.queue);
+
+ dxghwqueue_destroy(process, hwqueue);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+ return ret;
+}
+
static int
get_standard_alloc_priv_data(struct dxgdevice *device,
struct d3dkmt_createstandardallocation *alloc_info,
@@ -1548,6 +1725,164 @@ dxgkio_destroy_allocation(struct dxgprocess *process, void *__user inargs)
return ret;
}

+static int
+dxgkio_submit_signal_to_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ int ret;
+ struct d3dkmt_submitsignalsyncobjectstohwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dkmthandle hwqueue = {};
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ DXG_ERR("failed to copy input args");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.hwqueue_count > D3DDDI_MAX_BROADCAST_CONTEXT ||
+ args.hwqueue_count == 0) {
+ DXG_ERR("invalid hwqueue count: %d",
+ args.hwqueue_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > D3DDDI_MAX_OBJECT_SIGNALED ||
+ args.object_count == 0) {
+ DXG_ERR("invalid number of syncobjects: %d",
+ args.object_count);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = copy_from_user(&hwqueue, args.hwqueues,
+ sizeof(struct d3dkmthandle));
+ if (ret) {
+ DXG_ERR("failed to copy hwqueue handle");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ hwqueue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_signal_sync_object(process, adapter,
+ args.flags, 0, zerohandle,
+ args.object_count, args.objects,
+ args.hwqueue_count, args.hwqueues,
+ args.object_count,
+ args.fence_values, NULL,
+ zerohandle);
+
+cleanup:
+
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+ return ret;
+}
+
+static int
+dxgkio_submit_wait_to_hwqueue(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_submitwaitforsyncobjectstohwqueue args;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ int ret;
+ struct d3dkmthandle *objects = NULL;
+ u32 object_size;
+ u64 *fences = NULL;
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ DXG_ERR("failed to copy input args");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (args.object_count > D3DDDI_MAX_OBJECT_WAITED_ON ||
+ args.object_count == 0) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ object_size = sizeof(struct d3dkmthandle) * args.object_count;
+ objects = vzalloc(object_size);
+ if (objects == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(objects, args.objects, object_size);
+ if (ret) {
+ DXG_ERR("failed to copy objects");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ object_size = sizeof(u64) * args.object_count;
+ fences = vzalloc(object_size);
+ if (fences == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ ret = copy_from_user(fences, args.fence_values, object_size);
+ if (ret) {
+ DXG_ERR("failed to copy fence values");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ device = dxgprocess_device_by_object_handle(process,
+ HMGRENTRY_TYPE_DXGHWQUEUE,
+ args.hwqueue);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ ret = dxgvmb_send_wait_sync_object_gpu(process, adapter,
+ args.hwqueue, args.object_count,
+ objects, fences, false);
+
+cleanup:
+
+ if (objects)
+ vfree(objects);
+ if (fences)
+ vfree(fences);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+ return ret;
+}
+
static int
dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs)
{
@@ -1558,6 +1893,7 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs)
struct eventfd_ctx *event = NULL;
struct dxgsyncobject *syncobj = NULL;
bool device_lock_acquired = false;
+ struct dxgsharedsyncobject *syncobjgbl = NULL;
struct dxghosteventcpu *host_event = NULL;

ret = copy_from_user(&args, inargs, sizeof(args));
@@ -1618,6 +1954,22 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs)
if (ret < 0)
goto cleanup;

+ if (args.info.flags.shared) {
+ if (args.info.shared_handle.v == 0) {
+ DXG_ERR("shared handle should not be 0");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ syncobjgbl = dxgsharedsyncobj_create(device->adapter, syncobj);
+ if (syncobjgbl == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ dxgsharedsyncobj_add_syncobj(syncobjgbl, syncobj);
+
+ syncobjgbl->host_shared_handle = args.info.shared_handle;
+ }
+
ret = copy_to_user(inargs, &args, sizeof(args));
if (ret) {
DXG_ERR("failed to copy output args");
@@ -1646,6 +1998,8 @@ dxgkio_create_sync_object(struct dxgprocess *process, void *__user inargs)
if (event)
eventfd_ctx_put(event);
}
+ if (syncobjgbl)
+ kref_put(&syncobjgbl->ssyncobj_kref, dxgsharedsyncobj_release);
if (adapter)
dxgadapter_release_lock_shared(adapter);
if (device_lock_acquired)
@@ -1700,6 +2054,140 @@ dxgkio_destroy_sync_object(struct dxgprocess *process, void *__user inargs)
return ret;
}

+static int
+dxgkio_open_sync_object_nt(struct dxgprocess *process, void *__user inargs)
+{
+ struct d3dkmt_opensyncobjectfromnthandle2 args;
+ struct dxgsyncobject *syncobj = NULL;
+ struct dxgsharedsyncobject *syncobj_fd = NULL;
+ struct file *file = NULL;
+ struct dxgdevice *device = NULL;
+ struct dxgadapter *adapter = NULL;
+ struct d3dddi_synchronizationobject_flags flags = { };
+ int ret;
+ bool device_lock_acquired = false;
+ struct dxgglobal *dxgglobal = dxggbl();
+
+ ret = copy_from_user(&args, inargs, sizeof(args));
+ if (ret) {
+ DXG_ERR("failed to copy input args");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ args.sync_object.v = 0;
+
+ if (args.device.v) {
+ device = dxgprocess_device_by_handle(process, args.device);
+ if (device == NULL) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ } else {
+ DXG_ERR("device handle is missing");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ ret = dxgdevice_acquire_lock_shared(device);
+ if (ret < 0)
+ goto cleanup;
+
+ device_lock_acquired = true;
+
+ adapter = device->adapter;
+ ret = dxgadapter_acquire_lock_shared(adapter);
+ if (ret < 0) {
+ adapter = NULL;
+ goto cleanup;
+ }
+
+ file = fget(args.nt_handle);
+ if (!file) {
+ DXG_ERR("failed to get file from handle: %llx",
+ args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if (file->f_op != &dxg_syncobj_fops) {
+ DXG_ERR("invalid fd: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ syncobj_fd = file->private_data;
+ if (syncobj_fd == NULL) {
+ DXG_ERR("invalid private data: %llx", args.nt_handle);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ flags.shared = 1;
+ flags.nt_security_sharing = 1;
+ syncobj = dxgsyncobject_create(process, device, adapter,
+ syncobj_fd->type, flags);
+ if (syncobj == NULL) {
+ DXG_ERR("failed to create sync object");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dxgsharedsyncobj_add_syncobj(syncobj_fd, syncobj);
+
+ ret = dxgvmb_send_open_sync_object_nt(process, &dxgglobal->channel,
+ &args, syncobj);
+ if (ret < 0) {
+ DXG_ERR("failed to open sync object on host: %x",
+ syncobj_fd->host_shared_handle.v);
+ goto cleanup;
+ }
+
+ hmgrtable_lock(&process->handle_table, DXGLOCK_EXCL);
+ ret = hmgrtable_assign_handle(&process->handle_table, syncobj,
+ HMGRENTRY_TYPE_DXGSYNCOBJECT,
+ args.sync_object);
+ if (ret >= 0) {
+ syncobj->handle = args.sync_object;
+ kref_get(&syncobj->syncobj_kref);
+ }
+ hmgrtable_unlock(&process->handle_table, DXGLOCK_EXCL);
+
+ if (ret < 0)
+ goto cleanup;
+
+ ret = copy_to_user(inargs, &args, sizeof(args));
+ if (ret == 0)
+ goto success;
+ DXG_ERR("failed to copy output args");
+
+cleanup:
+
+ if (syncobj) {
+ dxgsyncobject_destroy(process, syncobj);
+ syncobj = NULL;
+ }
+
+ if (args.sync_object.v)
+ dxgvmb_send_destroy_sync_object(process, args.sync_object);
+
+success:
+
+ if (file)
+ fput(file);
+ if (syncobj)
+ kref_put(&syncobj->syncobj_kref, dxgsyncobject_release);
+ if (adapter)
+ dxgadapter_release_lock_shared(adapter);
+ if (device_lock_acquired)
+ dxgdevice_release_lock_shared(device);
+ if (device)
+ kref_put(&device->device_kref, dxgdevice_release);
+
+ DXG_TRACE("ioctl:%s %d", errorstr(ret), ret);
+ return ret;
+}
+
static int
dxgkio_signal_sync_object(struct dxgprocess *process, void *__user inargs)
{
@@ -2353,6 +2841,30 @@ dxgkio_wait_sync_object_gpu(struct dxgprocess *process, void *__user inargs)
return ret;
}

+static int
+dxgsharedsyncobj_get_host_nt_handle(struct dxgsharedsyncobject *syncobj,
+ struct dxgprocess *process,
+ struct d3dkmthandle objecthandle)
+{
+ int ret = 0;
+
+ mutex_lock(&syncobj->fd_mutex);
+ if (syncobj->host_shared_handle_nt_reference == 0) {
+ ret = dxgvmb_send_create_nt_shared_object(process,
+ objecthandle,
+ &syncobj->host_shared_handle_nt);
+ if (ret < 0)
+ goto cleanup;
+ DXG_TRACE("Host_shared_handle_ht: %x",
+ syncobj->host_shared_handle_nt.v);
+ kref_get(&syncobj->ssyncobj_kref);
+ }
+ syncobj->host_shared_handle_nt_reference++;
+cleanup:
+ mutex_unlock(&syncobj->fd_mutex);
+ return ret;
+}
+
static int
dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource,
struct dxgprocess *process,
@@ -2378,6 +2890,7 @@ dxgsharedresource_get_host_nt_handle(struct dxgsharedresource *resource,
}

enum dxg_sharedobject_type {
+ DXG_SHARED_SYNCOBJECT,
DXG_SHARED_RESOURCE
};

@@ -2394,6 +2907,10 @@ static int get_object_fd(enum dxg_sharedobject_type type,
}

switch (type) {
+ case DXG_SHARED_SYNCOBJECT:
+ file = anon_inode_getfile("dxgsyncobj",
+ &dxg_syncobj_fops, object, 0);
+ break;
case DXG_SHARED_RESOURCE:
file = anon_inode_getfile("dxgresource",
&dxg_resource_fops, object, 0);
@@ -2419,6 +2936,7 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs)
enum hmgrentry_type object_type;
struct dxgsyncobject *syncobj = NULL;
struct dxgresource *resource = NULL;
+ struct dxgsharedsyncobject *shared_syncobj = NULL;
struct dxgsharedresource *shared_resource = NULL;
struct d3dkmthandle *handles = NULL;
int object_fd = -1;
@@ -2465,6 +2983,17 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs)
ret = -EINVAL;
} else {
switch (object_type) {
+ case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+ syncobj = obj;
+ if (syncobj->shared) {
+ kref_get(&syncobj->syncobj_kref);
+ shared_syncobj = syncobj->shared_owner;
+ } else {
+ DXG_ERR("sync object is not shared");
+ syncobj = NULL;
+ ret = -EINVAL;
+ }
+ break;
case HMGRENTRY_TYPE_DXGRESOURCE:
resource = obj;
if (resource->shared_owner) {
@@ -2488,6 +3017,21 @@ dxgkio_share_objects(struct dxgprocess *process, void *__user inargs)
goto cleanup;

switch (object_type) {
+ case HMGRENTRY_TYPE_DXGSYNCOBJECT:
+ ret = get_object_fd(DXG_SHARED_SYNCOBJECT, shared_syncobj,
+ &object_fd);
+ if (ret < 0) {
+ DXG_ERR("get_object_fd failed for sync object");
+ goto cleanup;
+ }
+ ret = dxgsharedsyncobj_get_host_nt_handle(shared_syncobj,
+ process,
+ handles[0]);
+ if (ret < 0) {
+ DXG_ERR("get_host_nt_handle failed");
+ goto cleanup;
+ }
+ break;
case HMGRENTRY_TYPE_DXGRESOURCE:
ret = get_object_fd(DXG_SHARED_RESOURCE, shared_resource,
&object_fd);
@@ -2954,10 +3498,10 @@ static struct ioctl_desc ioctls[] = {
/* 0x15 */ {dxgkio_close_adapter, LX_DXCLOSEADAPTER},
/* 0x16 */ {},
/* 0x17 */ {},
-/* 0x18 */ {},
+/* 0x18 */ {dxgkio_create_hwqueue, LX_DXCREATEHWQUEUE},
/* 0x19 */ {dxgkio_destroy_device, LX_DXDESTROYDEVICE},
/* 0x1a */ {},
-/* 0x1b */ {},
+/* 0x1b */ {dxgkio_destroy_hwqueue, LX_DXDESTROYHWQUEUE},
/* 0x1c */ {},
/* 0x1d */ {dxgkio_destroy_sync_object, LX_DXDESTROYSYNCHRONIZATIONOBJECT},
/* 0x1e */ {},
@@ -2986,8 +3530,10 @@ static struct ioctl_desc ioctls[] = {
/* 0x33 */ {dxgkio_signal_sync_object_gpu2,
LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2},
/* 0x34 */ {},
-/* 0x35 */ {},
-/* 0x36 */ {},
+/* 0x35 */ {dxgkio_submit_signal_to_hwqueue,
+ LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE},
+/* 0x36 */ {dxgkio_submit_wait_to_hwqueue,
+ LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE},
/* 0x37 */ {},
/* 0x38 */ {},
/* 0x39 */ {},
@@ -2999,7 +3545,7 @@ static struct ioctl_desc ioctls[] = {
/* 0x3d */ {},
/* 0x3e */ {dxgkio_enum_adapters3, LX_DXENUMADAPTERS3},
/* 0x3f */ {dxgkio_share_objects, LX_DXSHAREOBJECTS},
-/* 0x40 */ {},
+/* 0x40 */ {dxgkio_open_sync_object_nt, LX_DXOPENSYNCOBJECTFROMNTHANDLE2},
/* 0x41 */ {dxgkio_query_resource_info_nt,
LX_DXQUERYRESOURCEINFOFROMNTHANDLE},
/* 0x42 */ {dxgkio_open_resource_nt, LX_DXOPENRESOURCEFROMNTHANDLE},
diff --git a/include/uapi/misc/d3dkmthk.h b/include/uapi/misc/d3dkmthk.h
index 111111111111..222222222222 100644
--- a/include/uapi/misc/d3dkmthk.h
+++ b/include/uapi/misc/d3dkmthk.h
@@ -201,6 +201,16 @@ struct d3dkmt_createcontextvirtual {
struct d3dkmthandle context;
};

+struct d3dddi_createhwqueueflags {
+ union {
+ struct {
+ __u32 disable_gpu_timeout:1;
+ __u32 reserved:31;
+ };
+ __u32 value;
+ };
+};
+
enum d3dkmdt_gdisurfacetype {
_D3DKMDT_GDISURFACE_INVALID = 0,
_D3DKMDT_GDISURFACE_TEXTURE = 1,
@@ -694,6 +704,81 @@ struct d3dddi_openallocationinfo2 {
__u64 reserved[6];
};

+struct d3dkmt_createhwqueue {
+ struct d3dkmthandle context;
+ struct d3dddi_createhwqueueflags flags;
+ __u32 priv_drv_data_size;
+ __u32 reserved;
+#ifdef __KERNEL__
+ void *priv_drv_data;
+#else
+ __u64 priv_drv_data;
+#endif
+ struct d3dkmthandle queue;
+ struct d3dkmthandle queue_progress_fence;
+#ifdef __KERNEL__
+ void *queue_progress_fence_cpu_va;
+#else
+ __u64 queue_progress_fence_cpu_va;
+#endif
+ __u64 queue_progress_fence_gpu_va;
+};
+
+struct d3dkmt_destroyhwqueue {
+ struct d3dkmthandle queue;
+};
+
+struct d3dkmt_submitwaitforsyncobjectstohwqueue {
+ struct d3dkmthandle hwqueue;
+ __u32 object_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+};
+
+struct d3dkmt_submitsignalsyncobjectstohwqueue {
+ struct d3dddicb_signalflags flags;
+ __u32 hwqueue_count;
+#ifdef __KERNEL__
+ struct d3dkmthandle *hwqueues;
+#else
+ __u64 hwqueues;
+#endif
+ __u32 object_count;
+ __u32 reserved;
+#ifdef __KERNEL__
+ struct d3dkmthandle *objects;
+ __u64 *fence_values;
+#else
+ __u64 objects;
+ __u64 fence_values;
+#endif
+};
+
+struct d3dkmt_opensyncobjectfromnthandle2 {
+ __u64 nt_handle;
+ struct d3dkmthandle device;
+ struct d3dddi_synchronizationobject_flags flags;
+ struct d3dkmthandle sync_object;
+ __u32 reserved1;
+ union {
+ struct {
+#ifdef __KERNEL__
+ void *fence_value_cpu_va;
+#else
+ __u64 fence_value_cpu_va;
+#endif
+ __u64 fence_value_gpu_va;
+ __u32 engine_affinity;
+ } monitored_fence;
+ __u64 reserved[8];
+ };
+};
+
struct d3dkmt_openresourcefromnthandle {
struct d3dkmthandle device;
__u32 reserved;
@@ -819,6 +904,10 @@ struct d3dkmt_enumadapters3 {
_IOWR(0x47, 0x14, struct d3dkmt_enumadapters2)
#define LX_DXCLOSEADAPTER \
_IOWR(0x47, 0x15, struct d3dkmt_closeadapter)
+#define LX_DXCREATEHWQUEUE \
+ _IOWR(0x47, 0x18, struct d3dkmt_createhwqueue)
+#define LX_DXDESTROYHWQUEUE \
+ _IOWR(0x47, 0x1b, struct d3dkmt_destroyhwqueue)
#define LX_DXDESTROYDEVICE \
_IOWR(0x47, 0x19, struct d3dkmt_destroydevice)
#define LX_DXDESTROYSYNCHRONIZATIONOBJECT \
@@ -829,6 +918,10 @@ struct d3dkmt_enumadapters3 {
_IOWR(0x47, 0x32, struct d3dkmt_signalsynchronizationobjectfromgpu)
#define LX_DXSIGNALSYNCHRONIZATIONOBJECTFROMGPU2 \
_IOWR(0x47, 0x33, struct d3dkmt_signalsynchronizationobjectfromgpu2)
+#define LX_DXSUBMITSIGNALSYNCOBJECTSTOHWQUEUE \
+ _IOWR(0x47, 0x35, struct d3dkmt_submitsignalsyncobjectstohwqueue)
+#define LX_DXSUBMITWAITFORSYNCOBJECTSTOHWQUEUE \
+ _IOWR(0x47, 0x36, struct d3dkmt_submitwaitforsyncobjectstohwqueue)
#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMCPU \
_IOWR(0x47, 0x3a, struct d3dkmt_waitforsynchronizationobjectfromcpu)
#define LX_DXWAITFORSYNCHRONIZATIONOBJECTFROMGPU \
--