intel: prepare for primary node support
We will be able to support primary nodes once we know how to talk to an X
server.
diff --git a/icd/intel/dev.c b/icd/intel/dev.c
index 428c90c..410832a 100644
--- a/icd/intel/dev.c
+++ b/icd/intel/dev.c
@@ -76,7 +76,7 @@
if (info->extensionCount)
return XGL_ERROR_INVALID_EXTENSION;
- if (gpu->fd >= 0)
+ if (gpu->device_fd >= 0)
return XGL_ERROR_DEVICE_ALREADY_CREATED;
dev = (struct intel_dev *) intel_base_create(NULL, sizeof(*dev),
@@ -93,10 +93,10 @@
return ret;
}
- dev->winsys = intel_winsys_create_for_fd(gpu->fd);
+ dev->winsys = intel_winsys_create_for_fd(gpu->device_fd);
if (!dev->winsys) {
icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE,
- 0, 0, "failed to create device winsys for %s", gpu->path);
+ 0, 0, "failed to create device winsys");
intel_dev_destroy(dev);
return XGL_ERROR_UNKNOWN;
}
@@ -153,7 +153,7 @@
if (dev->winsys)
intel_winsys_destroy(dev->winsys);
- if (dev->gpu->fd >= 0)
+ if (dev->gpu->device_fd >= 0)
intel_gpu_close(dev->gpu);
intel_base_destroy(&dev->base);
diff --git a/icd/intel/gpu.c b/icd/intel/gpu.c
index a840a9d..93333e9 100644
--- a/icd/intel/gpu.c
+++ b/icd/intel/gpu.c
@@ -36,6 +36,41 @@
#include "queue.h"
#include "gpu.h"
+static int gpu_open_primary_node(struct intel_gpu *gpu)
+{
+ /* cannot open gpu->primary_node directly */
+ return gpu->primary_fd_internal;
+}
+
+static void gpu_close_primary_node(struct intel_gpu *gpu)
+{
+ if (gpu->primary_fd_internal >= 0) {
+ close(gpu->primary_fd_internal);
+ gpu->primary_fd_internal = -1;
+ }
+}
+
+static int gpu_open_render_node(struct intel_gpu *gpu)
+{
+ if (gpu->render_fd_internal < 0 && gpu->render_node) {
+ gpu->render_fd_internal = open(gpu->render_node, O_RDWR);
+ if (gpu->render_fd_internal < 0) {
+ icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, NULL, 0,
+ 0, "failed to open %s", gpu->render_node);
+ }
+ }
+
+ return gpu->render_fd_internal;
+}
+
+static void gpu_close_render_node(struct intel_gpu *gpu)
+{
+ if (gpu->render_fd_internal >= 0) {
+ close(gpu->render_fd_internal);
+ gpu->render_fd_internal = -1;
+ }
+}
+
static const char *gpu_get_name(const struct intel_gpu *gpu)
{
const char *name = NULL;
@@ -71,31 +106,12 @@
return name;
}
-static int gpu_open_internal(struct intel_gpu *gpu)
-{
- if (gpu->fd_internal < 0) {
- gpu->fd_internal = open(gpu->path, O_RDWR);
- if (gpu->fd_internal < 0) {
- icd_log(XGL_DBG_MSG_ERROR, XGL_VALIDATION_LEVEL_0, NULL, 0,
- 0, "failed to open %s", gpu->path);
- }
- }
-
- return gpu->fd_internal;
-}
-
-static void gpu_close_internal(struct intel_gpu *gpu)
-{
- if (gpu->fd_internal >= 0) {
- close(gpu->fd_internal);
- gpu->fd_internal = -1;
- }
-}
-
-static struct intel_gpu *gpu_create(int gen, int devid, const char *path)
+static struct intel_gpu *gpu_create(int gen, int devid,
+ const char *primary_node,
+ const char *render_node)
{
struct intel_gpu *gpu;
- size_t path_len;
+ size_t primary_len, render_len;
gpu = icd_alloc(sizeof(*gpu), 0, XGL_SYSTEM_ALLOC_API_OBJECT);
if (!gpu)
@@ -108,13 +124,22 @@
gpu->devid = devid;
- path_len = strlen(path);
- gpu->path = icd_alloc(path_len + 1, 0, XGL_SYSTEM_ALLOC_INTERNAL);
- if (!gpu->path) {
+ primary_len = strlen(primary_node);
+ render_len = (render_node) ? strlen(render_node) : 0;
+
+ gpu->primary_node = icd_alloc(primary_len + 1 +
+ ((render_len) ? (render_len + 1) : 0), 0, XGL_SYSTEM_ALLOC_INTERNAL);
+ if (!gpu->primary_node) {
icd_free(gpu);
return NULL;
}
- memcpy(gpu->path, path, path_len + 1);
+
+ memcpy(gpu->primary_node, primary_node, primary_len + 1);
+
+ if (render_node) {
+ gpu->render_node = gpu->primary_node + primary_len + 1;
+ memcpy(gpu->render_node, render_node, render_len + 1);
+ }
gpu->gen_opaque = gen;
@@ -137,16 +162,18 @@
gpu->batch_buffer_reloc_count =
gpu->max_batch_buffer_size / sizeof(uint32_t) / 2 - 2;
- gpu->fd_internal = -1;
- gpu->fd = -1;
+ gpu->primary_fd_internal = -1;
+ gpu->render_fd_internal = -1;
+
+ gpu->device_fd = -1;
return gpu;
}
static void gpu_destroy(struct intel_gpu *gpu)
{
- gpu_close_internal(gpu);
- icd_free(gpu->path);
+ intel_gpu_close(gpu);
+ icd_free(gpu->primary_node);
icd_free(gpu);
}
@@ -189,8 +216,8 @@
return gen;
}
-XGL_RESULT intel_gpu_add(int devid, const char *path,
- struct intel_gpu **gpu_ret)
+XGL_RESULT intel_gpu_add(int devid, const char *primary_node,
+ const char *render_node, struct intel_gpu **gpu_ret)
{
const int gen = devid_to_gen(devid);
struct intel_gpu *gpu;
@@ -201,7 +228,7 @@
return XGL_ERROR_INITIALIZATION_FAILED;
}
- gpu = gpu_create(gen, devid, path);
+ gpu = gpu_create(gen, devid, primary_node, render_node);
if (!gpu)
return XGL_ERROR_OUT_OF_MEMORY;
@@ -318,15 +345,18 @@
XGL_RESULT intel_gpu_open(struct intel_gpu *gpu)
{
- gpu->fd = gpu_open_internal(gpu);
+ gpu->device_fd = gpu_open_primary_node(gpu);
+ if (gpu->device_fd < 0)
+ gpu->device_fd = gpu_open_render_node(gpu);
- return (gpu->fd >= 0) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
+ return (gpu->device_fd >= 0) ? XGL_SUCCESS : XGL_ERROR_UNKNOWN;
}
void intel_gpu_close(struct intel_gpu *gpu)
{
- gpu->fd = -1;
- gpu_close_internal(gpu);
+ gpu_close_primary_node(gpu);
+ gpu_close_render_node(gpu);
+ gpu->device_fd = -1;
}
bool intel_gpu_has_extension(const struct intel_gpu *gpu, const char *ext)
diff --git a/icd/intel/gpu.h b/icd/intel/gpu.h
index acccfc5..42568f5 100644
--- a/icd/intel/gpu.h
+++ b/icd/intel/gpu.h
@@ -50,20 +50,23 @@
struct intel_gpu *next;
int devid; /* PCI device ID */
- char *path; /* path to the render or legacy node, or NULL */
- int gen_opaque; /* always read with intel_gpu_gen() */
+ char *primary_node; /* path to the primary node */
+ char *render_node; /* path to the render node */
+ int gen_opaque; /* always read this with intel_gpu_gen() */
int gt;
XGL_GPU_SIZE max_batch_buffer_size;
XGL_UINT batch_buffer_reloc_count;
/*
- * The enabled hardware features could be limited by the kernel. This
- * mutable internal fd allows us to talk to the kernel when we need to.
+ * The enabled hardware features could be limited by the kernel. These
+ * mutable fds allow us to talk to the kernel before the device is
+ * created.
*/
- int fd_internal;
+ int primary_fd_internal;
+ int render_fd_internal;
- int fd;
+ int device_fd;
};
static inline struct intel_gpu *intel_gpu(XGL_PHYSICAL_GPU gpu)
@@ -82,8 +85,8 @@
bool intel_gpu_is_valid(const struct intel_gpu *gpu);
-XGL_RESULT intel_gpu_add(int devid, const char *path,
- struct intel_gpu **gpu_ret);
+XGL_RESULT intel_gpu_add(int devid, const char *primary_node,
+ const char *render_node, struct intel_gpu **gpu_ret);
void intel_gpu_remove_all(void);
struct intel_gpu *intel_gpu_get_list(void);
diff --git a/icd/intel/intel.c b/icd/intel/intel.c
index 2f65585..c248460 100644
--- a/icd/intel/intel.c
+++ b/icd/intel/intel.c
@@ -102,14 +102,15 @@
count = 0;
dev = devices;
while (dev) {
- const char *devnode;
+ const char *primary_node, *render_node;
struct intel_gpu *gpu;
- devnode = icd_drm_get_devnode(dev, ICD_DRM_MINOR_RENDER);
- if (!devnode)
+ primary_node = icd_drm_get_devnode(dev, ICD_DRM_MINOR_LEGACY);
+ render_node = icd_drm_get_devnode(dev, ICD_DRM_MINOR_RENDER);
+ if (!primary_node || !render_node)
continue;
- ret = intel_gpu_add(dev->devid, devnode, &gpu);
+ ret = intel_gpu_add(dev->devid, primary_node, render_node, &gpu);
if (ret == XGL_SUCCESS) {
pGpus[count++] = (XGL_PHYSICAL_GPU) gpu;
if (count >= maxGpus)