amdgpu: add flag to support 32bit VA address v4

The AMDGPU_VA_RANGE_32_BIT flag is added to amdgpu_va_range_alloc to
request a VA range in the 32bit address space.
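
A caller requests a 32bit range by passing this flag to
amdgpu_va_range_alloc. A minimal usage sketch (not part of this patch;
"dev" is assumed to be an already initialized device handle):

    uint64_t va_base;
    amdgpu_va_handle va_handle;
    int r;

    /* Request 64KB of GPU VA space guaranteed to lie below 4GB. */
    r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                              0x10000, 0, 0,
                              &va_base, &va_handle,
                              AMDGPU_VA_RANGE_32_BIT);
    if (!r) {
        /* va_base < 0x100000000 here */
        amdgpu_va_range_free(va_handle);
    }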

The 32bit address space is reserved at initialization time and managed
with a separate VAMGR that is carved out of the global VAMGR's range.
If not enough VA space is available in the range above 4GB, this
reserved range is used as a fallback.
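
For example (hypothetical numbers, assuming nothing has been allocated
from the global VAMGR yet):

    virtual_address_offset = 0x200000      (2MB)
    virtual_address_max    = 0x800000000   (32GB)
    max   = MIN2(0x800000000, 0xffffffff)  = 0xffffffff
    size  = max - virtual_address_offset   = 0xffdfffff
    start = amdgpu_vamgr_find_va(vamgr, size, alignment, 0)
                                           = 0x200000
    => vamgr_32 manages [0x200000, 0xffffffff]

so allocations made without AMDGPU_VA_RANGE_32_BIT come from the space
above 4GB first and only fall back into this window when that space is
exhausted.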

v2: add comment for AMDGPU_VA_RANGE_32_BIT, and add vamgr to va_range
v3: rebase to Emil's drm_private series
v4: fix one warning

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index a3eea84..e44d802 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1075,6 +1075,11 @@
 			     uint32_t *values);
 
 /**
+ * Flag to request a VA address range in the 32bit address space
+ */
+#define AMDGPU_VA_RANGE_32_BIT		0x1
+
+/**
  * Allocate virtual address range
  *
  * \param dev - [in] Device handle. See #amdgpu_device_initialize()
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index c6bbae8..e16cd24 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -43,6 +43,7 @@
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_hash_table.h"
+#include "util_math.h"
 
 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 #define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -173,6 +174,7 @@
 	int flag_auth = 0;
 	int flag_authexist=0;
 	uint32_t accel_working = 0;
+	uint64_t start, max;
 
 	*device_handle = NULL;
 
@@ -251,6 +253,19 @@
 
 	dev->vamgr = amdgpu_vamgr_get_global(dev);
 
+	max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
+	start = amdgpu_vamgr_find_va(dev->vamgr,
+				     max - dev->dev_info.virtual_address_offset,
+				     dev->dev_info.virtual_address_alignment, 0);
+	if (start > 0xffffffff)
+		goto free_va; /* shouldn't get here */
+
+	dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+	if (dev->vamgr_32 == NULL)
+		goto free_va;
+	amdgpu_vamgr_init(dev->vamgr_32, start, max,
+			  dev->dev_info.virtual_address_alignment);
+
 	*major_version = dev->major_version;
 	*minor_version = dev->minor_version;
 	*device_handle = dev;
@@ -259,6 +274,11 @@
 
 	return 0;
 
+free_va:
+	r = -ENOMEM;
+	amdgpu_vamgr_free_va(dev->vamgr, start,
+			     max - dev->dev_info.virtual_address_offset);
+
 cleanup:
 	if (dev->fd >= 0)
 		close(dev->fd);
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 4b07aff..3ce0969 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -66,6 +66,7 @@
 	uint64_t address;
 	uint64_t size;
 	enum amdgpu_gpu_va_range range;
+	struct amdgpu_bo_va_mgr *vamgr;
 };
 
 struct amdgpu_device {
@@ -83,7 +84,10 @@
 	pthread_mutex_t bo_table_mutex;
 	struct drm_amdgpu_info_device dev_info;
 	struct amdgpu_gpu_info info;
+	/** The global VA manager for the whole virtual address space */
 	struct amdgpu_bo_va_mgr *vamgr;
+	/** The VA manager for the 32bit address space */
+	struct amdgpu_bo_va_mgr *vamgr_32;
 };
 
 struct amdgpu_bo {
@@ -128,6 +132,11 @@
 amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
 		       struct amdgpu_bo_va_mgr *src);
 
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+		       uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
 drm_private uint64_t
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		     uint64_t alignment, uint64_t base_required);
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index eef8a71..507a73a 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -46,7 +46,7 @@
 	return -EINVAL;
 }
 
-static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 			      uint64_t max, uint64_t alignment)
 {
 	mgr->va_offset = start;
@@ -57,7 +57,7 @@
 	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
 }
 
-static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
 {
 	struct amdgpu_bo_va_hole *hole;
 	LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
@@ -255,23 +255,39 @@
 			  amdgpu_va_handle *va_range_handle,
 			  uint64_t flags)
 {
-	va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
-	size = ALIGN(size, vamgr.va_alignment);
+	struct amdgpu_bo_va_mgr *vamgr;
 
-	*va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
+	if (flags & AMDGPU_VA_RANGE_32_BIT)
+		vamgr = dev->vamgr_32;
+	else
+		vamgr = dev->vamgr;
+
+	va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+	size = ALIGN(size, vamgr->va_alignment);
+
+	*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
 					va_base_alignment, va_base_required);
 
+	if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+	    (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+		/* fallback to 32bit address */
+		vamgr = dev->vamgr_32;
+		*va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+					va_base_alignment, va_base_required);
+	}
+
 	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
 		struct amdgpu_va* va;
 		va = calloc(1, sizeof(struct amdgpu_va));
 		if(!va){
-			amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, size);
+			amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
 			return -ENOMEM;
 		}
 		va->dev = dev;
 		va->address = *va_base_allocated;
 		va->size = size;
 		va->range = va_range_type;
+		va->vamgr = vamgr;
 		*va_range_handle = va;
 	} else {
 		return -EINVAL;
@@ -284,7 +300,9 @@
 {
 	if(!va_range_handle || !va_range_handle->address)
 		return 0;
-	amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, va_range_handle->address,
+
+	amdgpu_vamgr_free_va(va_range_handle->vamgr,
+			va_range_handle->address,
 			va_range_handle->size);
 	free(va_range_handle);
 	return 0;