drm/radeon: GPU virtual memory support v22

Virtual address spaces are per DRM client (opener of /dev/drm).
Clients are in charge of their virtual address space: they need to
map BOs into it by calling the DRM_RADEON_GEM_VA ioctl.

The first 8M of the virtual address space is reserved by the kernel
(RADEON_VA_RESERVED_SIZE below).
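
A minimal userspace sketch of mapping a BO into the per-client address
space, assuming the uapi added by this series (struct drm_radeon_gem_va
and the RADEON_VA_*/RADEON_VM_PAGE_* names come from the new
radeon_drm.h; treat the exact field layout, and the bo_handle/fd
variables, as assumptions for illustration):

  struct drm_radeon_gem_va va;
  int r;

  memset(&va, 0, sizeof(va));
  va.handle = bo_handle;              /* GEM handle of the bo (assumed) */
  va.operation = RADEON_VA_MAP;       /* or RADEON_VA_UNMAP */
  va.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE;
  va.offset = 16 << 20;               /* anywhere above the reserved range */
  r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));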

Once we move to a two-level page table we should be able to have a
small VRAM memory footprint for each page table (there would be one
PT for all of GART, one for all of VRAM, and then one first-level
table for each virtual address space).
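
A purely illustrative address split under such a two-level scheme (the
field widths are assumptions for the example; this patch still uses a
single flat page table per VM):

  /* 4KB pages leave 12 offset bits; the remaining VA bits are split
   * between a per-VM first-level index and a second-level index.
   */
  pde = (va >> 22) & 0x3ff;  /* first-level entry, one table per VM */
  pte = (va >> 12) & 0x3ff;  /* second-level entry, shared pt area  */
  off = va & 0xfff;          /* byte offset within the 4KB page     */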

The plan includes using the sub-allocator for a common VM page table
area and using memcpy to copy VM page tables in and out, or using a
GART object and copying things in and out using DMA.

v2: agd5f fixes:
- Add vram base offset for vram pages.  The GPU physical address of a
vram page is FB_OFFSET + page offset.  FB_OFFSET is 0 on discrete
cards and the physical bus address of the stolen memory on
integrated chips.
- VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR covers all VMIDs >= 1

v3: agd5f:
- integrate with the semaphore/multi-ring stuff

v4:
- rebase on top of ttm dma & multi-ring stuff
- userspace is now in charge of the address space
- no more specific cs vm ioctl; instead the cs ioctl has a new
  chunk

v5:
- properly handle mem == NULL case from move_notify callback
- fix the vm cleanup path

v6:
- fix update of page table to only happen on valid mem placement

v7:
- add tlb flush for each vm context
- add flags to define mapping properties (readable, writeable, snooped)
- make the ring id implicit from ib->fence->ring; it is up to each asic
  callback to then do ring-specific scheduling in its vm ib scheduling
  function

v8:
- add query for ib limit and kernel reserved virtual space
- rename vm->size to max_pfn (maximum number of pages)
- update gem_va ioctl to also allow unmap operation
- bump kernel version to allow userspace to query for vm support

v9:
- rebuild the page table only on bind, and incrementally depending
  on which bos referenced by the cs have been moved
- allow virtual address space to grow
- use sa allocator for vram page table
- return invalid when querying the vm limit on non-cayman GPUs
- dump vm fault register on lockup

v10: agd5f:
- Move the vm schedule_ib callback to a standalone function, remove
  the callback and use the existing ib_execute callback for VM IBs.

v11:
- rebase on top of latest Linus tree

v12: agd5f:
- remove spurious backslash
- set IB vm_id to 0 in radeon_ib_get()

v13: agd5f:
- fix handling of RADEON_CHUNK_ID_FLAGS

v14:
- fix va destruction
- fix suspend resume
- forbid a bo from having several different vas in the same vm

v15:
- rebase

v16:
- clean up leftovers of vm init/fini

v17: agd5f:
- cs checker

v18: agd5f:
- rework the CS ioctl to better support multiple rings and
VM.  Rather than adding a new chunk id for VM, just re-use the
IB chunk id and add a new flag for VM mode.  Also define additional
dwords for the flags chunk id to define which ring we want to use
(gfx, compute, uvd, etc.) and the priority; see the illustrative
sketch below.
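
An illustrative encoding of that flags chunk (the RADEON_CS_* names
are assumptions based on this series; check radeon_drm.h for the
authoritative values):

  /* RADEON_CHUNK_ID_FLAGS payload, up to three dwords */
  uint32_t cs_flags[3];

  cs_flags[0] = RADEON_CS_USE_VM;    /* flags: execute the IB in VM mode */
  cs_flags[1] = RADEON_CS_RING_GFX;  /* which ring: gfx, compute, ...    */
  cs_flags[2] = 0;                   /* priority                         */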

v19:
- fix cs fini in the weird case of no ib
- semi-working flush fix for ni
- rebase on top of sa allocator changes

v20: agd5f:
- further CS ioctl cleanups from Christian's comments

v21: agd5f:
- integrate CS checker improvements

v22: agd5f:
- final cleanups for release, only allow VM CS on cayman

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 374f9a4..5e35423 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -118,6 +118,10 @@
 #define CAYMAN_RING_TYPE_CP1_INDEX 1
 #define CAYMAN_RING_TYPE_CP2_INDEX 2
 
+/* hardcode these limits for now */
+#define RADEON_VA_RESERVED_SIZE		(8 << 20)
+#define RADEON_IB_VM_MAX_SIZE		(64 << 10)
+
 /*
  * Errata workarounds.
  */
@@ -262,6 +266,21 @@
 	bool				initialized;
 };
 
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+	/* bo list is protected by bo being reserved */
+	struct list_head		bo_list;
+	/* vm list is protected by vm mutex */
+	struct list_head		vm_list;
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
+	uint64_t			soffset;
+	uint64_t			eoffset;
+	uint32_t			flags;
+	bool				valid;
+};
+
 struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head		list;
@@ -275,6 +294,10 @@
 	u32				tiling_flags;
 	u32				pitch;
 	int				surface_reg;
+	/* list of all virtual addresses with which this bo
+	 * is associated
+	 */
+	struct list_head		va;
 	/* Constant after initialization */
 	struct radeon_device		*rdev;
 	struct drm_gem_object		gem_base;
@@ -408,6 +431,7 @@
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
 #define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
@@ -565,6 +589,7 @@
 	uint64_t		gpu_addr;
 	uint32_t		*ptr;
 	struct radeon_fence	*fence;
+	unsigned		vm_id;
 };
 
 /*
@@ -602,6 +627,56 @@
 };
 
 /*
+ * VM
+ */
+struct radeon_vm {
+	struct list_head		list;
+	struct list_head		va;
+	int				id;
+	unsigned			last_pfn;
+	u64				pt_gpu_addr;
+	u64				*pt;
+	struct radeon_sa_bo		sa_bo;
+	struct mutex			mutex;
+	/* last fence for cs using this vm */
+	struct radeon_fence		*fence;
+};
+
+struct radeon_vm_funcs {
+	int (*init)(struct radeon_device *rdev);
+	void (*fini)(struct radeon_device *rdev);
+	/* cs mutex must be locked for schedule_ib */
+	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+	uint32_t (*page_flags)(struct radeon_device *rdev,
+			       struct radeon_vm *vm,
+			       uint32_t flags);
+	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+			unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+	struct list_head		lru_vm;
+	uint32_t			use_bitmap;
+	struct radeon_sa_manager	sa_manager;
+	uint32_t			max_pfn;
+	/* fields constant after init */
+	const struct radeon_vm_funcs	*funcs;
+	/* number of VMIDs */
+	unsigned			nvm;
+	/* vram base address for page table entries */
+	u64				vram_base_offset;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+	struct radeon_vm		vm;
+};
+
+/*
  * R6xx+ IH ring
  */
 struct r600_ih {
@@ -691,12 +766,12 @@
 struct radeon_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	int kpage_idx[2];
-	uint32_t                *kpage[2];
+	int			kpage_idx[2];
+	uint32_t		*kpage[2];
 	uint32_t		*kdata;
-	void __user *user_ptr;
-	int last_copied_page;
-	int last_page_index;
+	void __user		*user_ptr;
+	int			last_copied_page;
+	int			last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -717,11 +792,14 @@
 	/* indices of various chunks */
 	int			chunk_ib_idx;
 	int			chunk_relocs_idx;
+	int			chunk_flags_idx;
 	struct radeon_ib	*ib;
 	void			*track;
 	unsigned		family;
 	int			parser_error;
-	bool			keep_tiling_flags;
+	u32			cs_flags;
+	u32			ring;
+	s32			priority;
 };
 
 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -1018,6 +1096,7 @@
 
 	struct {
 		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
 		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
 		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
 				       struct radeon_semaphore *semaphore, bool emit_wait);
@@ -1255,6 +1334,8 @@
 			  struct drm_file *filp);
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
@@ -1404,6 +1485,8 @@
 	/* debugfs */
 	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
 	unsigned 		debugfs_count;
+	/* virtual memory */
+	struct radeon_vm_manager	vm_manager;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1568,6 +1651,7 @@
 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
 #define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
@@ -1627,6 +1711,33 @@
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo,
+		     uint64_t offset,
+		     uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo);
+
+
+/*
  * R600 vram scratch functions
  */
 int r600_vram_scratch_init(struct radeon_device *rdev);