/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <kgd_kfd_interface.h>

#define KFD_SYSFS_FILE_MODE 0444

/*
 * When working with the cp scheduler, the HIQ must be assigned to a fixed
 * HQD slot, either manually or via the radeon driver. These are the fixed
 * HIQ HQD slot definitions for Kaveri. On Kaveri, only the queues of the
 * first ME participate in cp scheduling; with that in mind, we place the
 * HIQ in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

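/*
 * Illustrative usage sketch of kfd_alloc_struct() (the variable name is
 * hypothetical): the macro derives both the allocation size and the cast
 * from the pointer itself, so the struct type is only spelled once.
 *
 *	struct kfd_process *process;
 *
 *	process = kfd_alloc_struct(process);
 *	if (!process)
 *		return ERR_PTR(-ENOMEM);
 */
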
/* Kernel module parameter to specify maximum number of supported processes */
extern int max_num_of_processes;

#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512

/*
 * Kernel module parameter to specify maximum number of supported queues
 * per process
 */
extern int max_num_of_queues_per_process;

#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware schedules the user mode
 * queues and the kernel queues, such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches to the cp the configuration and
 * the list of user mode queues that are currently running.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the over-subscription feature for user mode queues
 * is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: "No H/W scheduling" is a mode in which the driver
 * sets the command processor registers and the queues "manually". This mode
 * is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};

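/*
 * Illustrative only: the policy is selected through the sched_policy module
 * parameter declared above, e.g. at module load time (assuming the module
 * is loaded as amdkfd):
 *
 *	modprobe amdkfd sched_policy=2	(selects KFD_SCHED_POLICY_NO_HWS)
 */
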
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

enum asic_family_type {
	CHIP_KAVERI = 0,
	CHIP_CARRIZO
};

struct kfd_device_info {
	unsigned int asic_family;
	unsigned int max_pasid_bits;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	size_t doorbell_process_limit;	/* Number of processes we have doorbell
					 * space for.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					 * used by kernel queues
					 */

	struct kgd2kfd_shared_resources shared_resources;

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

extern const struct kfd2kgd_calls *kfd2kgd;

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_preempt_type_filter
 *
 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_preempt_type_filter {
	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
	KFD_PREEMPT_TYPE_FILTER_BY_PASID
};

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
 * buffer. This field should mirror write_ptr, and user space should update
 * it after updating write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is not cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, whether it
 * is a user mode or a kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
};

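/*
 * A minimal sketch (not taken from the driver) of filling queue_properties
 * for a user mode compute queue; the args values stand for hypothetical
 * ioctl arguments:
 *
 *	struct queue_properties q_properties = {};
 *
 *	q_properties.type = KFD_QUEUE_TYPE_COMPUTE;
 *	q_properties.format = KFD_QUEUE_FORMAT_PM4;
 *	q_properties.queue_address = args->ring_base_address;
 *	q_properties.queue_size = args->ring_size;
 *	q_properties.queue_percent = 100;
 *	q_properties.priority = 0xf;	(the highest priority)
 */
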
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	unsigned int num_concurrent_processes;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Device Queue Manager lock */
	struct mutex *lock;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	bool bound;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

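/*
 * Illustrative only: qpd_to_pdd() recovers the enclosing kfd_process_device
 * from an embedded qcm_process_device, e.g. in code that is handed only
 * the qpd:
 *
 *	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 */
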
/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	struct mm_struct *mm;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Use for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* The process's queues. */
	size_t queue_array_size;

	/* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
	struct kfd_queue **queues;

	unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;
};

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
			void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};

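/*
 * A hypothetical sketch (not part of this header) of an ioctl handler and
 * its descriptor table entry; kfd_ioctl_get_version and
 * AMDKFD_IOC_GET_VERSION serve only as an illustration here:
 *
 *	static int kfd_ioctl_get_version(struct file *filep,
 *					struct kfd_process *p, void *data)
 *	{
 *		...
 *	}
 *
 *	static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
 *		{
 *			.cmd = AMDKFD_IOC_GET_VERSION,
 *			.func = kfd_ioctl_get_version,
 *			.name = "AMDKFD_IOC_GET_VERSION",
 *		},
 *	};
 */
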
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
struct kfd_process *kfd_get_process(const struct task_struct *);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
void kfd_doorbell_init(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);

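/*
 * A minimal sketch (not taken from the driver) of how the kernel doorbell
 * helpers above fit together for a kernel queue:
 *
 *	unsigned int doorbell_off;
 *	u32 __iomem *db;
 *
 *	db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
 *	if (!db)
 *		return -ENOMEM;
 *	...
 *	write_kernel_doorbell(db, new_wptr);	(new_wptr is hypothetical)
 *	...
 *	kfd_release_kernel_doorbell(kfd, db);
 */
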
/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

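/*
 * A minimal sketch (not taken from the driver) of the GTT sub-allocator
 * pairing; the chunk is returned with kfd_gtt_sa_free() once it is no
 * longer needed:
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, size, &mem_obj) != 0)
 *		return -ENOMEM;
 *	... use mem_obj->gpu_addr and mem_obj->cpu_ptr ...
 *	kfd_gtt_sa_free(kfd, mem_obj);
 */
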
extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);

int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int flags,
			enum kfd_queue_type type,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);

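/*
 * A minimal sketch (not taken from the driver) of creating and destroying
 * a user mode compute queue through the process queue manager; p, dev,
 * filep and q_properties are hypothetical here:
 *
 *	unsigned int qid;
 *	int err;
 *
 *	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
 *				KFD_QUEUE_TYPE_COMPUTE, &qid);
 *	if (!err)
 *		err = pqm_destroy_queue(&p->pqm, qid);
 */
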
/* Packet Manager */

#define KFD_HIQ_TIMEOUT (500)

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
#define KFD_UNMAP_LATENCY (150)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

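/*
 * A rough sketch (not taken from the driver) of the typical packet manager
 * flow under cp scheduling; res, queues and fence_gpu_addr are hypothetical:
 *
 *	struct packet_manager pm;
 *
 *	pm_init(&pm, dqm);
 *	pm_send_set_resources(&pm, &res);
 *	pm_send_runlist(&pm, &queues);
 *	...
 *	pm_send_query_status(&pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	pm_uninit(&pm);
 */
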
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);

#endif