/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <kgd_kfd_interface.h>

#define KFD_SYSFS_FILE_MODE 0444

#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
#define KFD_MMAP_EVENTS_MASK 0x4000000000000
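
/*
 * These masks are flag bits encoded into the mmap offset (vm_pgoff) so that
 * a single mmap entry point can tell doorbell pages and event pages apart.
 * A simplified dispatch sketch, assuming a kfd_mmap() entry point like the
 * one in kfd_chardev.c (illustrative only; the real handler also clears the
 * flag bits before mapping):
 *
 *	static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct kfd_process *process = kfd_get_process(current);
 *
 *		if (IS_ERR(process))
 *			return PTR_ERR(process);
 *
 *		if (vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK)
 *			return kfd_doorbell_mmap(process, vma);
 *		else if (vma->vm_pgoff & KFD_MMAP_EVENTS_MASK)
 *			return kfd_event_mmap(process, vma);
 *
 *		return -EFAULT;
 *	}
 */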

/*
 * When working with the CP scheduler, the HIQ must be assigned to a fixed
 * HQD slot, either manually or via the radeon driver. Below are the fixed
 * HIQ HQD slot definitions for Kaveri. On Kaveri only the first ME's queues
 * participate in CP scheduling, so the HIQ slot is placed on the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
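
/*
 * The macro derives both the allocation size and the cast from the pointer
 * itself, so the type is written only once. A hypothetical call site
 * (illustrative only):
 *
 *	struct kfd_process_device *pdd = kfd_alloc_struct(pdd);
 *
 *	if (!pdd)
 *		return ERR_PTR(-ENOMEM);
 */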
57
Oded Gabbay19f6d2a2014-07-16 23:25:31 +030058#define KFD_MAX_NUM_OF_PROCESSES 512
Oded Gabbayb8cbab02015-01-18 13:18:01 +020059#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
Oded Gabbay19f6d2a2014-07-16 23:25:31 +030060
61/*
Oded Gabbayb8cbab02015-01-18 13:18:01 +020062 * Kernel module parameter to specify maximum number of supported queues per
63 * device
Oded Gabbay19f6d2a2014-07-16 23:25:31 +030064 */
Oded Gabbayb8cbab02015-01-18 13:18:01 +020065extern int max_num_of_queues_per_device;
Oded Gabbay19f6d2a2014-07-16 23:25:31 +030066
Oded Gabbayb8cbab02015-01-18 13:18:01 +020067#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
68#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
69 (KFD_MAX_NUM_OF_PROCESSES * \
70 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
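
/*
 * The hard ceiling above works out to 512 * 1024 = 524,288 queues per
 * device; the module-parameter default of 4096 sits deliberately far
 * below it.
 */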

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware code schedules the user
 * mode queues and the kernel queues, such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches to the cp both the configuration
 * and the list of user mode queues that are currently running.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the user mode queue over-subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and configures the queues "manually".
 * This mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
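
/*
 * Code that must choose between the HWS runlist path and direct HQD
 * programming typically branches on the active policy. A minimal sketch,
 * using a hypothetical helper name (illustrative only):
 *
 *	static inline bool kfd_uses_hws(void)
 *	{
 *		return sched_policy == KFD_SCHED_POLICY_HWS ||
 *		       sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION;
 *	}
 */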

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

enum asic_family_type {
	CHIP_KAVERI = 0,
	CHIP_CARRIZO
};

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
	void (*interrupt_wq)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
};
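
/*
 * interrupt_isr runs in interrupt context and only decides whether KFD
 * wants the IH ring entry; interrupt_wq does the actual event delivery
 * later, from the device's interrupt_work item. A per-ASIC instance is
 * wired up along these lines (a sketch based on the CIK implementation;
 * the handler names are assumptions here):
 *
 *	const struct kfd_event_interrupt_class event_interrupt_class_cik = {
 *		.interrupt_isr = cik_event_interrupt_isr,
 *		.interrupt_wq = cik_event_interrupt_wq,
 *	};
 */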

struct kfd_device_info {
	unsigned int asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					 * used by the kernel queue
					 */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	void *interrupt_ring;
	size_t interrupt_ring_size;
	atomic_t interrupt_ring_rptr;
	atomic_t interrupt_ring_wptr;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
		struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
		const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_preempt_type_filter
 *
 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the running queues list.
 *
 * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_preempt_type_filter {
	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
	KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
	KFD_PREEMPT_TYPE_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as non-active.
 *
 * @read_ptr: User space address that points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror write_ptr, and user mode should update
 * it after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or a kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
};
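
/*
 * A minimal sketch of filling queue_properties for a user mode compute
 * queue in PM4 format (values illustrative only; real callers derive them
 * from the create-queue ioctl arguments):
 *
 *	struct queue_properties q_properties = {
 *		.type = KFD_QUEUE_TYPE_COMPUTE,
 *		.format = KFD_QUEUE_FORMAT_PM4,
 *		.queue_address = ring_base_address,	// user VA, hypothetical
 *		.queue_size = ring_size,		// bytes, hypothetical
 *		.priority = 0xf,			// highest priority
 *		.queue_percent = 100,			// 0 marks it non-active
 *		.read_ptr = rptr_user_address,		// hypothetical
 *		.write_ptr = wptr_user_address,		// hypothetical
 *	};
 */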

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should execute on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	unsigned int num_concurrent_processes;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;
};

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* This flag tells if we should reset all
	 * wavefronts on process termination
	 */
	bool reset_wavefronts;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	struct mm_struct *mm;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Use for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* The process's queues. */
	size_t queue_array_size;

	/* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
	struct kfd_queue **queues;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* All events in process hashed by ID, linked on kfd_event.events. */
	DECLARE_HASHTABLE(events, 4);
	/* struct slot_page_header.event_pages */
	struct list_head signal_event_pages;
	u32 next_nonsignal_event_id;
	size_t signal_event_count;
	bool signal_event_limit_reached;
};

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
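
/*
 * kfd_chardev.c builds its ioctl dispatch table out of these descriptors.
 * A sketch of one table entry, assuming an AMDKFD_IOCTL_DEF() helper macro
 * along the lines of the one used there (illustrative only):
 *
 *	#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
 *		[_IOC_NR(ioctl)] = { .cmd = ioctl, .func = _func, \
 *			.flags = _flags, .cmd_drv = 0, .name = #ioctl }
 *
 *	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
 *			kfd_ioctl_create_queue, 0),
 */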

void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
int kfd_bind_processes_to_device(struct kfd_dev *dev);
void kfd_unbind_processes_from_device(struct kfd_dev *dev);
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);
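
/*
 * A kernel queue's doorbell life cycle, sketched with the declarations
 * above (error handling and locking omitted; the "prop" variable is a
 * hypothetical pointer to the queue's queue_properties):
 *
 *	prop->doorbell_ptr = kfd_get_kernel_doorbell(kfd,
 *						&prop->doorbell_off);
 *	...
 *	// after copying packets into the ring and bumping the write pointer:
 *	write_kernel_doorbell(prop->doorbell_ptr, new_wptr);
 *	...
 *	kfd_release_kernel_doorbell(kfd, prop->doorbell_ptr);
 */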

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
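
/*
 * Typical sub-allocator usage, e.g. carving an MQD out of the device's GTT
 * region (a sketch; mqd_size is a hypothetical size in bytes):
 *
 *	struct kfd_mem_obj *mqd_mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, mqd_size, &mqd_mem_obj))
 *		return -ENOMEM;
 *	// mqd_mem_obj->cpu_ptr is CPU-writable, ->gpu_addr is the GPU address
 *	...
 *	kfd_gtt_sa_free(kfd, mqd_mem_obj);
 */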

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);

int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int flags,
			enum kfd_queue_type type,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
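
/*
 * The create-queue ioctl path roughly reduces to the following PQM calls
 * (a sketch; q_properties is assumed to be filled as shown earlier):
 *
 *	unsigned int queue_id;
 *	int err;
 *
 *	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
 *				0, KFD_QUEUE_TYPE_COMPUTE, &queue_id);
 *	if (err)
 *		return err;
 *	...
 *	err = pqm_destroy_queue(&p->pqm, queue_id);
 */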

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout);

/* Packet Manager */

#define KFD_HIQ_TIMEOUT (500)

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
#define KFD_UNMAP_LATENCY (150)
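
/*
 * The fence values above pair with pm_send_query_status() and
 * amdkfd_fence_wait_timeout(): the driver primes a fence word in GTT with
 * KFD_FENCE_INIT, asks the CP to overwrite it with KFD_FENCE_COMPLETED,
 * then polls until the value flips or the timeout expires. A sketch
 * (fence_cpu_addr, fence_gpu_addr and timeout are hypothetical; in
 * practice they come from the device queue manager, and the return value
 * must be checked for timeout):
 *
 *	*fence_cpu_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	err = amdkfd_fence_wait_timeout(fence_cpu_addr,
 *					KFD_FENCE_COMPLETED, timeout);
 */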

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;

enum kfd_event_wait_result {
	KFD_WAIT_COMPLETE,
	KFD_WAIT_TIMEOUT,
	KFD_WAIT_ERROR
};

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
			uint32_t num_events, void __user *data,
			bool all, uint32_t user_timeout_ms,
			enum kfd_event_wait_result *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
			uint32_t event_type, bool auto_reset, uint32_t node_id,
			uint32_t *event_id, uint32_t *event_trigger_data,
			uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
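
/*
 * Event life cycle, as driven by the events ioctls (a sketch; the
 * out-parameter variables are hypothetical):
 *
 *	// create: allocates an ID and, for signal events, a slot in a
 *	// signal page that user mode later maps via kfd_event_mmap()
 *	err = kfd_event_create(devkfd, p, KFD_IOC_EVENT_SIGNAL, true, 0,
 *				&event_id, &event_trigger_data,
 *				&event_page_offset, &event_slot_index);
 *
 *	// signal or clear from an ioctl:
 *	err = kfd_set_event(p, event_id);
 *	err = kfd_reset_event(p, event_id);
 *
 *	// and eventually:
 *	err = kfd_event_destroy(p, event_id);
 */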

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

#endif