/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_SYSFS_FILE_MODE 0444

#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
#define KFD_MMAP_EVENTS_MASK 0x4000000000000
#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000

/*
 * When working with the cp scheduler we have to assign the HIQ, either
 * manually or via the radeon driver, to a fixed hqd slot. These are the
 * fixed HIQ hqd slot definitions for Kaveri. On Kaveri only the queues of
 * the first ME participate in cp scheduling; with that in mind we place
 * the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
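
/*
 * Illustrative use of kfd_alloc_struct() (a minimal sketch, not code from
 * this driver): the macro derives the allocation size from the pointer's
 * own type, so the call site never repeats the struct name.
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	mem_obj = kfd_alloc_struct(mem_obj);
 *	if (!mem_obj)
 *		return -ENOMEM;
 *	...
 *	kfree(mem_obj);
 */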

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
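
/*
 * Layout sketch (an assumption drawn only from the two defines above, not
 * a description of the full CWSR design): the TBA occupies the first page
 * of the buffer and the TMA starts KFD_CWSR_TMA_OFFSET bytes in, e.g.
 *
 *	void *tba = qpd->cwsr_kaddr;
 *	void *tma = qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET;
 *
 * so the trap handler code page is immediately followed by its trap
 * memory page in one mapping.
 */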

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * The HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the list of user mode queues that are currently running.
 * The DIQ queue is a debugging queue that dispatches debugging commands to
 * the firmware.
 * In this scheduling mode the user mode queue over-subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: "No H/W scheduling" is a mode which directly
 * sets the command processor registers and sets up the queues "manually".
 * This mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
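
/*
 * A minimal sketch of how the policy is typically consumed (the helper
 * names initialize_cpsch() and initialize_nocpsch() are hypothetical
 * here, used only to illustrate the split between HWS and no-HWS paths):
 *
 *	switch (sched_policy) {
 *	case KFD_SCHED_POLICY_HWS:
 *	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
 *		r = initialize_cpsch(dqm);	// firmware (cp) scheduling
 *		break;
 *	case KFD_SCHED_POLICY_NO_HWS:
 *		r = initialize_nocpsch(dqm);	// direct register programming
 *		break;
 *	default:
 *		r = -EINVAL;
 *	}
 */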

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
	void (*interrupt_wq)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts the queues that belong to a
 * specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field defines that the queue is non active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror write_ptr and the user should update it
 * after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * if it's a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};
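
/*
 * Hedged sketch of the user-mode submission flow these fields describe
 * (pseudo user-space code, not part of this header): packets are written
 * to the ring, write_ptr is bumped, then the doorbell is rung with the
 * same value so the H/W picks up the new work.
 *
 *	ring[wptr % ring_dwords] = packet_dword;
 *	wptr++;
 *	*q.write_ptr = wptr;	// visible to the cp
 *	*q.doorbell_ptr = wptr;	// notify the H/W
 *
 * The cp advances *q.read_ptr as it consumes dwords.
 */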

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should execute on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t tba_addr;
	uint64_t tma_addr;
};


enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* Flag used to tell whether the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU
	 * callback function.
	 */
	bool already_dequeued;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process structures are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of the kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;
};

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};

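/*
 * A hypothetical table entry (a sketch only; the real table and its
 * AMDKFD_IOCTL_DEF() wrapper live in kfd_chardev.c): each entry pairs an
 * ioctl command number with its amdkfd_ioctl_t handler.
 *
 *	static const struct amdkfd_ioctl_desc example_ioctls[] = {
 *		{
 *			.cmd = AMDKFD_IOC_GET_VERSION,
 *			.func = kfd_ioctl_get_version,
 *			.name = "AMDKFD_IOC_GET_VERSION",
 *		},
 *	};
 */
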
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
int kfd_bind_processes_to_device(struct kfd_dev *dev);
void kfd_unbind_processes_from_device(struct kfd_dev *dev);
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);
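
/*
 * Minimal usage sketch for the kernel doorbell helpers above (an assumed
 * flow, mirroring what a kernel queue would do): reserve a doorbell,
 * write the new write-pointer value to it, and release it on teardown.
 *
 *	unsigned int db_off;
 *	u32 __iomem *db = kfd_get_kernel_doorbell(kfd, &db_off);
 *
 *	if (db) {
 *		write_kernel_doorbell(db, new_wptr);
 *		...
 *		kfd_release_kernel_doorbell(kfd, db);
 *	}
 */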

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

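/*
 * Usage sketch (an assumption based only on the prototypes above): a
 * caller asks the sub-allocator for a chunk of the device's GTT region
 * and gets back a kfd_mem_obj carrying both the GPU address and a CPU
 * pointer to the chunk.
 *
 *	struct kfd_mem_obj *mqd_mem;
 *
 *	if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd), &mqd_mem))
 *		return -ENOMEM;
 *	memset(mqd_mem->cpu_ptr, 0, sizeof(struct cik_mqd));
 *	...
 *	kfd_gtt_sa_free(kfd, mqd_mem);
 */
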
extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

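/*
 * Illustrative call (a sketch, not lifted from the device queue manager):
 * unmapping every queue of one process ahead of process teardown might
 * look like this, with filter_param carrying the pasid.
 *
 *	r = pm_send_unmap_queue(pm, KFD_QUEUE_TYPE_COMPUTE,
 *				KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *				p->pasid, false, 0);
 */
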
void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
			unsigned int pasid, unsigned long address,
			bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
			uint32_t event_type, bool auto_reset, uint32_t node_id,
			uint32_t *event_id, uint32_t *event_trigger_data,
			uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

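/*
 * Event flow sketch (hedged; the ioctl layer in kfd_chardev.c is the real
 * caller of these): user space creates an event, later waits on it, and a
 * device interrupt signals it by pasid.
 *
 *	r = kfd_event_create(devkfd, p, KFD_IOC_EVENT_SIGNAL, true, 0,
 *			     &event_id, &trigger_data, &page_offset,
 *			     &slot_index);
 *	...
 *	kfd_signal_event_interrupt(pasid, partial_id, valid_id_bits);
 *	...
 *	kfd_event_destroy(p, event_id);
 */
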
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif