/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_SYSFS_FILE_MODE 0444

#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
#define KFD_MMAP_EVENTS_MASK 0x4000000000000
#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000
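
/*
 * Illustrative sketch (an assumption about the driver's mmap handler on the
 * .c side, not something defined in this header): the masks above are
 * compared against the mmap page offset to route a user-space mapping to the
 * doorbell, event-page or reserved-memory handler, roughly:
 *
 *	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) == KFD_MMAP_DOORBELL_MASK)
 *		return kfd_doorbell_mmap(process, vma);
 *	else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) == KFD_MMAP_EVENTS_MASK)
 *		return kfd_event_mmap(process, vma);
 *	else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) ==
 *		 KFD_MMAP_RESERVED_MEM_MASK)
 *		return kfd_reserved_mem_mmap(process, vma);
 */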

/*
 * When working with the CP scheduler, the HIQ must be assigned manually
 * (or via the radeon driver) to a fixed HQD slot. These are the fixed HIQ
 * HQD slot definitions for Kaveri. On Kaveri only the first ME's queues
 * participate in CP scheduling, so the HIQ slot is placed in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct) \
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
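
/*
 * Hypothetical usage sketch for kfd_alloc_struct() (illustration only; the
 * variable name is an example, not taken from this header):
 *
 *	struct kfd_process *process;
 *
 *	process = kfd_alloc_struct(process);
 *	if (!process)
 *		return ERR_PTR(-ENOMEM);
 *
 * The macro zero-allocates an object of the pointee's type, so callers do
 * not have to repeat the structure name inside the sizeof().
 */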

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
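
/*
 * Illustrative sketch (an assumption, not taken verbatim from the driver):
 * with the layout above, the per-process TMA address is derived from the TBA
 * base when the CWSR area is set up, e.g.:
 *
 *	qpd->tba_addr = cwsr_base;				(page 0, trap handler code)
 *	qpd->tma_addr = cwsr_base + KFD_CWSR_TMA_OFFSET;	(page 1, trap memory area)
 *
 * where cwsr_base is a placeholder for the start of the reserved CWSR
 * mapping.
 */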

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
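
/*
 * For reference (derived from the constants above, not stated explicitly in
 * the original): the theoretical upper bound is 512 processes * 1024 queues
 * per process = 524288 queues per device, while the default module-parameter
 * cap is 4096.
 */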

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware schedules the user mode
 * queues and the kernel queues such as HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the cp.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode over-subscription of user mode queues is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode in which the
 * driver sets the command processor registers and maps the queues "manually".
 * This mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
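
/*
 * Usage sketch (an assumption about the device queue manager code, not
 * defined in this header): asic-independent code typically branches on the
 * sched_policy module parameter when deciding how queues reach the hardware:
 *
 *	switch (sched_policy) {
 *	case KFD_SCHED_POLICY_HWS:
 *	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
 *		// map queues via runlist packets submitted through the HIQ
 *		break;
 *	case KFD_SCHED_POLICY_NO_HWS:
 *		// program the HQD registers directly from the driver
 *		break;
 *	}
 */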

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
	void (*interrupt_wq)(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					   * used by the kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 * a specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field means the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should hold the same value as write_ptr, and the user
 * should update it after updating write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * if it's a user mode or kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process	*process;
	struct kfd_dev		*device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t tba_addr;
	uint64_t tma_addr;
};


enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* Flag used to tell whether the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it has already been called in the IOMMU
	 * callback function.
	 */
	bool already_dequeued;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of the kfd_process structure */
	struct rcu_head	rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;
};

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
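
/*
 * Hypothetical table-entry sketch (illustration only; the AMDKFD_IOCTL_DEF
 * helper is an assumption about the .c side, not a definition in this
 * header). An ioctl dispatch table built from amdkfd_ioctl_desc could look
 * like:
 *
 *	#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags)			\
 *		[_IOC_NR(ioctl) - AMDKFD_COMMAND_START] = {		\
 *			.cmd = ioctl, .func = _func, .flags = _flags,	\
 *			.cmd_drv = 0, .name = #ioctl }
 *
 *	static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
 *		AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
 *				 kfd_ioctl_get_version, 0),
 *	};
 */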

void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
int kfd_bind_processes_to_device(struct kfd_dev *dev);
void kfd_unbind_processes_from_device(struct kfd_dev *dev);
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_process *process,
			struct vm_area_struct *vma);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
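
/*
 * Hypothetical usage sketch for the GTT sub-allocator (illustration only;
 * the variable names and the allocation size are examples, not taken from
 * this header):
 *
 *	struct kfd_mem_obj *mem_obj;
 *	int retval;
 *
 *	retval = kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd), &mem_obj);
 *	if (retval)
 *		return NULL;
 *	(use mem_obj->cpu_ptr for CPU access and mem_obj->gpu_addr for the GPU)
 *	kfd_gtt_sa_free(kfd, mem_obj);
 */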

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)
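
/*
 * Sketch of the fence handshake (an assumption about how the constants above
 * are used, shown only for illustration; fence_addr, fence_gpu_addr and
 * timeout_ms are placeholders):
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					timeout_ms);
 *
 * i.e. the firmware is asked to write KFD_FENCE_COMPLETED to the fence
 * address once the request has been processed, and the driver polls for it.
 */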

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
			uint32_t num_events, void __user *data,
			bool all, uint32_t user_timeout_ms,
			uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
			uint32_t event_type, bool auto_reset, uint32_t node_id,
			uint32_t *event_id, uint32_t *event_trigger_data,
			uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif