/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_SYSFS_FILE_MODE 0444

#define KFD_MMAP_DOORBELL_MASK 0x8000000000000ull
#define KFD_MMAP_EVENTS_MASK 0x4000000000000ull
#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000ull
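
/*
 * Illustrative sketch only (not the driver's actual mmap handler): the masks
 * above tag the upper bits of the mmap offset so that a single device file
 * can back different kinds of mappings. A handler would typically test
 * vma->vm_pgoff against each mask and clear it before dispatching; 'process'
 * is assumed to come from kfd_get_process(current):
 *
 *	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
 *			KFD_MMAP_DOORBELL_MASK) {
 *		vma->vm_pgoff ^= KFD_MMAP_DOORBELL_MASK;
 *		return kfd_doorbell_mmap(process, vma);
 *	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
 *			KFD_MMAP_EVENTS_MASK) {
 *		vma->vm_pgoff ^= KFD_MMAP_EVENTS_MASK;
 *		return kfd_event_mmap(process, vma);
 *	}
 */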

/*
 * When working with the CP scheduler, the HIQ must be assigned, either
 * manually or via the radeon driver, to a fixed HQD slot. These are the
 * fixed HIQ HQD slot definitions for Kaveri. On Kaveri only the first ME's
 * queues participate in CP scheduling; with that in mind, we place the HIQ
 * slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
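
/*
 * Example use of kfd_alloc_struct() (illustrative caller; the local variable
 * name is hypothetical):
 *
 *	struct kfd_dev *kfd;
 *
 *	kfd = kfd_alloc_struct(kfd);
 *	if (!kfd)
 *		return NULL;
 */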

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as the TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
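
/*
 * Minimal sketch of how the layout above is typically consumed (illustrative
 * only; 'qpd' is assumed to be a struct qcm_process_device whose CWSR buffer
 * is already mapped at tba_addr):
 *
 *	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
 */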

/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum number of processes per
 * HW scheduler
 */
extern int hws_max_conc_proc;

/* Kernel module parameter to enable/disable CWSR (compute wave save/restore) */
extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send SIGTERM to an HSA process
 * on unhandled exceptions
 */
extern int send_sigterm;

/*
 * Ignore the CRAT table during KFD initialization; can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;
/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
 * processor (CP) scheduling. In this mode the firmware schedules the user
 * mode queues as well as kernel queues such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the CP.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the user mode queue over-subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and sets up the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
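
/*
 * Illustrative sketch only (not the device queue manager implementation):
 * consumers of the sched_policy module parameter typically branch between
 * the HWS and no-HWS code paths, e.g.:
 *
 *	switch (sched_policy) {
 *	case KFD_SCHED_POLICY_HWS:
 *	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
 *		ret = program_hiq_and_build_runlist(dqm);
 *		break;
 *	case KFD_SCHED_POLICY_NO_HWS:
 *		ret = program_hqd_registers_directly(dqm);
 *		break;
 *	}
 *
 * The two helper names above are hypothetical and stand in for the HWS and
 * no-HWS paths respectively.
 */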

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all dynamically created
 * (non-static) queues.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * CP has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror write_ptr, and the user should update it
 * after updating write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or a kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};
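
/*
 * Hypothetical helper, shown only to illustrate the @queue_percent,
 * @is_active and @is_evicted semantics documented above (it is not part of
 * this header):
 *
 *	static bool queue_is_active(const struct queue_properties *p)
 *	{
 *		return p->queue_size > 0 && p->queue_percent > 0 &&
 *		       !p->is_evicted;
 *	}
 */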

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	uint64_t ib_base;
	void *ib_kaddr;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10
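
/*
 * Minimal sketch of how the intervals above are typically used (illustrative
 * only, not the eviction implementation; 'p' is assumed to be a struct
 * kfd_process whose queues were just evicted):
 *
 *	schedule_delayed_work(&p->restore_work,
 *			msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
 */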

int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence);

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *vm;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has been dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
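
/*
 * Example use of qpd_to_pdd() (illustrative only): given a pointer to the
 * qcm_process_device embedded in a kfd_process_device, recover the
 * containing pdd:
 *
 *	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 */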

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Use for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};

void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
				struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
				struct kfd_process *p,
				struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
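
/*
 * Minimal sketch of the fence handshake the two values above are used for
 * (illustrative only, not the packet manager code; 'fence_addr',
 * 'fence_gpu_addr' and 'timeout_ms' are assumed to be set up by the caller):
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					timeout_ms);
 */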

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif