/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
#include <linux/amd-iommu.h>
#endif
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler_gfx8.asm"
#include "cwsr_trap_handler_gfx9.asm"
#include "kfd_iommu.h"

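/* Size in bytes reserved per MQD (memory queue descriptor), padded for alignment */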
#define MQD_SIZE_ALIGNED 768
static atomic_t kfd_device_suspended = ATOMIC_INIT(0);

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
        .asic_family = CHIP_KAVERI,
        .max_pasid_bits = 16,
        /* max num of queues for KV. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
};

static const struct kfd_device_info carrizo_device_info = {
        .asic_family = CHIP_CARRIZO,
        .max_pasid_bits = 16,
        /* max num of queues for CZ. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
        .asic_family = CHIP_HAWAII,
        .max_pasid_bits = 16,
        /* max num of queues for Hawaii. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
};

static const struct kfd_device_info tonga_device_info = {
        .asic_family = CHIP_TONGA,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
};

static const struct kfd_device_info tonga_vf_device_info = {
        .asic_family = CHIP_TONGA,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
};

static const struct kfd_device_info fiji_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
};

static const struct kfd_device_info fiji_vf_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
};

static const struct kfd_device_info polaris10_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
};

static const struct kfd_device_info polaris10_vf_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
};

static const struct kfd_device_info polaris11_device_info = {
        .asic_family = CHIP_POLARIS11,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
};

struct kfd_deviceid {
        unsigned short did;
        const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
        { 0x1304, &kaveri_device_info },        /* Kaveri */
        { 0x1305, &kaveri_device_info },        /* Kaveri */
        { 0x1306, &kaveri_device_info },        /* Kaveri */
        { 0x1307, &kaveri_device_info },        /* Kaveri */
        { 0x1309, &kaveri_device_info },        /* Kaveri */
        { 0x130A, &kaveri_device_info },        /* Kaveri */
        { 0x130B, &kaveri_device_info },        /* Kaveri */
        { 0x130C, &kaveri_device_info },        /* Kaveri */
        { 0x130D, &kaveri_device_info },        /* Kaveri */
        { 0x130E, &kaveri_device_info },        /* Kaveri */
        { 0x130F, &kaveri_device_info },        /* Kaveri */
        { 0x1310, &kaveri_device_info },        /* Kaveri */
        { 0x1311, &kaveri_device_info },        /* Kaveri */
        { 0x1312, &kaveri_device_info },        /* Kaveri */
        { 0x1313, &kaveri_device_info },        /* Kaveri */
        { 0x1315, &kaveri_device_info },        /* Kaveri */
        { 0x1316, &kaveri_device_info },        /* Kaveri */
        { 0x1317, &kaveri_device_info },        /* Kaveri */
        { 0x1318, &kaveri_device_info },        /* Kaveri */
        { 0x131B, &kaveri_device_info },        /* Kaveri */
        { 0x131C, &kaveri_device_info },        /* Kaveri */
        { 0x131D, &kaveri_device_info },        /* Kaveri */
        { 0x9870, &carrizo_device_info },       /* Carrizo */
        { 0x9874, &carrizo_device_info },       /* Carrizo */
        { 0x9875, &carrizo_device_info },       /* Carrizo */
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
#endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
        { 0x67A2, &hawaii_device_info },        /* Hawaii */
        { 0x67A8, &hawaii_device_info },        /* Hawaii */
        { 0x67A9, &hawaii_device_info },        /* Hawaii */
        { 0x67AA, &hawaii_device_info },        /* Hawaii */
        { 0x67B0, &hawaii_device_info },        /* Hawaii */
        { 0x67B1, &hawaii_device_info },        /* Hawaii */
        { 0x67B8, &hawaii_device_info },        /* Hawaii */
        { 0x67B9, &hawaii_device_info },        /* Hawaii */
        { 0x67BA, &hawaii_device_info },        /* Hawaii */
        { 0x67BE, &hawaii_device_info },        /* Hawaii */
        { 0x6920, &tonga_device_info },         /* Tonga */
        { 0x6921, &tonga_device_info },         /* Tonga */
        { 0x6928, &tonga_device_info },         /* Tonga */
        { 0x6929, &tonga_device_info },         /* Tonga */
        { 0x692B, &tonga_device_info },         /* Tonga */
        { 0x692F, &tonga_vf_device_info },      /* Tonga vf */
        { 0x6938, &tonga_device_info },         /* Tonga */
        { 0x6939, &tonga_device_info },         /* Tonga */
        { 0x7300, &fiji_device_info },          /* Fiji */
        { 0x730F, &fiji_vf_device_info },       /* Fiji vf */
        { 0x67C0, &polaris10_device_info },     /* Polaris10 */
        { 0x67C1, &polaris10_device_info },     /* Polaris10 */
        { 0x67C2, &polaris10_device_info },     /* Polaris10 */
        { 0x67C4, &polaris10_device_info },     /* Polaris10 */
        { 0x67C7, &polaris10_device_info },     /* Polaris10 */
        { 0x67C8, &polaris10_device_info },     /* Polaris10 */
        { 0x67C9, &polaris10_device_info },     /* Polaris10 */
        { 0x67CA, &polaris10_device_info },     /* Polaris10 */
        { 0x67CC, &polaris10_device_info },     /* Polaris10 */
        { 0x67CF, &polaris10_device_info },     /* Polaris10 */
        { 0x67D0, &polaris10_vf_device_info },  /* Polaris10 vf */
        { 0x67DF, &polaris10_device_info },     /* Polaris10 */
        { 0x67E0, &polaris11_device_info },     /* Polaris11 */
        { 0x67E1, &polaris11_device_info },     /* Polaris11 */
        { 0x67E3, &polaris11_device_info },     /* Polaris11 */
        { 0x67E7, &polaris11_device_info },     /* Polaris11 */
        { 0x67E8, &polaris11_device_info },     /* Polaris11 */
        { 0x67E9, &polaris11_device_info },     /* Polaris11 */
        { 0x67EB, &polaris11_device_info },     /* Polaris11 */
        { 0x67EF, &polaris11_device_info },     /* Polaris11 */
        { 0x67FF, &polaris11_device_info },     /* Polaris11 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
                if (supported_devices[i].did == did) {
                        WARN_ON(!supported_devices[i].device_info);
                        return supported_devices[i].device_info;
                }
        }

        dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
                 did);

        return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
        struct kfd_dev *kfd;
        int ret;
        const struct kfd_device_info *device_info =
                lookup_device_info(pdev->device);

        if (!device_info) {
                dev_err(kfd_device, "kgd2kfd_probe failed\n");
                return NULL;
        }

        /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
         * 32 and 64-bit requests are possible and must be
         * supported.
         */
        ret = pci_enable_atomic_ops_to_root(pdev,
                        PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                        PCI_EXP_DEVCAP2_ATOMIC_COMP64);
        if (device_info->needs_pci_atomics && ret < 0) {
                dev_info(kfd_device,
                         "skipped device %x:%x, PCI rejects atomics\n",
                         pdev->vendor, pdev->device);
                return NULL;
        }

        kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
        if (!kfd)
                return NULL;

        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
        kfd->init_complete = false;
        kfd->kfd2kgd = f2g;

        mutex_init(&kfd->doorbell_mutex);
        memset(&kfd->doorbell_available_index, 0,
                        sizeof(kfd->doorbell_available_index));

        return kfd;
}

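/*
 * CWSR (compute wave save/restore) lets the scheduler preempt compute
 * waves mid-execution. Select the trap-handler ISA matching the ASIC
 * generation; the BUILD_BUG_ON checks below enforce that each handler
 * binary fits in a single page.
 */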
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
        if (cwsr_enable && kfd->device_info->supports_cwsr) {
                if (kfd->device_info->asic_family < CHIP_VEGA10) {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx8_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
                } else {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx9_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
                }

                kfd->cwsr_enabled = true;
        }
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources)
{
        unsigned int size;

        kfd->shared_resources = *gpu_resources;

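        /*
         * The KGD driver shares compute_vmid_bitmap to tell KFD which VMIDs
         * are reserved for compute; the first/last VMID and their count are
         * derived from the lowest and highest bits set.
         */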
        kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;

        /* Verify module parameters regarding mapped process number */
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
                dev_err(kfd_device,
                        "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
                        hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
                        kfd->vm_info.vmid_num_kfd);
                kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
        } else
                kfd->max_proc_per_quantum = hws_max_conc_proc;

        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
                        kfd->device_info->mqd_size_aligned;

        /*
         * calculate max size of runlist packet.
         * There can be only 2 packets at once
         */
        size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
                max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
                + sizeof(struct pm4_mes_runlist)) * 2;

        /* Add size of HIQ & DIQ */
        size += KFD_KERNEL_QUEUE_SIZE * 2;

        /* add another 512KB for all other allocations on gart (HPD, fences) */
        size += 512 * 1024;

        if (kfd->kfd2kgd->init_gtt_mem_allocation(
                        kfd->kgd, size, &kfd->gtt_mem,
                        &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
                dev_err(kfd_device, "Could not allocate %d bytes\n", size);
                goto out;
        }

        dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

        /* Initialize GTT sa with 512 byte chunk size */
        if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
                dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
                goto kfd_gtt_sa_init_error;
        }

        if (kfd_doorbell_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing doorbell aperture\n");
                goto kfd_doorbell_error;
        }

        if (kfd_topology_add_device(kfd)) {
                dev_err(kfd_device, "Error adding device to topology\n");
                goto kfd_topology_add_device_error;
        }

        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device, "Error initializing interrupts\n");
                goto kfd_interrupt_error;
        }

        kfd->dqm = device_queue_manager_init(kfd);
        if (!kfd->dqm) {
                dev_err(kfd_device, "Error initializing queue manager\n");
                goto device_queue_manager_error;
        }

        if (kfd_iommu_device_init(kfd)) {
                dev_err(kfd_device, "Error initializing iommuv2\n");
                goto device_iommu_error;
        }

        kfd_cwsr_init(kfd);

        if (kfd_resume(kfd))
                goto kfd_resume_error;

        kfd->dbgmgr = NULL;

        kfd->init_complete = true;
        dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
                 kfd->pdev->device);

        pr_debug("Starting kfd with the following scheduling policy %d\n",
                kfd->dqm->sched_policy);

        goto out;

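        /* Unwind in reverse order of the initialization steps above */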
kfd_resume_error:
device_iommu_error:
        device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
        kfd_interrupt_exit(kfd);
kfd_interrupt_error:
        kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
        kfd_doorbell_fini(kfd);
kfd_doorbell_error:
        kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
        kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
                kfd->pdev->vendor, kfd->pdev->device);
out:
        return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
        if (kfd->init_complete) {
                kgd2kfd_suspend(kfd);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
                kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        }

        kfree(kfd);
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
        if (!kfd->init_complete)
                return;

        /* For first KFD device suspend all the KFD processes */
        if (atomic_inc_return(&kfd_device_suspended) == 1)
                kfd_suspend_all_processes();

        kfd->dqm->ops.stop(kfd->dqm);

        kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
        int ret, count;

        if (!kfd->init_complete)
                return 0;

        ret = kfd_resume(kfd);
        if (ret)
                return ret;

        count = atomic_dec_return(&kfd_device_suspended);
        WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
        if (count == 0)
                ret = kfd_resume_all_processes();

        return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
        int err = 0;

        err = kfd_iommu_resume(kfd);
        if (err) {
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                return err;
        }

        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto dqm_start_error;
        }

        return err;

dqm_start_error:
        kfd_iommu_suspend(kfd);
        return err;
}

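/*
 * Bottom-half split: the hook below only filters the IH ring entry and
 * copies it into KFD's internal interrupt ring; decoding and event
 * delivery happen later in kfd->interrupt_work on kfd->ih_wq.
 */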
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
        if (!kfd->init_complete)
                return;

        spin_lock(&kfd->interrupt_lock);

        if (kfd->interrupts_active
            && interrupt_is_wanted(kfd, ih_ring_entry)
            && enqueue_ih_ring_entry(kfd, ih_ring_entry))
                queue_work(kfd->ih_wq, &kfd->interrupt_work);

        spin_unlock(&kfd->interrupt_lock);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_evict_queues(p);

        kfd_unref_process(p);
        return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_restore_queues(p);

        kfd_unref_process(p);
        return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
                                               struct dma_fence *fence)
{
        struct kfd_process *p;
        unsigned long active_time;
        unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

        if (!fence)
                return -EINVAL;

        if (dma_fence_is_signaled(fence))
                return 0;

        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ENODEV;

        if (fence->seqno == p->last_eviction_seqno)
                goto out;

        p->last_eviction_seqno = fence->seqno;

        /* Avoid KFD process starvation. Wait for at least
         * PROCESS_ACTIVE_TIME_MS before evicting the process again.
         */
        active_time = get_jiffies_64() - p->last_restore_timestamp;
        if (delay_jiffies > active_time)
                delay_jiffies -= active_time;
        else
                delay_jiffies = 0;

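        /*
         * Net effect: a process gets at least PROCESS_ACTIVE_TIME_MS of
         * activity between evictions; one that has already been active
         * longer than that is evicted without extra delay.
         */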
        /* During process initialization eviction_work.dwork is initialized
         * to kfd_evict_bo_worker
         */
        schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
        kfd_unref_process(p);
        return 0;
}

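/*
 * GTT sub-allocator: the single GTT buffer allocated at device init is
 * carved into fixed-size chunks tracked by a bitmap. Allocations take one
 * or more contiguous chunks; GPU and CPU addresses are derived from the
 * chunk index.
 */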
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size)
{
        unsigned int num_of_longs;

        if (WARN_ON(buf_size < chunk_size))
                return -EINVAL;
        if (WARN_ON(buf_size == 0))
                return -EINVAL;
        if (WARN_ON(chunk_size == 0))
                return -EINVAL;

        kfd->gtt_sa_chunk_size = chunk_size;
        kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

        num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
                BITS_PER_LONG;

        kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

        if (!kfd->gtt_sa_bitmap)
                return -ENOMEM;

        pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
                        kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

        mutex_init(&kfd->gtt_sa_lock);

        return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
        mutex_destroy(&kfd->gtt_sa_lock);
        kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

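/*
 * First-fit search: find a free chunk, then extend the range with
 * contiguous free chunks until the request is covered. If a gap is hit,
 * the search restarts from the chunk that broke the run.
 */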
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                        struct kfd_mem_obj **mem_obj)
{
        unsigned int found, start_search, cur_size;

        if (size == 0)
                return -EINVAL;

        if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
                return -ENOMEM;

        *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
        if ((*mem_obj) == NULL)
                return -ENOMEM;

        pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

        start_search = 0;

        mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
        /* Find the first chunk that is free */
        found = find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks,
                                        start_search);

        pr_debug("Found = %d\n", found);

        /* If there wasn't any free chunk, bail out */
        if (found == kfd->gtt_sa_num_of_chunks)
                goto kfd_gtt_no_free_chunk;

        /* Update fields of mem_obj */
        (*mem_obj)->range_start = found;
        (*mem_obj)->range_end = found;
        (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
                                        kfd->gtt_start_gpu_addr,
                                        found,
                                        kfd->gtt_sa_chunk_size);
        (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
                                        kfd->gtt_start_cpu_ptr,
                                        found,
                                        kfd->gtt_sa_chunk_size);

        pr_debug("gpu_addr = %p, cpu_addr = %p\n",
                        (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

        /* If we need only one chunk, mark it as allocated and get out */
        if (size <= kfd->gtt_sa_chunk_size) {
                pr_debug("Single bit\n");
                set_bit(found, kfd->gtt_sa_bitmap);
                goto kfd_gtt_out;
        }

        /* Otherwise, try to see if we have enough contiguous chunks */
        cur_size = size - kfd->gtt_sa_chunk_size;
        do {
                (*mem_obj)->range_end =
                        find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks, ++found);
                /*
                 * If the next free chunk is not contiguous, then we need to
                 * restart our search from the last free chunk we found (which
                 * wasn't contiguous to the previous ones)
                 */
                if ((*mem_obj)->range_end != found) {
                        start_search = found;
                        goto kfd_gtt_restart_search;
                }

                /*
                 * If we reached end of buffer, bail out with error
                 */
                if (found == kfd->gtt_sa_num_of_chunks)
                        goto kfd_gtt_no_free_chunk;

                /* Check if we don't need another chunk */
                if (cur_size <= kfd->gtt_sa_chunk_size)
                        cur_size = 0;
                else
                        cur_size -= kfd->gtt_sa_chunk_size;

        } while (cur_size > 0);

        pr_debug("range_start = %d, range_end = %d\n",
                (*mem_obj)->range_start, (*mem_obj)->range_end);

        /* Mark the chunks as allocated */
        for (found = (*mem_obj)->range_start;
                found <= (*mem_obj)->range_end;
                found++)
                set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
        mutex_unlock(&kfd->gtt_sa_lock);
        return 0;

kfd_gtt_no_free_chunk:
        pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
        mutex_unlock(&kfd->gtt_sa_lock);
        kfree(*mem_obj);
        return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
        unsigned int bit;

        /* Act like kfree when trying to free a NULL object */
        if (!mem_obj)
                return 0;

        pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
                        mem_obj, mem_obj->range_start, mem_obj->range_end);

        mutex_lock(&kfd->gtt_sa_lock);

        /* Mark the chunks as free */
        for (bit = mem_obj->range_start;
                bit <= mem_obj->range_end;
                bit++)
                clear_bit(bit, kfd->gtt_sa_bitmap);

        mutex_unlock(&kfd->gtt_sa_lock);

        kfree(mem_obj);
        return 0;
}