/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
#include <linux/amd-iommu.h>
#endif
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler_gfx8.asm"
#include "kfd_iommu.h"

#define MQD_SIZE_ALIGNED 768
static atomic_t kfd_device_suspended = ATOMIC_INIT(0);

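/*
 * Per-ASIC properties consumed by the rest of KFD. One kfd_device_info
 * instance exists for each supported ASIC (plus VF variants where they
 * differ); the matching entry is selected by PCI device ID in
 * lookup_device_info(). ASICs that depend on the AMD IOMMUv2 driver are
 * only compiled in when KFD_SUPPORT_IOMMU_V2 is defined.
 */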
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* max num of queues for CZ.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf*/
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf*/
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

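/*
 * Linear search of supported_devices[] by PCI device ID. Returns NULL
 * (after logging a warning) when the device is not supported by KFD.
 */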
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

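/*
 * First stage of device initialization, called by KGD when the GPU is
 * probed. Looks up the per-ASIC info, optionally verifies that the PCI
 * root port accepts 32/64-bit AtomicOps, and allocates the kfd_dev
 * structure. Returns NULL if the device cannot be supported.
 */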
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;

	const struct kfd_device_info *device_info =
				lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	if (device_info->needs_pci_atomics) {
		/* Allow BIF to recode atomics to PCIe 3.0
		 * AtomicOps. 32 and 64-bit requests are possible and
		 * must be supported.
		 */
		if (pci_enable_atomic_ops_to_root(pdev,
				PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
				PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
			dev_info(kfd_device,
				"skipped device %x:%x, PCI rejects atomics",
				 pdev->vendor, pdev->device);
			return NULL;
		}
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}

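/*
 * Enable compute wave save/restore (CWSR) if the module parameter allows
 * it and the ASIC supports it. The embedded GFX8 trap handler is used as
 * the CWSR ISA and must fit in a single page.
 */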
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);

		kfd->cwsr_isa = cwsr_trap_gfx8_hex;
		kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		kfd->cwsr_enabled = true;
	}
}

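/*
 * Second stage of device initialization, called by KGD once shared GPU
 * resources (VMIDs, doorbells, GTT) are known. Sizes and allocates the
 * GTT buffer used for MQDs, runlist packets and kernel queues, then
 * brings up doorbells, topology, interrupts, the device queue manager,
 * IOMMUv2 (where needed) and CWSR, and finally resumes the device. On
 * any failure the steps already completed are unwound in reverse order.
 */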
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number*/
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

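/*
 * Tear down everything set up by kgd2kfd_device_init() (if it completed)
 * and free the kfd_dev structure allocated in kgd2kfd_probe().
 */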
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

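/*
 * Called by KGD when the GPU is being suspended. The first device to
 * suspend also suspends all KFD processes; the device queue manager is
 * then stopped and IOMMUv2 is suspended for this device.
 */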
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For first KFD device suspend all the KFD processes */
	if (atomic_inc_return(&kfd_device_suspended) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

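/*
 * Called by KGD when the GPU is resumed. Restarts this device and, once
 * the last suspended device has resumed, restores all KFD processes.
 * The suspend/resume pairing is tracked in kfd_device_suspended.
 */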
int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_device_suspended);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

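/*
 * Resume IOMMUv2 for this device and restart its device queue manager.
 * If starting the queue manager fails, the IOMMU is suspended again so
 * the device is left in a consistent state.
 */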
static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	if (!kfd->init_complete)
		return;

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry)
	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *	prepare for safe eviction of KFD BOs that belong to the specified
 *	process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

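/*
 * GTT sub-allocator: the single GTT buffer allocated in
 * kgd2kfd_device_init() is carved into fixed-size chunks, with a bitmap
 * (protected by gtt_sa_lock) tracking which chunks are in use.
 */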
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

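/*
 * Allocate 'size' bytes from the GTT sub-allocator as a run of
 * contiguous free chunks. On success *mem_obj describes the chunk range
 * and its GPU/CPU addresses; returns -ENOMEM if no large enough
 * contiguous range exists.
 */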
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
	if ((*mem_obj) == NULL)
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found
		 * (which wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

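/*
 * Return the chunks backing mem_obj to the sub-allocator and free the
 * kfd_mem_obj itself. A NULL mem_obj is ignored, like kfree(NULL).
 */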
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}