/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"

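/*
 * Size reserved per MQD (memory queue descriptor) in the GTT budget
 * calculation below; presumably the largest MQD variant in use, rounded
 * up for alignment.
 */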
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution, and
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};


static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
	{ 0x15DD, &raven_device_info },		/* Raven */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf */
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf */
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
	{ 0x6860, &vega10_device_info },	/* Vega10 */
	{ 0x6861, &vega10_device_info },	/* Vega10 */
	{ 0x6862, &vega10_device_info },	/* Vega10 */
	{ 0x6863, &vega10_device_info },	/* Vega10 */
	{ 0x6864, &vega10_device_info },	/* Vega10 */
	{ 0x6867, &vega10_device_info },	/* Vega10 */
	{ 0x6868, &vega10_device_info },	/* Vega10 */
	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf */
	{ 0x687F, &vega10_device_info },	/* Vega10 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

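/*
 * Map a PCI device ID to its kfd_device_info entry with a linear scan of
 * supported_devices; the table is small enough that no binary search is
 * needed.
 */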
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;
	int ret;
	const struct kfd_device_info *device_info =
		lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (device_info->needs_pci_atomics && ret < 0) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		kfree(kfd);
		return NULL;
	} else if (!ret)
		kfd->pci_atomic_requested = true;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}

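/*
 * CWSR (compute wave save/restore) allows compute waves to be preempted
 * mid-execution by a trap handler that saves their state. A gfx8 handler
 * image is used for pre-Vega10 ASICs and a gfx9 image otherwise; the
 * BUILD_BUG_ONs below enforce that each image fits in a single page.
 */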
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding the mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once.
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;
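	/*
	 * As a rough illustration, assuming the default
	 * max_num_of_queues_per_device of 4096, the MQD portion alone is
	 * 4096 * 768 bytes = 3 MiB, so the total reserved here comes to a
	 * few MiB of GTT per device.
	 */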

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

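	/* Error unwinding: each label below undoes the initialization steps
	 * that succeeded before the failure, in reverse order.
	 */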
kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;
	kgd2kfd_suspend(kfd);

	/* hold dqm->lock to prevent further execution */
	dqm_lock(kfd->dqm);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	dqm_unlock(kfd->dqm);

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count != 0, "KFD reset ref. error");
	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

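/*
 * Suspend and resume nest across devices via the kfd_locked counter: the
 * first suspend (0 -> 1) evicts all KFD processes, and only the matching
 * final resume (1 -> 0) restores them.
 */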
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For the first KFD device being suspended, suspend all KFD processes */
	if (atomic_inc_return(&kfd_locked) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

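/*
 * Interrupt handling is split in two: the top half below only filters and
 * copies wanted ring entries under interrupt_lock, deferring the actual
 * processing to interrupt_work on the ih_wq workqueue.
 */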
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm:   mm_struct that identifies the specified KFD process
 * @fence:	eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again.
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker.
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

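	/* One bit per chunk; round the bitmap size up to whole longs
	 * (an open-coded DIV_ROUND_UP).
	 */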
	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

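/*
 * Chunk-index-to-address helpers. For example, with the 512-byte chunk
 * size chosen in kgd2kfd_device_init(), bit_num 3 maps to byte offset
 * 3 * 512 = 1536 from the start of the KFD GTT region.
 */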
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

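/*
 * First-fit allocation over the chunk bitmap: find the first free chunk,
 * then try to extend it into a contiguous run large enough for the
 * request. When a gap of allocated chunks is hit, the search restarts
 * from the next free chunk. The whole scan runs under gtt_sa_lock.
 */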
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif