/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution.
 * Process creation (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);
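/*
 * Note: kfd_locked behaves like a suspend/reset reference count rather than
 * a plain flag: kgd2kfd_suspend() (also reached from kgd2kfd_pre_reset())
 * increments it, kgd2kfd_resume() and kgd2kfd_post_reset() decrement it, and
 * kfd_is_locked() reports whether any suspend or reset is still in flight.
 */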

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};


static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};


struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

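/*
 * PCI device IDs handled by KFD. The entries guarded by KFD_SUPPORT_IOMMU_V2
 * (Kaveri, Carrizo, Raven) are APUs whose device info sets
 * needs_iommu_device, i.e. they rely on the AMD IOMMUv2 for per-process
 * address translation; the remaining discrete GPUs do not.
 */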
static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
	{ 0x15DD, &raven_device_info },		/* Raven */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf*/
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf*/
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
	{ 0x6860, &vega10_device_info },	/* Vega10 */
	{ 0x6861, &vega10_device_info },	/* Vega10 */
	{ 0x6862, &vega10_device_info },	/* Vega10 */
	{ 0x6863, &vega10_device_info },	/* Vega10 */
	{ 0x6864, &vega10_device_info },	/* Vega10 */
	{ 0x6867, &vega10_device_info },	/* Vega10 */
	{ 0x6868, &vega10_device_info },	/* Vega10 */
	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf*/
	{ 0x687F, &vega10_device_info },	/* Vega10 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

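/*
 * kgd2kfd_probe() only performs the lightweight part of device bring-up: it
 * looks up the device info by PCI device ID, checks that PCIe atomics are
 * available when the ASIC requires them, and allocates the struct kfd_dev.
 * The heavyweight initialization (GTT, doorbells, topology, interrupts, DQM,
 * IOMMU, CWSR) happens later in kgd2kfd_device_init().
 */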
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;
	int ret;
	const struct kfd_device_info *device_info =
					lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (device_info->needs_pci_atomics && ret < 0) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}

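/*
 * CWSR (compute wave save/restore) allows in-flight compute waves to be
 * preempted via a trap handler. kfd_cwsr_init() selects the trap handler ISA
 * image for the ASIC generation: the gfx8 image for anything older than
 * Vega10, the gfx9 image otherwise. The BUILD_BUG_ON() checks enforce that
 * the chosen image fits within a single page (PAGE_SIZE).
 */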
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

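/*
 * kgd2kfd_device_init() sizes one GTT buffer for everything KFD shares with
 * the GPU (MQDs for the maximum number of queues, two runlist packets, the
 * HIQ and DIQ kernel queues, plus 512KB of slack), then brings the device up
 * in order: GTT sub-allocator, doorbell aperture, topology, interrupts,
 * device queue manager, IOMMU, CWSR and finally kfd_resume(). The error
 * labels at the end unwind these steps in reverse order.
 */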
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

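/*
 * GPU reset protocol: kgd2kfd_pre_reset() suspends KFD (which evicts
 * processes and bumps kfd_locked), signals reset events to user mode and
 * leaves dqm->lock held so nothing can be submitted during the reset;
 * kgd2kfd_post_reset() drops the lock, restarts the device and releases the
 * kfd_locked reference.
 */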
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;
	kgd2kfd_suspend(kfd);

	/* hold dqm->lock to prevent further execution */
	dqm_lock(kfd->dqm);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for the processes to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	dqm_unlock(kfd->dqm);

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count != 0, "KFD reset ref. error");
	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For first KFD device suspend all the KFD processes */
	if (atomic_inc_return(&kfd_locked) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

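/*
 * Interrupt top half: kgd2kfd_interrupt() runs in the KGD ISR, so it only
 * copies (and possibly patches) the IH ring entry into KFD's own ring under
 * interrupt_lock and defers the real handling to interrupt_work on
 * kfd->ih_wq.
 */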
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

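/*
 * GTT sub-allocator: the GTT buffer reserved in kgd2kfd_device_init() is
 * carved into fixed-size chunks (512 bytes there) tracked by a bitmap.
 * kfd_gtt_sa_allocate() does a first-fit search for enough contiguous free
 * chunks and kfd_gtt_sa_free() clears the corresponding bits again.
 *
 * A hypothetical caller sketch (field names as used elsewhere in this file):
 *
 *	struct kfd_mem_obj *mem;
 *
 *	if (kfd_gtt_sa_allocate(kfd, 2 * 512, &mem))
 *		return -ENOMEM;
 *	memset(mem->cpu_ptr, 0, 2 * 512);	// CPU view of the chunks
 *	// ... program the hardware with mem->gpu_addr ...
 *	kfd_gtt_sa_free(kfd, mem);
 */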
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

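/* Translate a chunk index into the GPU VA / CPU pointer inside the GTT buffer */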
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached the end of the buffer, bail out with an error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif