drm/amdkfd: Add quiesce_mm and resume_mm to kgd2kfd_calls
drivers/gpu/drm/amd/amdkfd/kfd_device.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
#include <linux/amd-iommu.h>
#endif
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler_gfx8.asm"
#include "kfd_iommu.h"

#define MQD_SIZE_ALIGNED 768
static atomic_t kfd_device_suspended = ATOMIC_INIT(0);

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* max num of queues for CZ.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* max num of queues for KV.TODO should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};


static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
};


struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf */
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf */
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

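/* Return the kfd_device_info for a PCI device ID, or NULL if the device
 * is not listed in supported_devices.
 */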
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

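/* Called by KGD to create a kfd_dev for a supported GPU. Devices that
 * need PCIe atomics are skipped when the root port cannot provide
 * 32- and 64-bit AtomicOp completion.
 */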
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;

	const struct kfd_device_info *device_info =
		lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	if (device_info->needs_pci_atomics) {
		/* Allow BIF to recode atomics to PCIe 3.0
		 * AtomicOps. 32 and 64-bit requests are possible and
		 * must be supported.
		 */
		if (pci_enable_atomic_ops_to_root(pdev,
				PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
				PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) {
			dev_info(kfd_device,
				"skipped device %x:%x, PCI rejects atomics",
				pdev->vendor, pdev->device);
			return NULL;
		}
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}

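/* Enable compute wave save/restore when both the cwsr_enable module
 * parameter and the ASIC support it, using the built-in gfx8 trap handler.
 */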
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);

		kfd->cwsr_isa = cwsr_trap_gfx8_hex;
		kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		kfd->cwsr_enabled = true;
	}
}

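/* Second-stage init, called once KGD shares its resources: size and
 * allocate the GTT scratch region, then bring up the doorbell aperture,
 * topology, interrupts, device queue manager, IOMMUv2 binding and CWSR,
 * and finally start the device via kfd_resume().
 */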
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

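/*
 * kfd_device_suspended counts suspended devices: all KFD processes are
 * evicted when the first device suspends and are restored only when the
 * last suspended device has resumed.
 */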
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For first KFD device suspend all the KFD processes */
	if (atomic_inc_return(&kfd_device_suspended) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_device_suspended);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

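/* Resume the IOMMU binding and restart the device queue manager. */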
static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	if (!kfd->init_complete)
		return;

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry)
	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

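/*
 * kgd2kfd_quiesce_mm / kgd2kfd_resume_mm - evict and restore the queues
 * of the KFD process that owns @mm. Exposed to KGD through kgd2kfd_calls.
 */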
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

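/*
 * GTT sub-allocator: the GTT scratch buffer is carved into fixed-size
 * chunks whose state is tracked in gtt_sa_bitmap under gtt_sa_lock.
 */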
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

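/*
 * First-fit search for enough contiguous free chunks to satisfy the
 * request; the scan restarts from the next free chunk whenever the run
 * found so far turns out not to be contiguous.
 */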
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
	if ((*mem_obj) == NULL)
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones)
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}