/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
22 | ||
23 | #include <linux/amd-iommu.h> | |
24 | #include <linux/bsearch.h> | |
25 | #include <linux/pci.h> | |
26 | #include <linux/slab.h> | |
27 | #include "kfd_priv.h" | |
64c7f8cf | 28 | #include "kfd_device_queue_manager.h" |
e18e794e | 29 | #include "kfd_pm4_headers.h" |
4a488a7a | 30 | |
19f6d2a6 OG |
31 | #define MQD_SIZE_ALIGNED 768 |
32 | ||
4a488a7a | 33 | static const struct kfd_device_info kaveri_device_info = { |
0da7558c BG |
34 | .asic_family = CHIP_KAVERI, |
35 | .max_pasid_bits = 16, | |
992839ad YS |
36 | /* max num of queues for KV.TODO should be a dynamic value */ |
37 | .max_no_of_hqd = 24, | |
0da7558c | 38 | .ih_ring_entry_size = 4 * sizeof(uint32_t), |
f3a39818 | 39 | .event_interrupt_class = &event_interrupt_class_cik, |
fbeb661b | 40 | .num_of_watch_points = 4, |
0da7558c BG |
41 | .mqd_size_aligned = MQD_SIZE_ALIGNED |
42 | }; | |
43 | ||
44 | static const struct kfd_device_info carrizo_device_info = { | |
45 | .asic_family = CHIP_CARRIZO, | |
4a488a7a | 46 | .max_pasid_bits = 16, |
eaccd6e7 OG |
47 | /* max num of queues for CZ.TODO should be a dynamic value */ |
48 | .max_no_of_hqd = 24, | |
b3f5e6b4 | 49 | .ih_ring_entry_size = 4 * sizeof(uint32_t), |
eaccd6e7 | 50 | .event_interrupt_class = &event_interrupt_class_cik, |
f7c826ad | 51 | .num_of_watch_points = 4, |
19f6d2a6 | 52 | .mqd_size_aligned = MQD_SIZE_ALIGNED |
4a488a7a OG |
53 | }; |
54 | ||
55 | struct kfd_deviceid { | |
56 | unsigned short did; | |
57 | const struct kfd_device_info *device_info; | |
58 | }; | |
59 | ||
60 | /* Please keep this sorted by increasing device id. */ | |
61 | static const struct kfd_deviceid supported_devices[] = { | |
62 | { 0x1304, &kaveri_device_info }, /* Kaveri */ | |
63 | { 0x1305, &kaveri_device_info }, /* Kaveri */ | |
64 | { 0x1306, &kaveri_device_info }, /* Kaveri */ | |
65 | { 0x1307, &kaveri_device_info }, /* Kaveri */ | |
66 | { 0x1309, &kaveri_device_info }, /* Kaveri */ | |
67 | { 0x130A, &kaveri_device_info }, /* Kaveri */ | |
68 | { 0x130B, &kaveri_device_info }, /* Kaveri */ | |
69 | { 0x130C, &kaveri_device_info }, /* Kaveri */ | |
70 | { 0x130D, &kaveri_device_info }, /* Kaveri */ | |
71 | { 0x130E, &kaveri_device_info }, /* Kaveri */ | |
72 | { 0x130F, &kaveri_device_info }, /* Kaveri */ | |
73 | { 0x1310, &kaveri_device_info }, /* Kaveri */ | |
74 | { 0x1311, &kaveri_device_info }, /* Kaveri */ | |
75 | { 0x1312, &kaveri_device_info }, /* Kaveri */ | |
76 | { 0x1313, &kaveri_device_info }, /* Kaveri */ | |
77 | { 0x1315, &kaveri_device_info }, /* Kaveri */ | |
78 | { 0x1316, &kaveri_device_info }, /* Kaveri */ | |
79 | { 0x1317, &kaveri_device_info }, /* Kaveri */ | |
80 | { 0x1318, &kaveri_device_info }, /* Kaveri */ | |
81 | { 0x131B, &kaveri_device_info }, /* Kaveri */ | |
82 | { 0x131C, &kaveri_device_info }, /* Kaveri */ | |
0da7558c | 83 | { 0x131D, &kaveri_device_info } /* Kaveri */ |
4a488a7a OG |
84 | }; |
85 | ||
6e81090b OG |
86 | static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, |
87 | unsigned int chunk_size); | |
88 | static void kfd_gtt_sa_fini(struct kfd_dev *kfd); | |
89 | ||
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			BUG_ON(supported_devices[i].device_info == NULL);
			return supported_devices[i].device_info;
		}
	}

	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;

	const struct kfd_device_info *device_info =
				lookup_device_info(pdev->device);

	if (!device_info)
		return NULL;

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}

static bool device_iommu_pasid_init(struct kfd_dev *kfd)
{
	const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
					AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
					AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

	struct amd_iommu_device_info iommu_info;
	unsigned int pasid_limit;
	int err;

	err = amd_iommu_device_info(kfd->pdev, &iommu_info);
	if (err < 0) {
		dev_err(kfd_device,
			"error getting iommu info. is the iommu enabled?\n");
		return false;
	}

	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
		return false;
	}

	pasid_limit = min_t(unsigned int,
			(unsigned int)1 << kfd->device_info->max_pasid_bits,
			iommu_info.max_pasids);
	/*
	 * The last pasid is used for kernel queue doorbells; in the future
	 * the last pasid might be used for a kernel thread.
	 */
	pasid_limit = min_t(unsigned int,
				pasid_limit,
				kfd->doorbell_process_limit - 1);

	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
	if (err < 0) {
		dev_err(kfd_device, "error initializing iommu device\n");
		return false;
	}

	if (!kfd_set_pasid_limit(pasid_limit)) {
		dev_err(kfd_device, "error setting pasid limit\n");
		amd_iommu_free_device(kfd->pdev);
		return false;
	}

	return true;
}

static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
	struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);

	if (dev)
		kfd_unbind_process_from_device(dev, pasid);
}

/*
 * This function is called by the IOMMU driver on PPR failure.
 */
static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
		unsigned long address, u16 flags)
{
	struct kfd_dev *dev;

	dev_warn(kfd_device,
			"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn),
			pasid,
			address,
			flags);

	dev = kfd_device_by_pci_dev(pdev);
	BUG_ON(dev == NULL);

	kfd_signal_iommu_event(dev, pasid, address,
			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);

	return AMD_IOMMU_INV_PRI_RSP_INVALID;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
		max_num_of_queues_per_device *
		sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device,
			"Could not allocate %d bytes for device (%x:%x)\n",
			size, kfd->pdev->vendor, kfd->pdev->device);
		goto out;
	}

	dev_info(kfd_device,
		"Allocated %d bytes on gart for device (%x:%x)\n",
		size, kfd->pdev->vendor, kfd->pdev->device);

	/* Initialize GTT sub-allocator with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device,
			"Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	kfd_doorbell_init(kfd);

	if (kfd_topology_add_device(kfd) != 0) {
		dev_err(kfd_device,
			"Error adding device (%x:%x) to topology\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing interrupts for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto kfd_interrupt_error;
	}

	if (!device_iommu_pasid_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing iommuv2 for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto device_iommu_pasid_error;
	}
	amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
					iommu_pasid_shutdown_callback);
	amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device,
			"Error initializing queue manager for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto device_queue_manager_error;
	}

	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
		dev_err(kfd_device,
			"Error starting queue manager for device (%x:%x)\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
		sched_policy);

	goto out;

dqm_start_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device (%x:%x) NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}
332 | ||
333 | void kgd2kfd_device_exit(struct kfd_dev *kfd) | |
334 | { | |
b17f068a | 335 | if (kfd->init_complete) { |
64c7f8cf | 336 | device_queue_manager_uninit(kfd->dqm); |
b17f068a | 337 | amd_iommu_free_device(kfd->pdev); |
2249d558 | 338 | kfd_interrupt_exit(kfd); |
b17f068a | 339 | kfd_topology_remove_device(kfd); |
73a1da0b | 340 | kfd_gtt_sa_fini(kfd); |
cea405b1 | 341 | kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); |
b17f068a | 342 | } |
5b5c4e40 | 343 | |
4a488a7a OG |
344 | kfree(kfd); |
345 | } | |
346 | ||
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	BUG_ON(kfd == NULL);

	if (kfd->init_complete) {
		kfd->dqm->ops.stop(kfd->dqm);
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
		amd_iommu_free_device(kfd->pdev);
	}
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	unsigned int pasid_limit;
	int err;

	BUG_ON(kfd == NULL);

	pasid_limit = kfd_get_pasid_limit();

	if (kfd->init_complete) {
		err = amd_iommu_init_device(kfd->pdev, pasid_limit);
		if (err < 0)
			return -ENXIO;
		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
						iommu_pasid_shutdown_callback);
		amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
		kfd->dqm->ops.start(kfd->dqm);
	}

	return 0;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	if (!kfd->init_complete)
		return;

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry)
	    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
		schedule_work(&kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

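/*
 * GTT sub-allocator: the GTT scratch buffer is carved into fixed-size
 * chunks tracked by a bitmap, so small objects can be handed out without
 * going back to the KGD allocator. Typical usage (illustrative sketch,
 * not taken from a caller in this file):
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, size, &mem_obj))
 *		return -ENOMEM;
 *	... use mem_obj->cpu_ptr and mem_obj->gpu_addr ...
 *	kfd_gtt_sa_free(kfd, mem_obj);
 */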
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_bytes;

	BUG_ON(!kfd);
	BUG_ON(!kfd->gtt_mem);
	BUG_ON(buf_size < chunk_size);
	BUG_ON(buf_size == 0);
	BUG_ON(chunk_size == 0);

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	/* one bit per chunk; the bitmap is sized in bytes */
	num_of_bytes = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
	BUG_ON(num_of_bytes == 0);

	kfd->gtt_sa_bitmap = kzalloc(num_of_bytes, GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uintptr_t) start_addr + bit_num * chunk_size);
}

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	BUG_ON(!kfd);

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if ((*mem_obj) == NULL)
		return -ENOMEM;

	pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("kfd: found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("kfd: single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, restart the
		 * search from that chunk (which wasn't contiguous with
		 * the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/* If we reached the end of the buffer, bail out with error */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("kfd: range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("kfd: allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}
554 | ||
555 | int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) | |
556 | { | |
557 | unsigned int bit; | |
558 | ||
559 | BUG_ON(!kfd); | |
9216ed29 OG |
560 | |
561 | /* Act like kfree when trying to free a NULL object */ | |
562 | if (!mem_obj) | |
563 | return 0; | |
6e81090b OG |
564 | |
565 | pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n", | |
566 | mem_obj, mem_obj->range_start, mem_obj->range_end); | |
567 | ||
568 | mutex_lock(&kfd->gtt_sa_lock); | |
569 | ||
570 | /* Mark the chunks as free */ | |
571 | for (bit = mem_obj->range_start; | |
572 | bit <= mem_obj->range_end; | |
573 | bit++) | |
574 | clear_bit(bit, kfd->gtt_sa_bitmap); | |
575 | ||
576 | mutex_unlock(&kfd->gtt_sa_lock); | |
577 | ||
578 | kfree(mem_obj); | |
579 | return 0; | |
580 | } |