/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/amd-iommu.h>
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"

#define MQD_SIZE_ALIGNED 768

static const struct kfd_device_info kaveri_device_info = {
        .max_pasid_bits = 16,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED
};

struct kfd_deviceid {
        unsigned short did;
        const struct kfd_device_info *device_info;
};

/* Please keep this sorted by increasing device id. */
static const struct kfd_deviceid supported_devices[] = {
        { 0x1304, &kaveri_device_info }, /* Kaveri */
        { 0x1305, &kaveri_device_info }, /* Kaveri */
        { 0x1306, &kaveri_device_info }, /* Kaveri */
        { 0x1307, &kaveri_device_info }, /* Kaveri */
        { 0x1309, &kaveri_device_info }, /* Kaveri */
        { 0x130A, &kaveri_device_info }, /* Kaveri */
        { 0x130B, &kaveri_device_info }, /* Kaveri */
        { 0x130C, &kaveri_device_info }, /* Kaveri */
        { 0x130D, &kaveri_device_info }, /* Kaveri */
        { 0x130E, &kaveri_device_info }, /* Kaveri */
        { 0x130F, &kaveri_device_info }, /* Kaveri */
        { 0x1310, &kaveri_device_info }, /* Kaveri */
        { 0x1311, &kaveri_device_info }, /* Kaveri */
        { 0x1312, &kaveri_device_info }, /* Kaveri */
        { 0x1313, &kaveri_device_info }, /* Kaveri */
        { 0x1315, &kaveri_device_info }, /* Kaveri */
        { 0x1316, &kaveri_device_info }, /* Kaveri */
        { 0x1317, &kaveri_device_info }, /* Kaveri */
        { 0x1318, &kaveri_device_info }, /* Kaveri */
        { 0x131B, &kaveri_device_info }, /* Kaveri */
        { 0x131C, &kaveri_device_info }, /* Kaveri */
        { 0x131D, &kaveri_device_info }, /* Kaveri */
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

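/*
 * Map a PCI device id to its kfd_device_info entry, or return NULL if the
 * device is not listed in supported_devices.
 */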
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
                if (supported_devices[i].did == did) {
                        BUG_ON(supported_devices[i].device_info == NULL);
                        return supported_devices[i].device_info;
                }
        }

        return NULL;
}

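/*
 * Allocate and minimally initialize a kfd_dev for a supported device.
 * Full initialization is deferred to kgd2kfd_device_init().
 */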
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
{
        struct kfd_dev *kfd;

        const struct kfd_device_info *device_info =
                lookup_device_info(pdev->device);

        if (!device_info)
                return NULL;

        kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
        if (!kfd)
                return NULL;

        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
        kfd->init_complete = false;

        return kfd;
}

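/*
 * Check that the IOMMUv2 driver reports ATS, PRI and PASID support for this
 * device, derive the PASID limit from the device capabilities and the
 * doorbell process limit, and bind the device to the IOMMUv2 driver.
 */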
static bool device_iommu_pasid_init(struct kfd_dev *kfd)
{
        const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
                                        AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
                                        AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

        struct amd_iommu_device_info iommu_info;
        unsigned int pasid_limit;
        int err;

        err = amd_iommu_device_info(kfd->pdev, &iommu_info);
        if (err < 0) {
                dev_err(kfd_device,
                        "error getting iommu info. is the iommu enabled?\n");
                return false;
        }

        if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
                dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
                        (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
                        (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
                        (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
                return false;
        }

        pasid_limit = min_t(unsigned int,
                        (unsigned int)1 << kfd->device_info->max_pasid_bits,
                        iommu_info.max_pasids);
        /*
         * The last pasid is used for kernel queue doorbells;
         * in the future it might be used for a kernel thread.
         */
        pasid_limit = min_t(unsigned int,
                        pasid_limit,
                        kfd->doorbell_process_limit - 1);

        err = amd_iommu_init_device(kfd->pdev, pasid_limit);
        if (err < 0) {
                dev_err(kfd_device, "error initializing iommu device\n");
                return false;
        }

        if (!kfd_set_pasid_limit(pasid_limit)) {
                dev_err(kfd_device, "error setting pasid limit\n");
                amd_iommu_free_device(kfd->pdev);
                return false;
        }

        return true;
}

static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
{
        struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);

        if (dev)
                kfd_unbind_process_from_device(dev, pasid);
}

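/*
 * Second-stage initialization: size and allocate the GART buffer, then set
 * up the GTT sub-allocator, doorbells, topology entry, interrupt handling,
 * IOMMUv2 binding and the device queue manager. On failure, everything that
 * was already set up is torn down again and kfd->init_complete stays false.
 */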
bool kgd2kfd_device_init(struct kfd_dev *kfd,
                        const struct kgd2kfd_shared_resources *gpu_resources)
{
        unsigned int size;

        kfd->shared_resources = *gpu_resources;

        /* calculate max size of mqds needed for queues */
        size = max_num_of_processes *
                max_num_of_queues_per_process *
                kfd->device_info->mqd_size_aligned;

        /*
         * calculate max size of runlist packet.
         * There can be only 2 packets at once
         */
        size += (max_num_of_processes * sizeof(struct pm4_map_process) +
                max_num_of_processes * max_num_of_queues_per_process *
                sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;

        /* Add size of HIQ & DIQ */
        size += KFD_KERNEL_QUEUE_SIZE * 2;

        /* add another 512KB for all other allocations on gart (HPD, fences) */
        size += 512 * 1024;

        if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
                        &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
                dev_err(kfd_device,
                        "Could not allocate %d bytes for device (%x:%x)\n",
                        size, kfd->pdev->vendor, kfd->pdev->device);
                goto out;
        }

        dev_info(kfd_device,
                "Allocated %d bytes on gart for device (%x:%x)\n",
                size, kfd->pdev->vendor, kfd->pdev->device);

        /* Initialize GTT sa with 512 byte chunk size */
        if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
                dev_err(kfd_device,
                        "Error initializing gtt sub-allocator\n");
                goto kfd_gtt_sa_init_error;
        }

        kfd_doorbell_init(kfd);

        if (kfd_topology_add_device(kfd) != 0) {
                dev_err(kfd_device,
                        "Error adding device (%x:%x) to topology\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto kfd_topology_add_device_error;
        }

        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing interrupts for device (%x:%x)\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto kfd_interrupt_error;
        }

        if (!device_iommu_pasid_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing iommuv2 for device (%x:%x)\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto device_iommu_pasid_error;
        }
        amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
                                        iommu_pasid_shutdown_callback);

        kfd->dqm = device_queue_manager_init(kfd);
        if (!kfd->dqm) {
                dev_err(kfd_device,
                        "Error initializing queue manager for device (%x:%x)\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto device_queue_manager_error;
        }

        if (kfd->dqm->start(kfd->dqm) != 0) {
                dev_err(kfd_device,
                        "Error starting queue manager for device (%x:%x)\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto dqm_start_error;
        }

        kfd->init_complete = true;
        dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
                 kfd->pdev->device);

        pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
                sched_policy);

        goto out;

dqm_start_error:
        device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
        amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
        kfd_interrupt_exit(kfd);
kfd_interrupt_error:
        kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
        kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
        kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        dev_err(kfd_device,
                "device (%x:%x) NOT added due to errors\n",
                kfd->pdev->vendor, kfd->pdev->device);
out:
        return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
        if (kfd->init_complete) {
                device_queue_manager_uninit(kfd->dqm);
                amd_iommu_free_device(kfd->pdev);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_gtt_sa_fini(kfd);
                kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        }

        kfree(kfd);
}

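/*
 * Suspend stops the device queue manager and releases the IOMMUv2 binding;
 * resume re-binds the device to the IOMMUv2 driver and restarts the queue
 * manager. Both only act on devices whose initialization completed.
 */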
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
        BUG_ON(kfd == NULL);

        if (kfd->init_complete) {
                kfd->dqm->stop(kfd->dqm);
                amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
                amd_iommu_free_device(kfd->pdev);
        }
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
        unsigned int pasid_limit;
        int err;

        BUG_ON(kfd == NULL);

        pasid_limit = kfd_get_pasid_limit();

        if (kfd->init_complete) {
                err = amd_iommu_init_device(kfd->pdev, pasid_limit);
                if (err < 0)
                        return -ENXIO;
                amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
                                                iommu_pasid_shutdown_callback);
                kfd->dqm->start(kfd->dqm);
        }

        return 0;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
        if (kfd->init_complete) {
                spin_lock(&kfd->interrupt_lock);

                if (kfd->interrupts_active
                    && enqueue_ih_ring_entry(kfd, ih_ring_entry))
                        schedule_work(&kfd->interrupt_work);

                spin_unlock(&kfd->interrupt_lock);
        }
}

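/*
 * GTT sub-allocator: the GART buffer allocated in kgd2kfd_device_init() is
 * carved into fixed-size chunks tracked by a bitmap. Typical use:
 *
 *      struct kfd_mem_obj *mem;
 *
 *      if (kfd_gtt_sa_allocate(kfd, size, &mem) == 0) {
 *              ... use mem->gpu_addr / mem->cpu_ptr ...
 *              kfd_gtt_sa_free(kfd, mem);
 *      }
 */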
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size)
{
        unsigned int num_of_bits;

        BUG_ON(!kfd);
        BUG_ON(!kfd->gtt_mem);
        BUG_ON(buf_size < chunk_size);
        BUG_ON(buf_size == 0);
        BUG_ON(chunk_size == 0);

        kfd->gtt_sa_chunk_size = chunk_size;
        kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

        /* one bit per chunk; num_of_bits is the bitmap size in bytes */
        num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
        BUG_ON(num_of_bits == 0);

        kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);

        if (!kfd->gtt_sa_bitmap)
                return -ENOMEM;

        pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
                        kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

        mutex_init(&kfd->gtt_sa_lock);

        return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
        mutex_destroy(&kfd->gtt_sa_lock);
        kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return (uint32_t *) ((uintptr_t) start_addr + bit_num * chunk_size);
}

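/*
 * Allocate 'size' bytes from the GTT sub-allocator. The bitmap is scanned
 * for the first run of contiguous free chunks that is large enough; the
 * chunk range and its GPU/CPU addresses are returned in *mem_obj.
 */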
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                        struct kfd_mem_obj **mem_obj)
{
        unsigned int found, start_search, cur_size;

        BUG_ON(!kfd);

        if (size == 0)
                return -EINVAL;

        if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
                return -ENOMEM;

        *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
        if ((*mem_obj) == NULL)
                return -ENOMEM;

        pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);

        start_search = 0;

        mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
        /* Find the first chunk that is free */
        found = find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks,
                                        start_search);

        pr_debug("kfd: found = %d\n", found);

        /* If there wasn't any free chunk, bail out */
        if (found == kfd->gtt_sa_num_of_chunks)
                goto kfd_gtt_no_free_chunk;

        /* Update fields of mem_obj */
        (*mem_obj)->range_start = found;
        (*mem_obj)->range_end = found;
        (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
                                        kfd->gtt_start_gpu_addr,
                                        found,
                                        kfd->gtt_sa_chunk_size);
        (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
                                        kfd->gtt_start_cpu_ptr,
                                        found,
                                        kfd->gtt_sa_chunk_size);

        pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
                        (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

        /* If we need only one chunk, mark it as allocated and get out */
        if (size <= kfd->gtt_sa_chunk_size) {
                pr_debug("kfd: single bit\n");
                set_bit(found, kfd->gtt_sa_bitmap);
                goto kfd_gtt_out;
        }

        /* Otherwise, try to see if we have enough contiguous chunks */
        cur_size = size - kfd->gtt_sa_chunk_size;
        do {
                (*mem_obj)->range_end =
                        find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks, ++found);
                /*
                 * If the next free chunk is not contiguous, restart the
                 * search from that chunk (the last free one we found, which
                 * wasn't contiguous with the previous ones).
                 */
                if ((*mem_obj)->range_end != found) {
                        start_search = found;
                        goto kfd_gtt_restart_search;
                }

                /*
                 * If we reached the end of the buffer, bail out with an error
                 */
                if (found == kfd->gtt_sa_num_of_chunks)
                        goto kfd_gtt_no_free_chunk;

                /* Check if we don't need another chunk */
                if (cur_size <= kfd->gtt_sa_chunk_size)
                        cur_size = 0;
                else
                        cur_size -= kfd->gtt_sa_chunk_size;

        } while (cur_size > 0);

        pr_debug("kfd: range_start = %d, range_end = %d\n",
                (*mem_obj)->range_start, (*mem_obj)->range_end);

        /* Mark the chunks as allocated */
        for (found = (*mem_obj)->range_start;
                found <= (*mem_obj)->range_end;
                found++)
                set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
        mutex_unlock(&kfd->gtt_sa_lock);
        return 0;

kfd_gtt_no_free_chunk:
        pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
        mutex_unlock(&kfd->gtt_sa_lock);
        kfree(*mem_obj); /* free the object we allocated, not the caller's pointer */
        return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
        unsigned int bit;

        BUG_ON(!kfd);
        BUG_ON(!mem_obj);

        pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
                        mem_obj, mem_obj->range_start, mem_obj->range_end);

        mutex_lock(&kfd->gtt_sa_lock);

        /* Mark the chunks as free */
        for (bit = mem_obj->range_start;
                bit <= mem_obj->range_end;
                bit++)
                clear_bit(bit, kfd->gtt_sa_bitmap);

        mutex_unlock(&kfd->gtt_sa_lock);

        kfree(mem_obj);
        return 0;
}