Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
28 | #include <drm/drmP.h> | |
29 | #include <drm/drm_crtc_helper.h> | |
30 | #include <drm/amdgpu_drm.h> | |
31 | #include "amdgpu.h" | |
32 | #include "amdgpu_ih.h" | |
33 | #include "atom.h" | |
34 | #include "amdgpu_connectors.h" | |
35 | ||
36 | #include <linux/pm_runtime.h> | |
37 | ||
38 | #define AMDGPU_WAIT_IDLE_TIMEOUT 200 | |
39 | ||
40 | /* | |
41 | * Handle hotplug events outside the interrupt handler proper. | |
42 | */ | |
43 | /** | |
44 | * amdgpu_hotplug_work_func - display hotplug work handler | |
45 | * | |
46 | * @work: work struct | |
47 | * | |
48 | * This is the hot plug event work handler (all asics). | |
49 | * The work gets scheduled from the irq handler if there | |
50 | * was a hot plug interrupt. It walks the connector table | |
51 | * and calls the hotplug handler for each one, then sends | |
52 | * a drm hotplug event to alert userspace. | |
53 | */ | |
54 | static void amdgpu_hotplug_work_func(struct work_struct *work) | |
55 | { | |
56 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, | |
57 | hotplug_work); | |
58 | struct drm_device *dev = adev->ddev; | |
59 | struct drm_mode_config *mode_config = &dev->mode_config; | |
60 | struct drm_connector *connector; | |
61 | ||
9e14c65c | 62 | mutex_lock(&mode_config->mutex); |
d38ceaf9 AD |
63 | if (mode_config->num_connector) { |
64 | list_for_each_entry(connector, &mode_config->connector_list, head) | |
65 | amdgpu_connector_hotplug(connector); | |
66 | } | |
9e14c65c | 67 | mutex_unlock(&mode_config->mutex); |
d38ceaf9 AD |
68 | /* Just fire off a uevent and let userspace tell us what to do */ |
69 | drm_helper_hpd_irq_event(dev); | |
70 | } | |
71 | ||
72 | /** | |
73 | * amdgpu_irq_reset_work_func - execute gpu reset | |
74 | * | |
75 | * @work: work struct | |
76 | * | |
77 | * Execute scheduled gpu reset (cayman+). | |
78 | * This function is called when the irq handler | |
79 | * thinks we need a gpu reset. | |
80 | */ | |
81 | static void amdgpu_irq_reset_work_func(struct work_struct *work) | |
82 | { | |
83 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, | |
84 | reset_work); | |
85 | ||
86 | amdgpu_gpu_reset(adev); | |
87 | } | |
88 | ||
/* Disable *all* interrupts: zero every source's software enable
 * refcounts and program AMDGPU_IRQ_STATE_DISABLE into the hardware,
 * all under the irq lock. */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j;
	int r;

	/* irq.lock serializes changes to the hw interrupt enable state */
	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		/* skip unregistered sources and ones we cannot program */
		if (!src || !src->funcs->set || !src->num_types)
			continue;

		for (j = 0; j < src->num_types; ++j) {
			/* drop the software refcount, then disable in hw;
			 * a failure is logged but does not stop the sweep */
			atomic_set(&src->enabled_types[j], 0);
			r = src->funcs->set(adev, src, j,
					    AMDGPU_IRQ_STATE_DISABLE);
			if (r)
				DRM_ERROR("error disabling interrupt (%d)\n",
					  r);
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
114 | ||
/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear bits: drain any IV entries already queued in the IH ring
	 * so stale events are not delivered once irqs are enabled */
	amdgpu_ih_process(adev);
}
132 | ||
/**
 * amdgpu_irq_postinstall - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	/* 0x00ffffff: presumably the hw vblank counter is 24 bits wide,
	 * so tell drm where it wraps — TODO confirm against asic docs */
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}
146 | ||
147 | /** | |
148 | * amdgpu_irq_uninstall - drm irq uninstall callback | |
149 | * | |
150 | * @dev: drm dev pointer | |
151 | * | |
152 | * This function disables all interrupt sources on the GPU (all asics). | |
153 | */ | |
154 | void amdgpu_irq_uninstall(struct drm_device *dev) | |
155 | { | |
156 | struct amdgpu_device *adev = dev->dev_private; | |
157 | ||
158 | if (adev == NULL) { | |
159 | return; | |
160 | } | |
161 | amdgpu_irq_disable_all(adev); | |
162 | } | |
163 | ||
/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: the drm device this handler was registered with
 *
 * This is the irq handler for the amdgpu driver (all asics).
 * Processes the IH ring and, when something was actually handled,
 * refreshes the runtime-pm busy timestamp so the device is not
 * auto-suspended while interrupts are flowing.
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}
182 | ||
183 | /** | |
184 | * amdgpu_msi_ok - asic specific msi checks | |
185 | * | |
186 | * @adev: amdgpu device pointer | |
187 | * | |
188 | * Handles asic specific MSI checks to determine if | |
189 | * MSIs should be enabled on a particular chip (all asics). | |
190 | * Returns true if MSIs should be enabled, false if MSIs | |
191 | * should not be enabled. | |
192 | */ | |
193 | static bool amdgpu_msi_ok(struct amdgpu_device *adev) | |
194 | { | |
195 | /* force MSI on */ | |
196 | if (amdgpu_msi == 1) | |
197 | return true; | |
198 | else if (amdgpu_msi == 0) | |
199 | return false; | |
200 | ||
201 | return true; | |
202 | } | |
203 | ||
/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		/* MSI enable failure is non-fatal; fall back to legacy irq */
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_info(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	/* the work items must exist before the irq handler can queue them */
	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	/* set installed before install so the handler sees consistent state */
	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		/* make sure no stray hotplug work is left running */
		flush_work(&adev->hotplug_work);
		return r;
	}

	DRM_INFO("amdgpu: irq initialized.\n");
	return 0;
}
246 | ||
/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		/* uninstall the handler before releasing MSI / work items */
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
		struct amdgpu_irq_src *src = adev->irq.sources[i];

		if (!src)
			continue;

		/* enabled_types is always owned here (see amdgpu_irq_add_id) */
		kfree(src->enabled_types);
		src->enabled_types = NULL;
		/* NOTE(review): only sources with a ->data payload are freed,
		 * which implies the others are embedded in their IP block and
		 * not heap-owned here — confirm against the IP block code */
		if (src->data) {
			kfree(src->data);
			kfree(src);
			adev->irq.sources[i] = NULL;
		}
	}
}
282 | ||
283 | /** | |
284 | * amdgpu_irq_add_id - register irq source | |
285 | * | |
286 | * @adev: amdgpu device pointer | |
287 | * @src_id: source id for this source | |
288 | * @source: irq source | |
289 | * | |
290 | */ | |
291 | int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id, | |
292 | struct amdgpu_irq_src *source) | |
293 | { | |
294 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) | |
295 | return -EINVAL; | |
296 | ||
297 | if (adev->irq.sources[src_id] != NULL) | |
298 | return -EINVAL; | |
299 | ||
300 | if (!source->funcs) | |
301 | return -EINVAL; | |
302 | ||
303 | if (source->num_types && !source->enabled_types) { | |
304 | atomic_t *types; | |
305 | ||
306 | types = kcalloc(source->num_types, sizeof(atomic_t), | |
307 | GFP_KERNEL); | |
308 | if (!types) | |
309 | return -ENOMEM; | |
310 | ||
311 | source->enabled_types = types; | |
312 | } | |
313 | ||
314 | adev->irq.sources[src_id] = source; | |
5f232365 | 315 | |
d38ceaf9 AD |
316 | return 0; |
317 | } | |
318 | ||
319 | /** | |
320 | * amdgpu_irq_dispatch - dispatch irq to IP blocks | |
321 | * | |
322 | * @adev: amdgpu device pointer | |
323 | * @entry: interrupt vector | |
324 | * | |
325 | * Dispatches the irq to the different IP blocks | |
326 | */ | |
327 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, | |
328 | struct amdgpu_iv_entry *entry) | |
329 | { | |
330 | unsigned src_id = entry->src_id; | |
331 | struct amdgpu_irq_src *src; | |
332 | int r; | |
333 | ||
334 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { | |
335 | DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); | |
336 | return; | |
337 | } | |
338 | ||
5f232365 AD |
339 | if (adev->irq.virq[src_id]) { |
340 | generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); | |
341 | } else { | |
342 | src = adev->irq.sources[src_id]; | |
343 | if (!src) { | |
344 | DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); | |
345 | return; | |
346 | } | |
d38ceaf9 | 347 | |
5f232365 AD |
348 | r = src->funcs->process(adev, src, entry); |
349 | if (r) | |
350 | DRM_ERROR("error processing interrupt (%d)\n", r); | |
351 | } | |
d38ceaf9 AD |
352 | } |
353 | ||
/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics):
 * reads the current software enable refcount and programs the
 * corresponding enable/disable state into the hardware.
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* we need to determine after taking the lock, otherwise
	   we might disable just enabled interrupts again */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	/* program the decided state into the hardware */
	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}
383 | ||
384 | /** | |
385 | * amdgpu_irq_get - enable interrupt | |
386 | * | |
387 | * @adev: amdgpu device pointer | |
388 | * @src: interrupt src you want to enable | |
389 | * @type: type of interrupt you want to enable | |
390 | * | |
391 | * Enables the interrupt type for a specific src (all asics). | |
392 | */ | |
393 | int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | |
394 | unsigned type) | |
395 | { | |
396 | if (!adev->ddev->irq_enabled) | |
397 | return -ENOENT; | |
398 | ||
399 | if (type >= src->num_types) | |
400 | return -EINVAL; | |
401 | ||
402 | if (!src->enabled_types || !src->funcs->set) | |
403 | return -EINVAL; | |
404 | ||
405 | if (atomic_inc_return(&src->enabled_types[type]) == 1) | |
406 | return amdgpu_irq_update(adev, src, type); | |
407 | ||
408 | return 0; | |
409 | } | |
410 | ||
/**
 * amdgpu_irq_get_delayed - bump the irq enable refcount only
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Increments the software enable refcount without programming the
 * hardware (unlike amdgpu_irq_get()).  Returns true only on the
 * 0->1 transition — presumably the caller is then responsible for
 * updating the hw state later; confirm against callers.
 */
bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type)
{
	if ((type >= src->num_types) || !src->enabled_types)
		return false;
	return atomic_inc_return(&src->enabled_types[type]) == 1;
}
419 | ||
420 | /** | |
421 | * amdgpu_irq_put - disable interrupt | |
422 | * | |
423 | * @adev: amdgpu device pointer | |
424 | * @src: interrupt src you want to disable | |
425 | * @type: type of interrupt you want to disable | |
426 | * | |
427 | * Disables the interrupt type for a specific src (all asics). | |
428 | */ | |
429 | int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | |
430 | unsigned type) | |
431 | { | |
432 | if (!adev->ddev->irq_enabled) | |
433 | return -ENOENT; | |
434 | ||
435 | if (type >= src->num_types) | |
436 | return -EINVAL; | |
437 | ||
438 | if (!src->enabled_types || !src->funcs->set) | |
439 | return -EINVAL; | |
440 | ||
441 | if (atomic_dec_and_test(&src->enabled_types[type])) | |
442 | return amdgpu_irq_update(adev, src, type); | |
443 | ||
444 | return 0; | |
445 | } | |
446 | ||
447 | /** | |
448 | * amdgpu_irq_enabled - test if irq is enabled or not | |
449 | * | |
450 | * @adev: amdgpu device pointer | |
451 | * @idx: interrupt src you want to test | |
452 | * | |
453 | * Tests if the given interrupt source is enabled or not | |
454 | */ | |
455 | bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, | |
456 | unsigned type) | |
457 | { | |
458 | if (!adev->ddev->irq_enabled) | |
459 | return false; | |
460 | ||
461 | if (type >= src->num_types) | |
462 | return false; | |
463 | ||
464 | if (!src->enabled_types || !src->funcs->set) | |
465 | return false; | |
466 | ||
467 | return !!atomic_read(&src->enabled_types[type]); | |
468 | } | |
5f232365 AD |
469 | |
/* gen irq */
/* irq_chip mask/unmask callbacks for the GPU-internal irq domain;
 * intentionally empty for now (XXX: masking not implemented). */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* irq_chip backing the Linux irqs created for GPU IH source ids */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
486 | ||
/**
 * amdgpu_irqdomain_map - irq domain map callback
 *
 * @d: irq domain
 * @irq: Linux irq number being created
 * @hwirq: hardware irq number (GPU IH source id)
 *
 * Binds a newly created Linux irq to the amdgpu irq_chip with the
 * simple-irq flow handler.  Rejects hwirq numbers outside the valid
 * IH source id range.
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}
497 | ||
498 | static struct irq_domain_ops amdgpu_hw_irqdomain_ops = { | |
499 | .map = amdgpu_irqdomain_map, | |
500 | }; | |
501 | ||
/**
 * amdgpu_irq_add_domain - create a linear irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Create an irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 * Returns 0 on success, -ENODEV if the domain could not be created.
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	/* one linear domain slot per possible IH source id */
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}
521 | ||
522 | /** | |
523 | * amdgpu_irq_remove_domain - remove the irq domain | |
524 | * | |
525 | * @adev: amdgpu device pointer | |
526 | * | |
527 | * Remove the irq domain for GPU interrupt sources | |
528 | * that may be driven by another driver (e.g., ACP). | |
529 | */ | |
530 | void amdgpu_irq_remove_domain(struct amdgpu_device *adev) | |
531 | { | |
532 | if (adev->irq.domain) { | |
533 | irq_domain_remove(adev->irq.domain); | |
534 | adev->irq.domain = NULL; | |
535 | } | |
536 | } | |
537 | ||
538 | /** | |
539 | * amdgpu_irq_create_mapping - create a mapping between a domain irq and a | |
540 | * Linux irq | |
541 | * | |
542 | * @adev: amdgpu device pointer | |
543 | * @src_id: IH source id | |
544 | * | |
545 | * Create a mapping between a domain irq (GPU IH src id) and a Linux irq | |
546 | * Use this for components that generate a GPU interrupt, but are driven | |
547 | * by a different driver (e.g., ACP). | |
548 | * Returns the Linux irq. | |
549 | */ | |
550 | unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id) | |
551 | { | |
552 | adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); | |
553 | ||
554 | return adev->irq.virq[src_id]; | |
555 | } |