/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
/* QXL cmd/ring handling */

#include <linux/delay.h>

#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
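/*
 * A ring is shared with the device: struct ring is its layout in
 * device memory (header followed by the element array), while
 * struct qxl_ring adds the driver-side bookkeeping around it.
 */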
struct ring {
        struct qxl_ring_header header;
        uint8_t elements[];
};

struct qxl_ring {
        struct ring *ring;
        int element_size;
        int n_elements;
        int prod_notify;        /* io port used to notify the device of new entries */
        wait_queue_head_t *push_event;
        spinlock_t lock;
};
void qxl_ring_free(struct qxl_ring *ring)
{
        kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
        ring->ring->header.notify_on_prod = ring->n_elements;
}
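/*
 * Wrap a ring that already exists in device memory; only the
 * driver-side bookkeeping structure is allocated here.
 */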
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
                int element_size,
                int n_elements,
                int prod_notify,
                bool set_prod_notify,
                wait_queue_head_t *push_event)
{
        struct qxl_ring *ring;

        ring = kmalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ring = (struct ring *)header;
        ring->element_size = element_size;
        ring->n_elements = n_elements;
        ring->prod_notify = prod_notify;
        ring->push_event = push_event;
        if (set_prod_notify)
                qxl_ring_init_hdr(ring);
        spin_lock_init(&ring->lock);
        return ring;
}
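/*
 * Returns non-zero while there is room in the ring; when full, arm
 * notify_on_cons so the device wakes us once it consumes an entry.
 */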
static int qxl_check_header(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod - header->cons < header->num_items;
        if (ret == 0)
                header->notify_on_cons = header->cons + 1;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}
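/* A ring is idle when the producer and consumer indices match. */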
int qxl_check_idle(struct qxl_ring *ring)
{
        int ret;
        struct qxl_ring_header *header = &(ring->ring->header);
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        ret = header->prod == header->cons;
        spin_unlock_irqrestore(&ring->lock, flags);
        return ret;
}
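/*
 * Copy new_elt into the next free slot.  If the ring is full, wait
 * for the device to consume an entry (busy-waiting when we cannot
 * sleep), then notify the device through the prod_notify io port if
 * it asked to be woken at this producer position.
 */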
int qxl_ring_push(struct qxl_ring *ring,
                  const void *new_elt, bool interruptible)
{
        struct qxl_ring_header *header = &(ring->ring->header);
        uint8_t *elt;
        int idx, ret;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->prod - header->cons == header->num_items) {
                header->notify_on_cons = header->cons + 1;
                mb();
                spin_unlock_irqrestore(&ring->lock, flags);
                if (!drm_can_sleep()) {
                        while (!qxl_check_header(ring))
                                udelay(1);
                } else {
                        if (interruptible) {
                                ret = wait_event_interruptible(*ring->push_event,
                                                               qxl_check_header(ring));
                                if (ret)
                                        return ret;
                        } else {
                                wait_event(*ring->push_event,
                                           qxl_check_header(ring));
                        }
                }
                spin_lock_irqsave(&ring->lock, flags);
        }

        idx = header->prod & (ring->n_elements - 1);
        elt = ring->ring->elements + idx * ring->element_size;

        memcpy((void *)elt, new_elt, ring->element_size);

        header->prod++;

        mb();

        if (header->prod == header->notify_on_prod)
                outb(0, ring->prod_notify);

        spin_unlock_irqrestore(&ring->lock, flags);
        return 0;
}
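/*
 * Pop one element into 'element'; returns false (and arms
 * notify_on_prod) when the ring is empty.
 */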
static bool qxl_ring_pop(struct qxl_ring *ring,
                         void *element)
{
        volatile struct qxl_ring_header *header = &(ring->ring->header);
        volatile uint8_t *ring_elt;
        int idx;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);
        if (header->cons == header->prod) {
                header->notify_on_prod = header->cons + 1;
                spin_unlock_irqrestore(&ring->lock, flags);
                return false;
        }

        idx = header->cons & (ring->n_elements - 1);
        ring_elt = ring->ring->elements + idx * ring->element_size;

        memcpy(element, (void *)ring_elt, ring->element_size);

        header->cons++;

        spin_unlock_irqrestore(&ring->lock, flags);
        return true;
}
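/*
 * Build a qxl_command that points at the release's data in device
 * address space and push it onto the main command ring.
 */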
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                              uint32_t type, bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

        return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                             uint32_t type, bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

        return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
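/*
 * Kick the gc worker when the release ring has entries pending;
 * optionally wait for the worker to finish.
 */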
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
        if (!qxl_check_idle(qdev->release_ring)) {
                schedule_work(&qdev->gc_work);
                if (flush)
                        flush_work(&qdev->gc_work);
                return true;
        }
        return false;
}
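/*
 * Drain the release ring: each id popped from the ring heads a chain
 * of releases linked through info->next; free them all and wake
 * anyone waiting on release_event.
 */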
int qxl_garbage_collect(struct qxl_device *qdev)
{
        struct qxl_release *release;
        uint64_t id, next_id;
        int i = 0;
        union qxl_release_info *info;

        while (qxl_ring_pop(qdev->release_ring, &id)) {
                DRM_DEBUG_DRIVER("popped %lld\n", id);
                while (id) {
                        release = qxl_release_from_id_locked(qdev, id);
                        if (release == NULL)
                                break;

                        info = qxl_release_map(qdev, release);
                        next_id = info->next;
                        qxl_release_unmap(qdev, release, info);

                        DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
                                         next_id);

                        switch (release->type) {
                        case QXL_RELEASE_DRAWABLE:
                        case QXL_RELEASE_SURFACE_CMD:
                        case QXL_RELEASE_CURSOR_CMD:
                                break;
                        default:
                                DRM_ERROR("unexpected release type\n");
                                break;
                        }
                        id = next_id;

                        qxl_release_free(qdev, release);
                        ++i;
                }
        }

        wake_up_all(&qdev->release_event);
        DRM_DEBUG_DRIVER("%d\n", i);

        return i;
}
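/*
 * Allocate a VRAM bo of the requested size and add it to the
 * release's reservation list.
 */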
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
                          struct qxl_release *release,
                          unsigned long size,
                          struct qxl_bo **_bo)
{
        struct qxl_bo *bo;
        int ret;

        ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
                            false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
        if (ret) {
                DRM_ERROR("failed to allocate VRAM BO\n");
                return ret;
        }
        ret = qxl_release_list_add(release, bo);
        if (ret)
                goto out_unref;

        *_bo = bo;
        return 0;
out_unref:
        qxl_bo_unref(&bo);
        return ret;
}
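/*
 * Issue an asynchronous io command by writing val to the given port
 * and wait (up to 5s) for the completion interrupt.  If an earlier
 * async command is still outstanding, wait for that one first.
 */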
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
        int irq_num;
        long addr = qdev->io_base + port;
        int ret;

        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
        if (qdev->last_sent_io_cmd > irq_num) {
                if (intr)
                        ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                               atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                else
                        ret = wait_event_timeout(qdev->io_cmd_event,
                                                 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
                /* 0 is timeout, just bail; the "hw" has gone away */
                if (ret <= 0)
                        goto out;
                irq_num = atomic_read(&qdev->irq_received_io_cmd);
        }
        outb(val, addr);
        qdev->last_sent_io_cmd = irq_num + 1;
        if (intr)
                ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                                                       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
        else
                ret = wait_event_timeout(qdev->io_cmd_event,
                                         atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
        if (ret > 0)
                ret = 0;
        mutex_unlock(&qdev->async_io_mutex);
        return ret;
}
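/* Non-interruptible variant: restart the wait if it was interrupted. */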
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
        int ret;

restart:
        ret = wait_for_io_cmd_user(qdev, val, port, false);
        if (ret == -ERESTARTSYS)
                goto restart;
}
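/*
 * Ask the device to bring an area of a surface up to date; the rect
 * is validated against the surface dimensions first.
 */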
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
                       const struct qxl_rect *area)
{
        int surface_id;
        uint32_t surface_width, surface_height;
        int ret;

        if (!surf->hw_surf_alloc)
                DRM_ERROR("got io update area with no hw surface\n");

        if (surf->is_primary)
                surface_id = 0;
        else
                surface_id = surf->surface_id;
        surface_width = surf->surf.width;
        surface_height = surf->surf.height;

        if (area->left < 0 || area->top < 0 ||
            area->right > surface_width || area->bottom > surface_height)
                return -EINVAL;

        mutex_lock(&qdev->update_area_mutex);
        qdev->ram_header->update_area = *area;
        qdev->ram_header->update_surface = surface_id;
        ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
        mutex_unlock(&qdev->update_area_mutex);
        return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
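/*
 * Tear down the primary surface and drop the reference taken in
 * qxl_io_create_primary().
 */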
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
        qdev->primary_bo->is_primary = false;
        drm_gem_object_put(&qdev->primary_bo->tbo.base);
        qdev->primary_bo = NULL;
}
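/*
 * Describe the bo in the ram header's create_surface slot and tell
 * the device to make it the primary surface; a reference is held on
 * the bo for as long as it stays primary.
 */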
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
        struct qxl_surface_create *create;

        if (WARN_ON(qdev->primary_bo))
                return;

        DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
        create = &qdev->ram_header->create_surface;
        create->format = bo->surf.format;
        create->width = bo->surf.width;
        create->height = bo->surf.height;
        create->stride = bo->surf.stride;
        create->mem = qxl_bo_physical_address(qdev, bo, 0);

        DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

        create->flags = QXL_SURF_FLAG_KEEP_DATA;
        create->type = QXL_SURF_TYPE_PRIMARY;

        wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
        qdev->primary_bo = bo;
        qdev->primary_bo->is_primary = true;
        drm_gem_object_get(&qdev->primary_bo->tbo.base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
        DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
        wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
        outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
        wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
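/*
 * Allocate a surface id from the idr.  Ids must stay below the
 * device's n_surfaces limit, so on overflow the handle is dropped
 * and some surfaces are reaped before retrying.
 */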
int qxl_surface_id_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf)
{
        uint32_t handle;
        int idr_ret;
        int count = 0;
again:
        idr_preload(GFP_ATOMIC);
        spin_lock(&qdev->surf_id_idr_lock);
        idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->surf_id_idr_lock);
        idr_preload_end();
        if (idr_ret < 0)
                return idr_ret;
        handle = idr_ret;

        if (handle >= qdev->rom->n_surfaces) {
                count++;
                spin_lock(&qdev->surf_id_idr_lock);
                idr_remove(&qdev->surf_id_idr, handle);
                spin_unlock(&qdev->surf_id_idr_lock);
                qxl_reap_surface_id(qdev, 2);
                goto again;
        }
        surf->surface_id = handle;

        spin_lock(&qdev->surf_id_idr_lock);
        qdev->last_alloced_surf_id = handle;
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
                            uint32_t surface_id)
{
        spin_lock(&qdev->surf_id_idr_lock);
        idr_remove(&qdev->surf_id_idr, surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
}
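/*
 * Send a QXL_SURFACE_CMD_CREATE for this bo and publish it in the
 * surface idr so the reaper can find it.
 */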
int qxl_hw_surface_alloc(struct qxl_device *qdev,
                         struct qxl_bo *surf)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;

        if (surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
                                                 NULL,
                                                 &release);
        if (ret)
                return ret;

        ret = qxl_release_reserve_list(release, true);
        if (ret) {
                qxl_release_free(qdev, release);
                return ret;
        }
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
        cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
        cmd->u.surface_create.format = surf->surf.format;
        cmd->u.surface_create.width = surf->surf.width;
        cmd->u.surface_create.height = surf->surf.height;
        cmd->u.surface_create.stride = surf->surf.stride;
        cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
        cmd->surface_id = surf->surface_id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        surf->surf_create = release;

        /* no need to add a release to the fence for this surface bo,
           since it is only released when we ask to destroy the surface
           and it would never signal otherwise */
        qxl_release_fence_buffer_objects(release);
        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

        surf->hw_surf_alloc = true;
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}
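/*
 * Send a QXL_SURFACE_CMD_DESTROY.  The bo is hidden from the idr
 * right away, but the surface id itself is only recycled once the
 * destroy release comes back (see release->surface_release_id).
 */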
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
                           struct qxl_bo *surf)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;
        int id;

        if (!surf->hw_surf_alloc)
                return 0;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
                                                 surf->surf_create,
                                                 &release);
        if (ret)
                return ret;

        surf->surf_create = NULL;
        /* remove the surface from the idr, but not the surface id yet */
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        surf->hw_surf_alloc = false;

        id = surf->surface_id;
        surf->surface_id = 0;

        release->surface_release_id = id;
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_DESTROY;
        cmd->surface_id = id;
        qxl_release_unmap(qdev, release, &cmd->release_info);

        qxl_release_fence_buffer_objects(release);
        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

        return 0;
}
static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
        struct qxl_rect rect;
        int ret;

        /* if we are evicting, we need to make sure the surface is up
           to date */
        rect.left = 0;
        rect.right = surf->surf.width;
        rect.top = 0;
        rect.bottom = surf->surf.height;
retry:
        ret = qxl_io_update_area(qdev, surf, &rect);
        if (ret == -ERESTARTSYS)
                goto retry;
        return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        /* no need to update area if we are just freeing the surface normally */
        if (do_update_area)
                qxl_update_surface(qdev, surf);

        /* nuke the surface id at the hw */
        qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
        mutex_lock(&qdev->surf_evict_mutex);
        qxl_surface_evict_locked(qdev, surf, do_update_area);
        mutex_unlock(&qdev->surf_evict_mutex);
}
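/*
 * Reserve the bo, wait for any pending rendering (dropping the evict
 * mutex while stalling), then evict its hw surface.
 */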
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
        int ret;

        ret = qxl_bo_reserve(surf);
        if (ret)
                return ret;

        if (stall)
                mutex_unlock(&qdev->surf_evict_mutex);

        ret = ttm_bo_wait(&surf->tbo, true, !stall);

        if (stall)
                mutex_lock(&qdev->surf_evict_mutex);
        if (ret) {
                qxl_bo_unreserve(surf);
                return ret;
        }

        qxl_surface_evict_locked(qdev, surf, true);
        qxl_bo_unreserve(surf);
        return 0;
}
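/*
 * Walk the surface idr starting just after the most recently
 * allocated id and evict up to max_to_reap surfaces; if nothing can
 * be reaped without stalling, a second stalling pass is made.
 */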
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
        int num_reaped = 0;
        int start = 0, i, ret;
        bool stall = false;

        mutex_lock(&qdev->surf_evict_mutex);
again:

        spin_lock(&qdev->surf_id_idr_lock);
        start = qdev->last_alloced_surf_id + 1;
        spin_unlock(&qdev->surf_id_idr_lock);

        for (i = start; i < start + qdev->rom->n_surfaces; i++) {
                void *objptr;
                int surfid = i % qdev->rom->n_surfaces;

                /* this avoids the case where the object is in the
                   idr but has been evicted half way - it makes
                   the idr lookup atomic with the eviction */
                spin_lock(&qdev->surf_id_idr_lock);
                objptr = idr_find(&qdev->surf_id_idr, surfid);
                spin_unlock(&qdev->surf_id_idr_lock);

                if (!objptr)
                        continue;

                ret = qxl_reap_surf(qdev, objptr, stall);
                if (ret == 0)
                        num_reaped++;
                if (num_reaped >= max_to_reap)
                        break;
        }
        if (num_reaped == 0 && stall == false) {
                stall = true;
                goto again;
        }

        mutex_unlock(&qdev->surf_evict_mutex);
        if (num_reaped) {
                usleep_range(500, 1000);
                qxl_queue_garbage_collect(qdev, true);
        }

        return 0;
}