/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
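
/*
 * Seqnos use wrap-safe unsigned arithmetic: seqno A is considered passed
 * by counter value P when (u32)(P - A) < VMW_FENCE_WRAP, i.e. when A is
 * at most 2^31 - 1 steps behind P. E.g. with P = 0x00000002 and
 * A = 0xfffffffe, P - A == 4, so A is treated as passed even though the
 * 32-bit counter has wrapped.
 */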

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work, ping_work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};
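
/*
 * Lock ordering in this file: fman->goal_irq_mutex is taken before
 * fman->lock (see vmw_fence_work_func() and vmw_fence_obj_add_action()),
 * never the other way around.
 */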

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used to link this action into the list of events
 * pending on a file private.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
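
/*
 * fman_from_fence() below works because vmw_fence_obj_init() hands
 * fence_init() a pointer to the manager's spinlock: container_of() on
 * that lock pointer recovers the owning manager without storing a
 * back-pointer in every fence object.
 */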

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}

static void vmw_fence_ping_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, ping_work);

	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
}

static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	/* Already passed? Then there is nothing to enable. */
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	if (mutex_trylock(&dev_priv->hw_mutex)) {
		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
		mutex_unlock(&dev_priv->hw_mutex);
	} else
		schedule_work(&fman->ping_work);

	return true;
}
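
/*
 * Design note: enable_signaling is called with the fence spinlock held,
 * so sleeping on hw_mutex is not an option. The mutex_trylock() above
 * pings the host immediately when possible and otherwise defers the
 * ping to the ping_work worker.
 */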

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}
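
/*
 * Design note: the wait above is open-coded instead of using the generic
 * fence_default_wait(), since it must also ping the host, register a
 * seqno waiter so the device raises fence irqs, and re-run
 * __vmw_fences_update() on every wakeup.
 */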

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
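
/*
 * With these ops a vmwgfx fence is an ordinary kernel fence, so generic
 * consumers can wait on it without any driver knowledge. A minimal
 * sketch (illustrative only, not part of this driver), assuming a
 * referenced struct vmw_fence_obj *vfence:
 *
 *	struct fence *f = &vfence->base;
 *
 *	if (!fence_is_signaled(f))
 *		(void) fence_wait_timeout(f, true, HZ);
 */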

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);
	(void) cancel_work_sync(&fman->ping_work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}
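
/*
 * Note that the function above runs with fman->lock held (see
 * __vmw_fences_update() and vmw_fence_obj_add_action()), so seq_passed
 * callbacks must be atomic-safe; only the cleanup callbacks are deferred
 * to the worker.
 */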

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->base.seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}
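
/*
 * Typical call path: the execbuf code obtains a new seqno from the fifo
 * via vmw_fifo_send_fence() and then wraps it in a fence object with
 * vmw_fence_create(); see vmw_execbuf_fence_commands().
 */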

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}
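
/*
 * Reference bookkeeping above: the fence is created with one reference
 * that is handed back through @p_fence, the base object takes an extra
 * one dropped in vmw_user_fence_base_release(), and the memory accounted
 * with ttm_mem_global_alloc() is returned either on the error paths here
 * or in vmw_user_fence_destroy().
 */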

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
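	/*
	 * The shifts approximate division by 10^6:
	 * 2^-20 + 2^-24 - 2^-26 = 67/2^26 ~= 0.99838e-6,
	 * so the result is about 0.16% below an exact division,
	 * close enough for a wait timeout.
	 */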

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the location to receive the tv_sec timestamp
 * when the fence signals.
 * @tv_usec: If non-NULL, the location to receive the tv_usec timestamp
 * when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	/* Length of the userspace-visible event only, not the wrapper. */
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	/* Return exactly what was reserved above. */
	file_priv->event_space += sizeof(event->event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}