// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
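
/*
 * With the header aligned to VMW_CMDBUF_INLINE_ALIGN, a struct
 * vmw_cmdbuf_dheader (device header plus inline space) is exactly 1024
 * bytes, so a whole number of them fits in each page of the DMA pool.
 */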

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new command buffers to
 * this context.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
        u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)                             \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
             ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

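        /*
         * Writing SVGA_REG_COMMAND_LOW, which also carries the context id
         * in its low bits, is what actually hands the buffer to the device,
         * so the high half of the DMA address must be written first.
         */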
        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_del(&entry->list);
                list_add_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context still has command buffers in its
 * submitted queue after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically free them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        WARN_ONCE(true, "Command buffer error.\n");
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy;
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;
        bool global_block = false;

        for_each_cmdbuf_ctx(man, i, ctx)
                INIT_LIST_HEAD(&restart_head[i]);

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

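                /*
                 * An error in any context blocks and preempts all contexts
                 * while the recovery below runs.
                 */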
                list_del_init(&entry->list);
                global_block = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        VMW_DEBUG_USER("Unknown command causing device error.\n");
                        VMW_DEBUG_USER("Command buffer offset is %lu\n",
                                       (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
                               cmd_name);
                VMW_DEBUG_USER("Command buffer offset is %lu\n",
                               (unsigned long) cb_hdr->errorOffset);
                VMW_DEBUG_USER("Command size is %lu\n",
                               (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;

                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                man->ctx[i].block_submission = true;

        spin_unlock(&man->lock);

        /* Preempt all contexts */
        if (global_block && vmw_cmdbuf_preempt(man, 0))
                DRM_ERROR("Failed preempting command buffer contexts\n");

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &dummy);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally add all command buffers first in the submitted
                 * queue, to rerun them.
                 */
                ctx->block_submission = false;
                list_splice_init(&restart_head[i], &ctx->submitted);
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        if (global_block && vmw_cmdbuf_startstop(man, 0, true))
                DRM_ERROR("Failed restarting command buffer contexts\n");

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_fifo_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

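        /*
         * Wait with command buffer irqs enabled so that buffer completions
         * wake up @idle_queue.
         */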
        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, and false otherwise.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
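                /*
                 * Processing the contexts frees completed buffers, which may
                 * release pool space; retry the insertion once after that.
                 */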
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

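        /*
         * The range manager allocates whole pages; convert the node's start
         * and size into a byte offset and size within the mapped pool.
         */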
        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
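        /*
         * Flush the current buffer if the new commands do not fit, or if
         * they target a different DX context than the buffer is tagged with.
         */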
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

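        /*
         * Flush any previous current buffer, then make @header the new
         * current buffer so that its remaining space can be used for
         * subsequent small submissions.
         */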
        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                             size_t size, size_t default_size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX))
                        return -ENOMEM;

                ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
                                    &vmw_mob_ne_placement, 0, false,
                                    &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob) {
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        }

        return ret;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

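        /*
         * A second, high-priority context is available only when the device
         * advertises SVGA_CAP_HP_CMD_QUEUE.
         */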
        man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
                2 : 1;
        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       &dev_priv->dev->pdev->dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        &dev_priv->dev->pdev->dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        ret = vmw_cmdbuf_startstop(man, 0, true);
        if (ret) {
                DRM_ERROR("Failed starting command buffer contexts\n");
                vmw_cmdbuf_man_destroy(man);
                return ERR_PTR(ret);
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        } else {
                dma_free_coherent(&man->dev_priv->dev->pdev->dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        if (vmw_cmdbuf_startstop(man, 0, false))
                DRM_ERROR("Failed stopping command buffer contexts.\n");

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}