1 /**************************************************************************
3 * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #ifndef _VMWGFX_DRV_H_
29 #define _VMWGFX_DRV_H_
31 #include "vmwgfx_reg.h"
33 #include <drm/vmwgfx_drm.h>
34 #include <drm/drm_hashtab.h>
35 #include <linux/suspend.h>
36 #include <drm/ttm/ttm_bo_driver.h>
37 #include <drm/ttm/ttm_object.h>
38 #include <drm/ttm/ttm_lock.h>
39 #include <drm/ttm/ttm_execbuf_util.h>
40 #include <drm/ttm/ttm_module.h>
41 #include "vmwgfx_fence.h"
43 #define VMWGFX_DRIVER_DATE "20140704"
44 #define VMWGFX_DRIVER_MAJOR 2
45 #define VMWGFX_DRIVER_MINOR 6
46 #define VMWGFX_DRIVER_PATCHLEVEL 1
47 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
48 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
49 #define VMWGFX_MAX_RELOCATIONS 2048
50 #define VMWGFX_MAX_VALIDATIONS 2048
51 #define VMWGFX_MAX_DISPLAYS 16
52 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
53 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
56 * Perhaps we should have sysfs entries for these.
58 #define VMWGFX_NUM_GB_CONTEXT 256
59 #define VMWGFX_NUM_GB_SHADER 20000
60 #define VMWGFX_NUM_GB_SURFACE 32768
61 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
62 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
63 VMWGFX_NUM_GB_SHADER +\
64 VMWGFX_NUM_GB_SURFACE +\
65 VMWGFX_NUM_GB_SCREEN_TARGET)
67 #define VMW_PL_GMR TTM_PL_PRIV0
68 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
69 #define VMW_PL_MOB TTM_PL_PRIV1
70 #define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
72 #define VMW_RES_CONTEXT ttm_driver_type0
73 #define VMW_RES_SURFACE ttm_driver_type1
74 #define VMW_RES_STREAM ttm_driver_type2
75 #define VMW_RES_FENCE ttm_driver_type3
76 #define VMW_RES_SHADER ttm_driver_type4
79 struct drm_master *locked_master;
80 struct ttm_object_file *tfile;
81 struct list_head fence_events;
85 struct vmw_dma_buffer {
86 struct ttm_buffer_object base;
87 struct list_head res_list;
91 * struct vmw_validate_buffer - Carries validation info about buffers.
93 * @base: Validation info for TTM.
94 * @hash: Hash entry for quick lookup of the TTM buffer object.
96 * This structure also contains driver-private validation info
97 * on top of the info needed by TTM.
99 struct vmw_validate_buffer {
100 struct ttm_validate_buffer base;
101 struct drm_hash_item hash;
102 bool validate_as_mob;
106 struct vmw_resource {
108 struct vmw_private *dev_priv;
111 unsigned long backup_size;
112 bool res_dirty; /* Protected by backup buffer reserved */
113 bool backup_dirty; /* Protected by backup buffer reserved */
114 struct vmw_dma_buffer *backup;
115 unsigned long backup_offset;
116 unsigned long pin_count; /* Protected by resource reserved */
117 const struct vmw_res_func *func;
118 struct list_head lru_head; /* Protected by the resource lock */
119 struct list_head mob_head; /* Protected by @backup reserved */
120 struct list_head binding_head; /* Protected by binding_mutex */
121 void (*res_free) (struct vmw_resource *res);
122 void (*hw_destroy) (struct vmw_resource *res);
127 * Resources that are managed using ioctls.
138 * Resources that are managed using command streams.
140 enum vmw_cmdbuf_res_type {
141 vmw_cmdbuf_res_compat_shader
144 struct vmw_cmdbuf_res_manager;
146 struct vmw_cursor_snooper {
147 struct drm_crtc *crtc;
152 struct vmw_framebuffer;
153 struct vmw_surface_offset;
156 struct vmw_resource res;
159 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
160 struct drm_vmw_size base_size;
161 struct drm_vmw_size *sizes;
164 /* TODO so far just an extra pointer */
165 struct vmw_cursor_snooper snooper;
166 struct vmw_surface_offset *offsets;
167 SVGA3dTextureFilter autogen_filter;
168 uint32_t multisample_count;
171 struct vmw_marker_queue {
172 struct list_head head;
178 struct vmw_fifo_state {
179 unsigned long reserved_size;
180 __le32 *dynamic_buffer;
181 __le32 *static_buffer;
182 unsigned long static_buffer_size;
183 bool using_bounce_buffer;
184 uint32_t capabilities;
185 struct mutex fifo_mutex;
186 struct rw_semaphore rwsem;
187 struct vmw_marker_queue marker_queue;
190 struct vmw_relocation {
192 SVGAGuestPtr *location;
197 * struct vmw_res_cache_entry - resource information cache entry
199 * @valid: Whether the entry is valid, which also implies that the execbuf
200 * code holds a reference to the resource, and it's placed on the
202 * @handle: User-space handle of a resource.
203 * @res: Non-ref-counted pointer to the resource.
205 * Used to avoid frequent repeated user-space handle lookups of the
208 struct vmw_res_cache_entry {
211 struct vmw_resource *res;
212 struct vmw_resource_val_node *node;
216 * enum vmw_dma_map_mode - indicates how to perform TTM page DMA mappings.
218 enum vmw_dma_map_mode {
219 vmw_dma_phys, /* Use physical page addresses */
220 vmw_dma_alloc_coherent, /* Use TTM coherent pages */
221 vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */
222 vmw_dma_map_bind, /* Unmap from DMA just before unbind */
227 * struct vmw_sg_table - Scatter/gather table for binding, with additional
228 * device-specific information.
230 * @sgt: Pointer to a struct sg_table with binding information
231 * @num_regions: Number of regions with device-address contiguous pages
233 struct vmw_sg_table {
234 enum vmw_dma_map_mode mode;
236 const dma_addr_t *addrs;
237 struct sg_table *sgt;
238 unsigned long num_regions;
239 unsigned long num_pages;
243 * struct vmw_piter - Page iterator that iterates over a list of pages
244 * and DMA addresses that could be either a scatter-gather list or
247 * @pages: Array of page pointers to the pages.
248 * @addrs: DMA addresses to the pages if coherent pages are used.
249 * @iter: Scatter-gather page iterator. Current position in SG list.
250 * @i: Current position in arrays.
251 * @num_pages: Number of pages total.
252 * @next: Function to advance the iterator. Returns false if past the list
253 * of pages, true otherwise.
254 * @dma_address: Function to return the DMA address of the current page.
258 const dma_addr_t *addrs;
259 struct sg_page_iter iter;
261 unsigned long num_pages;
262 bool (*next)(struct vmw_piter *);
263 dma_addr_t (*dma_address)(struct vmw_piter *);
264 struct page *(*page)(struct vmw_piter *);
268 * enum vmw_ctx_binding_type - abstract resource to context binding types
270 enum vmw_ctx_binding_type {
271 vmw_ctx_binding_shader,
278 * struct vmw_ctx_bindinfo - structure representing a single context binding
280 * @ctx: Pointer to the context structure. NULL means the binding is not
282 * @res: Non-ref-counted pointer to the bound resource.
283 * @bt: The binding type.
284 * @i1: Union of information needed to unbind.
286 struct vmw_ctx_bindinfo {
287 struct vmw_resource *ctx;
288 struct vmw_resource *res;
289 enum vmw_ctx_binding_type bt;
292 SVGA3dShaderType shader_type;
293 SVGA3dRenderTargetType rt_type;
294 uint32 texture_stage;
299 * struct vmw_ctx_binding - structure representing a single context binding
300 * - suitable for tracking in a context
302 * @ctx_list: List head for context.
303 * @res_list: List head for bound resource.
306 struct vmw_ctx_binding {
307 struct list_head ctx_list;
308 struct list_head res_list;
309 struct vmw_ctx_bindinfo bi;
314 * struct vmw_ctx_binding_state - context binding state
316 * @list: linked list of individual bindings.
317 * @render_targets: Render target bindings.
318 * @texture_units: Texture units/samplers bindings.
319 * @shaders: Shader bindings.
321 * Note that this structure also provides storage space for the individual
322 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
323 * for individual bindings.
326 struct vmw_ctx_binding_state {
327 struct list_head list;
328 struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
329 struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
330 struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
335 * enum vmw_display_unit_type - Describes the display unit
337 enum vmw_display_unit_type {
344 #define VMW_QUIRK_SCREENTARGET (1U << 0)
346 struct vmw_sw_context {
347 struct drm_open_hash res_ht;
348 bool res_ht_initialized;
349 bool kernel; /**< whether the call was made from the kernel */
350 struct vmw_fpriv *fp;
351 struct list_head validate_nodes;
352 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
354 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
355 uint32_t cur_val_buf;
356 uint32_t *cmd_bounce;
357 uint32_t cmd_bounce_size;
358 struct list_head resource_list;
359 struct ttm_buffer_object *cur_query_bo;
360 struct list_head res_relocations;
362 struct vmw_res_cache_entry res_cache[vmw_res_max];
363 struct vmw_resource *last_query_ctx;
364 bool needs_post_query_barrier;
365 struct vmw_resource *error_resource;
366 struct vmw_ctx_binding_state staged_bindings;
367 struct list_head staged_cmd_res;
371 struct vmw_legacy_display;
375 struct ttm_lock lock;
376 struct mutex fb_surf_mutex;
377 struct list_head fb_surf;
380 struct vmw_vga_topology_state {
389 struct ttm_bo_device bdev;
390 struct ttm_bo_global_ref bo_global_ref;
391 struct drm_global_reference mem_global_ref;
393 struct vmw_fifo_state fifo;
395 struct drm_device *dev;
396 unsigned long vmw_chipset;
397 unsigned int io_start;
400 uint32_t prim_bb_mem;
403 uint32_t fb_max_width;
404 uint32_t fb_max_height;
405 uint32_t initial_width;
406 uint32_t initial_height;
407 __le32 __iomem *mmio_virt;
409 uint32_t capabilities;
410 uint32_t max_gmr_ids;
411 uint32_t max_gmr_pages;
412 uint32_t max_mob_pages;
413 uint32_t max_mob_size;
414 uint32_t memory_size;
424 struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
429 uint32_t vga_pitchlock;
431 uint32_t num_displays;
438 enum vmw_display_unit_type active_display_unit;
439 struct vmw_legacy_display *ldu_priv;
440 struct vmw_screen_object_display *sou_priv;
441 struct vmw_overlay *overlay_priv;
444 * Context and surface management.
447 rwlock_t resource_lock;
448 struct idr res_idr[vmw_res_max];
450 * Block lastclose from racing with firstopen.
453 struct mutex init_mutex;
456 * A resource manager for kernel-only surfaces and
460 struct ttm_object_device *tdev;
467 wait_queue_head_t fence_queue;
468 wait_queue_head_t fifo_queue;
469 spinlock_t waiter_lock;
470 int fence_queue_waiters; /* Protected by waiter_lock */
471 int goal_queue_waiters; /* Protected by waiter_lock */
472 int cmdbuf_waiters; /* Protected by irq_lock */
473 int error_waiters; /* Protected by irq_lock */
474 atomic_t fifo_queue_waiters;
475 uint32_t last_read_seqno;
477 struct vmw_fence_manager *fman;
484 uint32_t traces_state;
485 uint32_t enable_state;
486 uint32_t config_done_state;
492 * Protected by the cmdbuf mutex.
495 struct vmw_sw_context ctx;
496 struct mutex cmdbuf_mutex;
497 struct mutex binding_mutex;
505 spinlock_t svga_lock;
511 struct vmw_master *active_master;
512 struct vmw_master fbdev_master;
513 struct notifier_block pm_nb;
515 bool refuse_hibernation;
517 struct mutex release_mutex;
518 atomic_t num_fifo_resources;
521 * Replace this with an rwsem as soon as we have down_xx_interruptible()
523 struct ttm_lock reservation_sem;
526 * Query processing. These members
527 * are protected by the cmdbuf mutex.
530 struct ttm_buffer_object *dummy_query_bo;
531 struct ttm_buffer_object *pinned_bo;
533 uint32_t query_cid_valid;
534 bool dummy_query_bo_pinned;
537 * Surface swapping. The "surface_lru" list is protected by the
538 * resource lock in order to be able to destroy a surface and take
539 * it off the lru atomically. "used_memory_size" is currently
540 * protected by the cmdbuf mutex for simplicity.
543 struct list_head res_lru[vmw_res_max];
544 uint32_t used_memory_size;
549 enum vmw_dma_map_mode map_mode;
554 struct ttm_buffer_object *otable_bo;
555 struct vmw_otable *otables;
557 struct vmw_cmdbuf_man *cman;
560 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
562 return container_of(res, struct vmw_surface, res);
565 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
567 return (struct vmw_private *)dev->dev_private;
570 static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
572 return (struct vmw_fpriv *)file_priv->driver_priv;
575 static inline struct vmw_master *vmw_master(struct drm_master *master)
577 return (struct vmw_master *) master->driver_priv;
581 * The locking here is fine-grained, so that it is performed once
582 * for every read- and write operation. This is of course costly, but we
583 * don't perform much register access in the timing-critical paths anyway.
584 * In return we get the benefit of being sure that we don't forget
585 * the hw lock around register accesses.
587 static inline void vmw_write(struct vmw_private *dev_priv,
588 unsigned int offset, uint32_t value)
590 unsigned long irq_flags;
592 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
593 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
594 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
595 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
598 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
601 unsigned long irq_flags;
604 spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
605 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
606 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
607 spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
612 extern void vmw_svga_enable(struct vmw_private *dev_priv);
613 extern void vmw_svga_disable(struct vmw_private *dev_priv);
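
/*
 * Example (illustrative sketch only, not part of the driver): a
 * read-modify-write of an SVGA device register using the accessors above.
 * The register offset is kept as a parameter so no particular SVGA_REG_*
 * name needs to be assumed here; vmw_read() is assumed to take the register
 * offset as its second argument, mirroring vmw_write().
 */
static inline void vmw_example_set_reg_bits(struct vmw_private *dev_priv,
					    unsigned int offset,
					    uint32_t bits)
{
	vmw_write(dev_priv, offset, vmw_read(dev_priv, offset) | bits);
}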
617 * GMR utilities - vmwgfx_gmr.c
620 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
621 const struct vmw_sg_table *vsgt,
622 unsigned long num_pages,
624 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
627 * Resource utilities - vmwgfx_resource.c
629 struct vmw_user_resource_conv;
631 extern void vmw_resource_unreference(struct vmw_resource **p_res);
632 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
633 extern struct vmw_resource *
634 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
635 extern int vmw_resource_validate(struct vmw_resource *res);
636 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
637 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
638 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
639 struct ttm_object_file *tfile,
641 struct vmw_surface **out_surf,
642 struct vmw_dma_buffer **out_buf);
643 extern int vmw_user_resource_lookup_handle(
644 struct vmw_private *dev_priv,
645 struct ttm_object_file *tfile,
647 const struct vmw_user_resource_conv *converter,
648 struct vmw_resource **p_res);
649 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
650 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
651 struct vmw_dma_buffer *vmw_bo,
652 size_t size, struct ttm_placement *placement,
654 void (*bo_free) (struct ttm_buffer_object *bo));
655 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
656 struct ttm_object_file *tfile);
657 extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
658 struct ttm_object_file *tfile,
662 struct vmw_dma_buffer **p_dma_buf);
663 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
664 struct vmw_dma_buffer *dma_buf,
666 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
667 struct drm_file *file_priv);
668 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
669 struct drm_file *file_priv);
670 extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
671 struct drm_file *file_priv);
672 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
673 uint32_t cur_validate_node);
674 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
675 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
676 uint32_t id, struct vmw_dma_buffer **out);
677 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
678 struct drm_file *file_priv);
679 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
680 struct drm_file *file_priv);
681 extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
682 struct ttm_object_file *tfile,
684 struct vmw_resource **out);
685 extern void vmw_resource_unreserve(struct vmw_resource *res,
686 struct vmw_dma_buffer *new_backup,
687 unsigned long new_backup_offset);
688 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
689 struct ttm_mem_reg *mem);
690 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
691 struct vmw_fence_obj *fence);
692 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
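
/*
 * Example (illustrative sketch only, not part of the driver): the usual
 * reserve / validate / unreserve sequence for making a resource usable by
 * the device. Passing NULL / 0 to vmw_resource_unreserve() is assumed to
 * keep the current backup buffer and offset unchanged.
 */
static inline int vmw_example_validate_resource(struct vmw_resource *res)
{
	int ret;

	ret = vmw_resource_reserve(res, false);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_validate(res);
	vmw_resource_unreserve(res, NULL, 0);

	return ret;
}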
695 * DMA buffer helper routines - vmwgfx_dmabuf.c
697 extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
698 struct vmw_dma_buffer *bo,
699 struct ttm_placement *placement,
701 extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
702 struct vmw_dma_buffer *buf,
703 bool pin, bool interruptible);
704 extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
705 struct vmw_dma_buffer *buf,
706 bool pin, bool interruptible);
707 extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
708 struct vmw_dma_buffer *bo,
709 bool pin, bool interruptible);
710 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
711 struct vmw_dma_buffer *bo,
713 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
715 extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
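
/*
 * Example (illustrative sketch only, not part of the driver): temporarily
 * pinning a buffer into VRAM around an operation that needs a stable
 * placement. The final, elided argument of vmw_dmabuf_unpin() is assumed
 * here to be "interruptible".
 */
static inline int vmw_example_pin_to_vram(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_dmabuf_to_vram(dev_priv, buf, true, true);
	if (unlikely(ret != 0))
		return ret;

	/* ... use the buffer at its now-fixed VRAM offset ... */

	return vmw_dmabuf_unpin(dev_priv, buf, true);
}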
718 * Misc Ioctl functionality - vmwgfx_ioctl.c
721 extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
722 struct drm_file *file_priv);
723 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
724 struct drm_file *file_priv);
725 extern int vmw_present_ioctl(struct drm_device *dev, void *data,
726 struct drm_file *file_priv);
727 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
728 struct drm_file *file_priv);
729 extern unsigned int vmw_fops_poll(struct file *filp,
730 struct poll_table_struct *wait);
731 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
732 size_t count, loff_t *offset);
735 * Fifo utilities - vmwgfx_fifo.c
738 extern int vmw_fifo_init(struct vmw_private *dev_priv,
739 struct vmw_fifo_state *fifo);
740 extern void vmw_fifo_release(struct vmw_private *dev_priv,
741 struct vmw_fifo_state *fifo);
742 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
743 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
744 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
746 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
747 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
748 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
749 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
750 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
752 extern int vmw_fifo_flush(struct vmw_private *dev_priv,
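
/*
 * Example (illustrative sketch only, not part of the driver): the
 * reserve / fill / commit pattern used for emitting FIFO commands.
 * SVGA_CMD_UPDATE and SVGAFifoCmdUpdate are assumed to be visible through
 * the SVGA register headers pulled in by vmwgfx_reg.h.
 */
static inline int vmw_example_fifo_update(struct vmw_private *dev_priv,
					  uint32_t x, uint32_t y,
					  uint32_t width, uint32_t height)
{
	struct {
		uint32_t cmd;
		SVGAFifoCmdUpdate body;
	} *cmds;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (unlikely(cmds == NULL))
		return -ENOMEM;

	cmds->cmd = SVGA_CMD_UPDATE;
	cmds->body.x = x;
	cmds->body.y = y;
	cmds->body.width = width;
	cmds->body.height = height;

	vmw_fifo_commit(dev_priv, sizeof(*cmds));
	return 0;
}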
756 * TTM glue - vmwgfx_ttm_glue.c
759 extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
760 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
761 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
764 * TTM buffer object driver - vmwgfx_buffer.c
767 extern const size_t vmw_tt_size;
768 extern struct ttm_placement vmw_vram_placement;
769 extern struct ttm_placement vmw_vram_ne_placement;
770 extern struct ttm_placement vmw_vram_sys_placement;
771 extern struct ttm_placement vmw_vram_gmr_placement;
772 extern struct ttm_placement vmw_vram_gmr_ne_placement;
773 extern struct ttm_placement vmw_sys_placement;
774 extern struct ttm_placement vmw_sys_ne_placement;
775 extern struct ttm_placement vmw_evictable_placement;
776 extern struct ttm_placement vmw_srf_placement;
777 extern struct ttm_placement vmw_mob_placement;
778 extern struct ttm_placement vmw_mob_ne_placement;
779 extern struct ttm_bo_driver vmw_bo_driver;
780 extern int vmw_dma_quiescent(struct drm_device *dev);
781 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
782 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
783 extern const struct vmw_sg_table *
784 vmw_bo_sg_table(struct ttm_buffer_object *bo);
785 extern void vmw_piter_start(struct vmw_piter *viter,
786 const struct vmw_sg_table *vsgt,
787 unsigned long p_offs);
790 * vmw_piter_next - Advance the iterator one page.
792 * @viter: Pointer to the iterator to advance.
794 * Returns false if past the list of pages, true otherwise.
796 static inline bool vmw_piter_next(struct vmw_piter *viter)
798 return viter->next(viter);
802 * vmw_piter_dma_addr - Return the DMA address of the current page.
804 * @viter: Pointer to the iterator
806 * Returns the DMA address of the page pointed to by @viter.
808 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
810 return viter->dma_address(viter);
814 * vmw_piter_page - Return a pointer to the current page.
816 * @viter: Pointer to the iterator
818 * Returns a pointer to the page pointed to by @viter.
820 static inline struct page *vmw_piter_page(struct vmw_piter *viter)
822 return viter->page(viter);
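
/*
 * Example (illustrative sketch only, not part of the driver): walking the
 * DMA addresses of a bound buffer with the page iterator. vmw_piter_start()
 * is assumed to position the iterator just before the page at @p_offs, so
 * vmw_piter_next() must be called before the first page is dereferenced.
 */
static inline unsigned long
vmw_example_count_dma_pages(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter viter;
	unsigned long count = 0;

	vmw_piter_start(&viter, vsgt, 0);
	while (vmw_piter_next(&viter)) {
		(void) vmw_piter_dma_addr(&viter);
		++count;
	}

	return count;
}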
826 * Command submission - vmwgfx_execbuf.c
829 extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
830 struct drm_file *file_priv);
831 extern int vmw_execbuf_process(struct drm_file *file_priv,
832 struct vmw_private *dev_priv,
833 void __user *user_commands,
834 void *kernel_commands,
835 uint32_t command_size,
836 uint64_t throttle_us,
838 struct drm_vmw_fence_rep __user
840 struct vmw_fence_obj **out_fence);
841 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
842 struct vmw_fence_obj *fence);
843 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
845 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
846 struct vmw_private *dev_priv,
847 struct vmw_fence_obj **p_fence,
849 extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
850 struct vmw_fpriv *vmw_fp,
852 struct drm_vmw_fence_rep __user
854 struct vmw_fence_obj *fence,
855 uint32_t fence_handle);
858 * IRQs and waiting - vmwgfx_irq.c
861 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
862 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
863 uint32_t seqno, bool interruptible,
864 unsigned long timeout);
865 extern void vmw_irq_preinstall(struct drm_device *dev);
866 extern int vmw_irq_postinstall(struct drm_device *dev);
867 extern void vmw_irq_uninstall(struct drm_device *dev);
868 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
870 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
875 unsigned long timeout);
876 extern void vmw_update_seqno(struct vmw_private *dev_priv,
877 struct vmw_fifo_state *fifo_state);
878 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
879 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
880 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
881 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
882 extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
884 extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
885 u32 flag, int *waiter_count);
888 * Rudimentary fence-like objects currently used only for throttling -
892 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
893 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
894 extern int vmw_marker_push(struct vmw_marker_queue *queue,
896 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
897 uint32_t signaled_seqno);
898 extern int vmw_wait_lag(struct vmw_private *dev_priv,
899 struct vmw_marker_queue *queue, uint32_t us);
902 * Kernel framebuffer - vmwgfx_fb.c
905 int vmw_fb_init(struct vmw_private *vmw_priv);
906 int vmw_fb_close(struct vmw_private *dev_priv);
907 int vmw_fb_off(struct vmw_private *vmw_priv);
908 int vmw_fb_on(struct vmw_private *vmw_priv);
911 * Kernel modesetting - vmwgfx_kms.c
914 int vmw_kms_init(struct vmw_private *dev_priv);
915 int vmw_kms_close(struct vmw_private *dev_priv);
916 int vmw_kms_save_vga(struct vmw_private *vmw_priv);
917 int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
918 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
919 struct drm_file *file_priv);
920 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
921 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
922 struct ttm_object_file *tfile,
923 struct ttm_buffer_object *bo,
924 SVGA3dCmdHeader *header);
925 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
926 unsigned width, unsigned height, unsigned pitch,
927 unsigned bpp, unsigned depth);
928 void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
929 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
932 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
933 int vmw_enable_vblank(struct drm_device *dev, int crtc);
934 void vmw_disable_vblank(struct drm_device *dev, int crtc);
935 int vmw_kms_present(struct vmw_private *dev_priv,
936 struct drm_file *file_priv,
937 struct vmw_framebuffer *vfb,
938 struct vmw_surface *surface,
939 uint32_t sid, int32_t destX, int32_t destY,
940 struct drm_vmw_rect *clips,
942 int vmw_kms_readback(struct vmw_private *dev_priv,
943 struct drm_file *file_priv,
944 struct vmw_framebuffer *vfb,
945 struct drm_vmw_fence_rep __user *user_fence_rep,
946 struct drm_vmw_rect *clips,
948 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
949 struct drm_file *file_priv);
951 int vmw_dumb_create(struct drm_file *file_priv,
952 struct drm_device *dev,
953 struct drm_mode_create_dumb *args);
955 int vmw_dumb_map_offset(struct drm_file *file_priv,
956 struct drm_device *dev, uint32_t handle,
958 int vmw_dumb_destroy(struct drm_file *file_priv,
959 struct drm_device *dev,
961 extern int vmw_resource_pin(struct vmw_resource *res);
962 extern void vmw_resource_unpin(struct vmw_resource *res);
965 * Overlay control - vmwgfx_overlay.c
968 int vmw_overlay_init(struct vmw_private *dev_priv);
969 int vmw_overlay_close(struct vmw_private *dev_priv);
970 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
971 struct drm_file *file_priv);
972 int vmw_overlay_stop_all(struct vmw_private *dev_priv);
973 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
974 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
975 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
976 int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
977 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
978 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
984 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
987 * Prime - vmwgfx_prime.c
990 extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
991 extern int vmw_prime_fd_to_handle(struct drm_device *dev,
992 struct drm_file *file_priv,
993 int fd, u32 *handle);
994 extern int vmw_prime_handle_to_fd(struct drm_device *dev,
995 struct drm_file *file_priv,
996 uint32_t handle, uint32_t flags,
1000 * Memory object (MOB) management - vmwgfx_mob.c
1003 extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
1004 const struct vmw_sg_table *vsgt,
1005 unsigned long num_data_pages, int32_t mob_id);
1006 extern void vmw_mob_unbind(struct vmw_private *dev_priv,
1007 struct vmw_mob *mob);
1008 extern void vmw_mob_destroy(struct vmw_mob *mob);
1009 extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
1010 extern int vmw_otables_setup(struct vmw_private *dev_priv);
1011 extern void vmw_otables_takedown(struct vmw_private *dev_priv);
1014 * Context management - vmwgfx_context.c
1017 extern const struct vmw_user_resource_conv *user_context_converter;
1019 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
1021 extern int vmw_context_check(struct vmw_private *dev_priv,
1022 struct ttm_object_file *tfile,
1024 struct vmw_resource **p_res);
1025 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
1026 struct drm_file *file_priv);
1027 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
1028 struct drm_file *file_priv);
1029 extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
1030 const struct vmw_ctx_bindinfo *ci);
1032 vmw_context_binding_state_transfer(struct vmw_resource *res,
1033 struct vmw_ctx_binding_state *cbs);
1034 extern void vmw_context_binding_res_list_kill(struct list_head *head);
1035 extern void vmw_context_binding_res_list_scrub(struct list_head *head);
1036 extern int vmw_context_rebind_all(struct vmw_resource *ctx);
1037 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
1038 extern struct vmw_cmdbuf_res_manager *
1039 vmw_context_res_man(struct vmw_resource *ctx);
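
/*
 * Example (illustrative sketch only, not part of the driver): recording a
 * vertex-shader binding in a context's binding state. The unbind-info union
 * is assumed to be named @i1 as documented above, and SVGA3D_SHADERTYPE_VS
 * is assumed to come from the SVGA3D headers.
 */
static inline int
vmw_example_track_shader_binding(struct vmw_ctx_binding_state *cbs,
				 struct vmw_resource *ctx,
				 struct vmw_resource *shader)
{
	struct vmw_ctx_bindinfo bi;

	bi.ctx = ctx;
	bi.res = shader;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = SVGA3D_SHADERTYPE_VS;

	return vmw_context_binding_add(cbs, &bi);
}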
1041 * Surface management - vmwgfx_surface.c
1044 extern const struct vmw_user_resource_conv *user_surface_converter;
1046 extern void vmw_surface_res_free(struct vmw_resource *res);
1047 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1048 struct drm_file *file_priv);
1049 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1050 struct drm_file *file_priv);
1051 extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1052 struct drm_file *file_priv);
1053 extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1054 struct drm_file *file_priv);
1055 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1056 struct drm_file *file_priv);
1057 extern int vmw_surface_check(struct vmw_private *dev_priv,
1058 struct ttm_object_file *tfile,
1059 uint32_t handle, int *id);
1060 extern int vmw_surface_validate(struct vmw_private *dev_priv,
1061 struct vmw_surface *srf);
1062 int vmw_surface_gb_priv_define(struct drm_device *dev,
1063 uint32_t user_accounting_size,
1064 uint32_t svga3d_flags,
1065 SVGA3dSurfaceFormat format,
1067 uint32_t num_mip_levels,
1068 uint32_t multisample_count,
1069 struct drm_vmw_size size,
1070 struct vmw_surface **srf_out);
1073 * Shader management - vmwgfx_shader.c
1076 extern const struct vmw_user_resource_conv *user_shader_converter;
1078 extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1079 struct drm_file *file_priv);
1080 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
1081 struct drm_file *file_priv);
1082 extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
1083 struct vmw_cmdbuf_res_manager *man,
1084 u32 user_key, const void *bytecode,
1085 SVGA3dShaderType shader_type,
1087 struct list_head *list);
1088 extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
1089 u32 user_key, SVGA3dShaderType shader_type,
1090 struct list_head *list);
1091 extern struct vmw_resource *
1092 vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1093 u32 user_key, SVGA3dShaderType shader_type);
1096 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
1099 extern struct vmw_cmdbuf_res_manager *
1100 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
1101 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
1102 extern size_t vmw_cmdbuf_res_man_size(void);
1103 extern struct vmw_resource *
1104 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
1105 enum vmw_cmdbuf_res_type res_type,
1107 extern void vmw_cmdbuf_res_revert(struct list_head *list);
1108 extern void vmw_cmdbuf_res_commit(struct list_head *list);
1109 extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
1110 enum vmw_cmdbuf_res_type res_type,
1112 struct vmw_resource *res,
1113 struct list_head *list);
1114 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
1115 enum vmw_cmdbuf_res_type res_type,
1117 struct list_head *list);
1121 * Command buffer management - vmwgfx_cmdbuf.c
1123 struct vmw_cmdbuf_man;
1124 struct vmw_cmdbuf_header;
1126 extern struct vmw_cmdbuf_man *
1127 vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
1128 extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1129 size_t size, size_t default_size);
1130 extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
1131 extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
1132 extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
1133 unsigned long timeout);
1134 extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1135 int ctx_id, bool interruptible,
1136 struct vmw_cmdbuf_header *header);
1137 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1138 struct vmw_cmdbuf_header *header,
1140 extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
1141 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
1142 size_t size, bool interruptible,
1143 struct vmw_cmdbuf_header **p_header);
1144 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
1145 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
1146 bool interruptible);
1150 * Inline helper functions
1153 static inline void vmw_surface_unreference(struct vmw_surface **srf)
1155 struct vmw_surface *tmp_srf = *srf;
1156 struct vmw_resource *res = &tmp_srf->res;
1159 vmw_resource_unreference(&res);
1162 static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
1164 (void) vmw_resource_reference(&srf->res);
1168 static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
1170 struct vmw_dma_buffer *tmp_buf = *buf;
1173 if (tmp_buf != NULL) {
1174 struct ttm_buffer_object *bo = &tmp_buf->base;
1180 static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
1182 if (ttm_bo_reference(&buf->base))
1187 static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
1189 return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
1192 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
1194 atomic_inc(&dev_priv->num_fifo_resources);
1197 static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
1199 atomic_dec(&dev_priv->num_fifo_resources);