/*	$NetBSD: vmwgfx_drv.h,v 1.7 2022/10/25 23:34:05 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/sync_file.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>

#include "ttm_lock.h"
#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>


#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20200114"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 17
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @pin_count: pin depth
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct rb_root res_tree;
	s32 pin_count;
	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;


/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @backup_size: Backup buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
 * by resource reserved.
 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 * Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @backup: The backup buffer if any. Protected by resource reserved.
 * @backup_offset: Offset into the backup buffer if any. Protected by resource
 * reserved. Note that only a few resource types can have a @backup_offset
 * different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * backup buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	u32 res_dirty : 1;
	u32 backup_dirty : 1;
	u32 coherent : 1;
	struct vmw_buffer_object *backup;
	unsigned long backup_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	bool mob_attached;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	SVGA3dSurfaceAllFlags flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
	SVGA3dMSPattern multisample_pattern;
	SVGA3dMSQualityLevel quality_level;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};

/**
 * enum vmw_dma_map_mode - indicates how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode used for this scatter/gather table.
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 * @num_pages: Number of pages total.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
#ifndef __NetBSD__		/* XXX */
	struct sg_dma_page_iter iter;
#endif
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_validation_context;
struct vmw_ctx_validation_info;

/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @res_ht_initialized: Whether @res_ht has been initialized
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel;
	struct vmw_fpriv *fp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_buffer_object *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_buffer_object *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};


/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 * @enabled:        Whether the table is currently enabled on the device.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};

struct vmw_private {
	struct ttm_bo_device bdev;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	struct drm_vma_offset_manager vma_manager;
	unsigned long vmw_chipset;
#ifdef __NetBSD__
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_size_t iosz;
#else
	unsigned int io_start;
#endif
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;
	bool has_sm4_1;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	struct mutex global_kms_state_mutex;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	drm_waitqueue_t fence_queue;
	spinlock_t fence_lock;
	drm_waitqueue_t fifo_queue;
	spinlock_t fifo_lock;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;
	bool suspend_locked;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_buffer_object *dummy_query_bo;
	struct vmw_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	/* Validation memory reservation */
	struct vmw_validation_mem vvm;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing-critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	spin_lock(&dev_priv->hw_lock);
#ifdef __NetBSD__
	bus_space_write_4(dev_priv->iot, dev_priv->ioh, VMWGFX_INDEX_PORT,
	    offset);
	bus_space_write_4(dev_priv->iot, dev_priv->ioh, VMWGFX_VALUE_PORT,
	    value);
#else
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
#endif
	spin_unlock(&dev_priv->hw_lock);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	spin_lock(&dev_priv->hw_lock);
#ifdef __NetBSD__
	bus_space_write_4(dev_priv->iot, dev_priv->ioh, VMWGFX_INDEX_PORT,
	    offset);
	val = bus_space_read_4(dev_priv->iot, dev_priv->ioh,
	    VMWGFX_VALUE_PORT);
#else
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
#endif
	spin_unlock(&dev_priv->hw_lock);

	return val;
}
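
/*
 * Example (illustrative sketch only, not driver code): a register
 * read-modify-write built on the accessors above.  SVGA_REG_ENABLE is
 * assumed to come from the SVGA device headers.  Note that hw_lock is
 * taken and dropped inside each call, so the pair below is not atomic
 * as a whole.
 *
 *	u32 v = vmw_read(dev_priv, SVGA_REG_ENABLE);
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, v | 1);
 */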

extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);


/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_buffer_object **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv *
				      converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_backup,
				   struct vmw_buffer_object *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return res->mob_attached;
}

/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
 * without reference
 */
static inline void vmw_user_resource_noref_release(void)
{
	ttm_base_object_noref_release();
}
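
/*
 * Sketch of the noref lookup pattern (hypothetical caller): the pointer
 * returned by vmw_user_resource_noref_lookup_handle() carries no
 * reference, so it may only be used until the matching
 * vmw_user_resource_noref_release() call, with nothing in between that
 * could destroy the resource.
 *
 *	struct vmw_resource *res =
 *		vmw_user_resource_noref_lookup_handle(dev_priv, tfile,
 *						      handle, converter);
 *	if (!IS_ERR(res)) {
 *		... use res here, e.g. add it to a validation list ...
 *		vmw_user_resource_noref_release();
 *	}
 */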

/**
 * Buffer object helper functions - vmwgfx_bo.c
 */
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
				   struct vmw_buffer_object *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible);
extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf,
				     bool interruptible);
extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_buffer_object *bo,
				       bool interruptible);
extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
			struct vmw_buffer_object *bo,
			bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *vmw_bo,
		       size_t size, struct ttm_placement *placement,
		       bool interruptible,
		       void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
				     struct ttm_object_file *tfile);
extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t size,
			     bool shareable,
			     uint32_t *handle,
			     struct vmw_buffer_object **p_dma_buf,
			     struct ttm_base_object **p_base);
extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
				 struct vmw_buffer_object *dma_buf,
				 uint32_t *handle);
extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);

/**
 * vmw_user_bo_noref_release - release a buffer object pointer looked up
 * without reference
 */
static inline void vmw_user_bo_noref_release(void)
{
	ttm_base_object_noref_release();
}

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction
 * priority found among the attached resources to the backing buffer
 * object (mob).
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction
 * priority found among the remaining attached resources to the backing
 * buffer object (mob).
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}
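
/*
 * Usage sketch (illustrative): when a resource is bound to a buffer
 * object, the binding code bumps the count for that resource's priority
 * and re-derives the buffer object's effective eviction priority;
 * unbinding does the inverse.
 *
 *	vmw_bo_prio_add(vbo, res->used_prio);
 *	...
 *	vmw_bo_prio_del(vbo, res->used_prio);
 */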

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
#ifndef __NetBSD__
extern __poll_t vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);
#endif

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);

#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)                        \
({                                                                            \
	vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({                 \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
			  __func__, (unsigned int) __bytes);                  \
		NULL;                                                         \
	});                                                                   \
})

#define VMW_FIFO_RESERVE(__priv, __bytes)                                     \
	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
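
/*
 * Typical submission sketch (illustrative; SVGA3dCmdFoo is a stand-in
 * command body): reserve fifo space, fill in the command, then commit.
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdFoo body;
 *	} *cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	... fill in cmd->header and cmd->body ...
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */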

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

#ifdef __NetBSD__
struct uvm_object;
extern int vmw_mmap_object(struct drm_device *, off_t, size_t, vm_prot_t,
    struct uvm_object **, voff_t, struct file *);
#else
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
#endif

extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
					size_t gran);
/**
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
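
/*
 * Iteration sketch (illustrative): walking the DMA addresses of a buffer
 * object's backing pages.  vmw_piter_start() leaves the iterator just
 * before page @p_offs, so vmw_piter_next() must be called before the
 * first access.
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 */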

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle,
					int32_t out_fence_fd,
					struct sync_file *sync_file);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
				uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
				struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory OBject (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_buffer_object *mob);
extern struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);


/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);

extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);

/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
	.line = 0,				  \
	.line_offset = 0,			  \
	.rect = { .x1 = INT_MAX/2,		  \
		  .y1 = INT_MAX/2,		  \
		  .x2 = INT_MIN/2,		  \
		  .y2 = INT_MIN/2		  \
	},					  \
	.cpp = _cpp,				  \
	.do_cpy = vmw_diff_memcpy,		  \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
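
/*
 * Usage sketch (illustrative): a differential CPU blit for a 32bpp
 * framebuffer, which also yields the bounding box of the bytes that
 * actually changed.
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret = vmw_bo_cpu_blit(dst_bo, dst_offset, dst_stride,
 *				  src_bo, src_offset, src_stride,
 *				  w, h, &diff);
 *	if (ret == 0 && drm_rect_visible(&diff.rect))
 *		... flush diff.rect to the device ...
 */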

/* Host messaging - vmwgfx_msg.c */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
int vmw_host_log(const char *log);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
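
/*
 * Example (illustrative):
 *
 *	VMW_DEBUG_USER("Invalid surface handle %u.\n", handle);
 */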

/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end);
#ifdef __NetBSD__
struct uvm_fault_info;
struct vm_page;
int vmw_bo_vm_fault(struct uvm_fault_info *, vaddr_t, struct vm_page **,
    int, int, vm_prot_t, int);
int vmw_bo_vm_mkwrite(struct uvm_fault_info *, vaddr_t, struct vm_page **,
    int, int, vm_prot_t, int);
#else
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
#endif

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)                                               \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
{
	struct vmw_buffer_object *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		ttm_bo_put(&tmp_buf->base);
	}
}

static inline struct vmw_buffer_object *
vmw_bo_reference(struct vmw_buffer_object *buf)
{
	ttm_bo_get(&buf->base);
	return buf;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return &ttm_mem_glob;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform an MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform an MMIO write to volatile memory
 *
 * @value: The value to write
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
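
/*
 * Example (illustrative): the fifo registers live in memremap'd MMIO
 * space (dev_priv->mmio_virt) and are accessed through these helpers,
 * e.g., with SVGA_FIFO_* indices from the device headers:
 *
 *	u32 *fifo_mem = dev_priv->mmio_virt;
 *	u32 fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
 *	vmw_mmio_write(seqno, fifo_mem + SVGA_FIFO_FENCE);
 */
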
#endif /* _VMWGFX_DRV_H_ */