vmwgfx_drv.c revision 1.1.1.1
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

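/*
 * For illustration (an expansion sketch, not extra driver logic): the
 * first vmw_ioctls[] entry below,
 * VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED),
 * expands to
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *		 vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * i.e. the table is indexed by the ioctl number relative to
 * DRM_COMMAND_BASE, which the core DRM_IOCTL_DEF macro doesn't subtract.
 */
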
/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else {
		DRM_ERROR("Dummy query buffer map failed.\n");
	}
	ttm_bo_unreserve(bo);
}
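
/*
 * A possible shape for the kmap_atomic optimization suggested in the
 * TODO above -- an untested sketch, not wired into the driver, and the
 * bo->ttm->pages[0] access path is an assumption that only holds while
 * the bo is backed by system pages (the iomap_atomic variant would be
 * needed for VRAM placements):
 *
 *	struct page *page = bo->ttm->pages[0];
 *	volatile SVGA3dQueryResult *result = kmap_atomic(page);
 *
 *	result->totalSize = sizeof(*result);
 *	result->state = SVGA3D_QUERYSTATE_PENDING;
 *	result->result32 = 0xff;
 *	kunmap_atomic((void *) result);
 */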


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, false, NULL,
			     &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
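
/*
 * Illustrative pairing of the two helpers above, taken from this file:
 * vmw_driver_load() takes the fbdev reference with
 * vmw_3d_resource_inc(dev_priv, true) and vmw_driver_unload() releases
 * it with vmw_3d_resource_dec(dev_priv, false). Every successful inc
 * must be matched by exactly one dec, or the count never reaches zero
 * and the device is never switched back to vga mode.
 */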

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and raising
 * the values to at least VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If either value
 * then exceeds the fb_max_[width|height] fields, which indicates a host
 * error, both are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT]. For example,
 * a host-reported 640x480 is raised to 800x600.
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
	    (dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore the above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;
	dev_priv->bdev.dev_mapping = dev->dev_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
		    &vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
	.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");