      1 /*	$NetBSD: vmwgfx_kms.c,v 1.4 2018/08/27 04:58:37 riastradh Exp $	*/
      2 
      3 /**************************************************************************
      4  *
      5  * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_kms.c,v 1.4 2018/08/27 04:58:37 riastradh Exp $");
     32 
     33 #include "vmwgfx_kms.h"
     34 
     35 /* Might need an hrtimer here? */
     36 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
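/*
 * For example, with HZ = 100 the integer division HZ / 60 is 1, so
 * VMWGFX_PRESENT_RATE is one jiffy (~10 ms); the fallback to 1 only
 * matters when HZ < 60, where HZ / 60 would truncate to 0.
 */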
     37 
     38 void vmw_du_cleanup(struct vmw_display_unit *du)
     39 {
     40 	if (du->cursor_surface)
     41 		vmw_surface_unreference(&du->cursor_surface);
     42 	if (du->cursor_dmabuf)
     43 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
     44 	drm_connector_unregister(&du->connector);
     45 	drm_crtc_cleanup(&du->crtc);
     46 	drm_encoder_cleanup(&du->encoder);
     47 	drm_connector_cleanup(&du->connector);
     48 }
     49 
     50 /*
     51  * Display Unit Cursor functions
     52  */
     53 
     54 int vmw_cursor_update_image(struct vmw_private *dev_priv,
     55 			    u32 *image, u32 width, u32 height,
     56 			    u32 hotspotX, u32 hotspotY)
     57 {
     58 	struct {
     59 		u32 cmd;
     60 		SVGAFifoCmdDefineAlphaCursor cursor;
     61 	} *cmd;
     62 	u32 image_size = width * height * 4;
     63 	u32 cmd_size = sizeof(*cmd) + image_size;
     64 
     65 	if (!image)
     66 		return -EINVAL;
     67 
     68 	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
     69 	if (unlikely(cmd == NULL)) {
     70 		DRM_ERROR("Fifo reserve failed.\n");
     71 		return -ENOMEM;
     72 	}
     73 
     74 	memset(cmd, 0, sizeof(*cmd));
     75 
     76 	memcpy(&cmd[1], image, image_size);
     77 
     78 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
     79 	cmd->cursor.id = 0;
     80 	cmd->cursor.width = width;
     81 	cmd->cursor.height = height;
     82 	cmd->cursor.hotspotX = hotspotX;
     83 	cmd->cursor.hotspotY = hotspotY;
     84 
     85 	vmw_fifo_commit_flush(dev_priv, cmd_size);
     86 
     87 	return 0;
     88 }
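/*
 * The FIFO command built above is laid out with the pixel data copied
 * to &cmd[1], i.e. immediately after the fixed-size part:
 *
 *   u32 id | SVGAFifoCmdDefineAlphaCursor | width * height ARGB u32s
 */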
     89 
     90 int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
     91 			     struct vmw_dma_buffer *dmabuf,
     92 			     u32 width, u32 height,
     93 			     u32 hotspotX, u32 hotspotY)
     94 {
     95 	struct ttm_bo_kmap_obj map;
     96 	unsigned long kmap_offset;
     97 	unsigned long kmap_num;
     98 	void *virtual;
     99 	bool dummy;
    100 	int ret;
    101 
    102 	kmap_offset = 0;
    103 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
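	/*
	 * E.g. a 64x64 ARGB cursor needs 64*64*4 = 16384 bytes, i.e.
	 * four pages with a 4 KiB PAGE_SIZE.
	 */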
    104 
    105 	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
    106 	if (unlikely(ret != 0)) {
    107 		DRM_ERROR("reserve failed\n");
    108 		return -EINVAL;
    109 	}
    110 
    111 	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
    112 	if (unlikely(ret != 0))
    113 		goto err_unreserve;
    114 
    115 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
    116 	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
    117 				      hotspotX, hotspotY);
    118 
    119 	ttm_bo_kunmap(&map);
    120 err_unreserve:
    121 	ttm_bo_unreserve(&dmabuf->base);
    122 
    123 	return ret;
    124 }
    125 
    126 
    127 void vmw_cursor_update_position(struct vmw_private *dev_priv,
    128 				bool show, int x, int y)
    129 {
    130 	u32 *fifo_mem = dev_priv->mmio_virt;
    131 	uint32_t count;
    132 
    133 	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
    134 	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
    135 	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
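	/*
	 * The count register acts as a sequence number: incrementing it
	 * signals the device that the cursor state written above changed.
	 */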
    136 	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
    137 	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
    138 }
    139 
    140 
    141 /*
    142  * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
    143  */
    144 int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
    145 			    uint32_t handle, uint32_t width, uint32_t height,
    146 			    int32_t hot_x, int32_t hot_y)
    147 {
    148 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
    149 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
    150 	struct vmw_surface *surface = NULL;
    151 	struct vmw_dma_buffer *dmabuf = NULL;
    152 	s32 hotspot_x, hotspot_y;
    153 	int ret;
    154 
    155 	/*
    156 	 * FIXME: Unclear whether there's any global state touched by the
    157 	 * cursor_set function, especially vmw_cursor_update_position looks
    158 	 * suspicious. For now take the easy route and reacquire all locks. We
    159 	 * can do this since the caller in the drm core doesn't check anything
    160 	 * which is protected by any locks.
    161 	 */
    162 	drm_modeset_unlock_crtc(crtc);
    163 	drm_modeset_lock_all(dev_priv->dev);
    164 	hotspot_x = hot_x + du->hotspot_x;
    165 	hotspot_y = hot_y + du->hotspot_y;
    166 
    167 	/* A lot of the code assumes this */
    168 	if (handle && (width != 64 || height != 64)) {
    169 		ret = -EINVAL;
    170 		goto out;
    171 	}
    172 
    173 	if (handle) {
    174 		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    175 
    176 		ret = vmw_user_lookup_handle(dev_priv, tfile,
    177 					     handle, &surface, &dmabuf);
    178 		if (ret) {
    179 			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
    180 			ret = -EINVAL;
    181 			goto out;
    182 		}
    183 	}
    184 
    185 	/* need to do this before taking down old image */
    186 	if (surface && !surface->snooper.image) {
    187 		DRM_ERROR("surface not suitable for cursor\n");
    188 		vmw_surface_unreference(&surface);
    189 		ret = -EINVAL;
    190 		goto out;
    191 	}
    192 
    193 	/* takedown old cursor */
    194 	if (du->cursor_surface) {
    195 		du->cursor_surface->snooper.crtc = NULL;
    196 		vmw_surface_unreference(&du->cursor_surface);
    197 	}
    198 	if (du->cursor_dmabuf)
    199 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
    200 
    201 	/* setup new image */
    202 	ret = 0;
    203 	if (surface) {
    204 		/* vmw_user_lookup_handle takes one reference */
    205 		du->cursor_surface = surface;
    206 
    207 		du->cursor_surface->snooper.crtc = crtc;
    208 		du->cursor_age = du->cursor_surface->snooper.age;
    209 		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
    210 					      64, 64, hotspot_x, hotspot_y);
    211 	} else if (dmabuf) {
    212 		/* vmw_user_lookup_handle takes one reference */
    213 		du->cursor_dmabuf = dmabuf;
    214 
    215 		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
    216 					       hotspot_x, hotspot_y);
    217 	} else {
    218 		vmw_cursor_update_position(dev_priv, false, 0, 0);
    219 		goto out;
    220 	}
    221 
    222 	if (!ret) {
    223 		vmw_cursor_update_position(dev_priv, true,
    224 					   du->cursor_x + hotspot_x,
    225 					   du->cursor_y + hotspot_y);
    226 		du->core_hotspot_x = hot_x;
    227 		du->core_hotspot_y = hot_y;
    228 	}
    229 
    230 out:
    231 	drm_modeset_unlock_all(dev_priv->dev);
    232 	drm_modeset_lock_crtc(crtc, crtc->cursor);
    233 
    234 	return ret;
    235 }
    236 
    237 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
    238 {
    239 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
    240 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
    241 	bool shown = du->cursor_surface || du->cursor_dmabuf;
    242 
    243 	du->cursor_x = x + crtc->x;
    244 	du->cursor_y = y + crtc->y;
    245 
    246 	/*
    247 	 * FIXME: Unclear whether there's any global state touched by the
    248 	 * cursor_set function, especially vmw_cursor_update_position looks
    249 	 * suspicious. For now take the easy route and reacquire all locks. We
    250 	 * can do this since the caller in the drm core doesn't check anything
    251 	 * which is protected by any locks.
    252 	 */
    253 	drm_modeset_unlock_crtc(crtc);
    254 	drm_modeset_lock_all(dev_priv->dev);
    255 
    256 	vmw_cursor_update_position(dev_priv, shown,
    257 				   du->cursor_x + du->hotspot_x +
    258 				   du->core_hotspot_x,
    259 				   du->cursor_y + du->hotspot_y +
    260 				   du->core_hotspot_y);
    261 
    262 	drm_modeset_unlock_all(dev_priv->dev);
    263 	drm_modeset_lock_crtc(crtc, crtc->cursor);
    264 
    265 	return 0;
    266 }
    267 
    268 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
    269 			  struct ttm_object_file *tfile,
    270 			  struct ttm_buffer_object *bo,
    271 			  SVGA3dCmdHeader *header)
    272 {
    273 	struct ttm_bo_kmap_obj map;
    274 	unsigned long kmap_offset;
    275 	unsigned long kmap_num;
    276 	SVGA3dCopyBox *box;
    277 	unsigned box_count;
    278 	void *virtual;
    279 	bool dummy;
    280 	struct vmw_dma_cmd {
    281 		SVGA3dCmdHeader header;
    282 		SVGA3dCmdSurfaceDMA dma;
    283 	} *cmd;
    284 	int i, ret;
    285 
    286 	cmd = container_of(header, struct vmw_dma_cmd, header);
    287 
    288 	/* No snooper installed */
    289 	if (!srf->snooper.image)
    290 		return;
    291 
    292 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
    293 		DRM_ERROR("face and mipmap for cursors should always be 0\n");
    294 		return;
    295 	}
    296 
    297 	if (cmd->header.size < 64) {
    298 		DRM_ERROR("at least one full copy box must be given\n");
    299 		return;
    300 	}
    301 
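	/*
	 * Layout of the snooped command (the copy boxes directly follow
	 * the fixed-size DMA command, hence the &cmd[1] cast below):
	 *
	 *   SVGA3dCmdHeader | SVGA3dCmdSurfaceDMA | SVGA3dCopyBox[box_count]
	 */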
    302 	box = (SVGA3dCopyBox *)&cmd[1];
    303 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
    304 			sizeof(SVGA3dCopyBox);
    305 
    306 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
    307 	    box->x != 0    || box->y != 0    || box->z != 0    ||
    308 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
    309 	    box->d != 1    || box_count != 1) {
    310 		/* TODO handle non-page-aligned offsets */
    311 		/* TODO handle dst & src offsets != 0 */
    312 		/* TODO handle more than one copy box */
    313 		DRM_ERROR("Can't snoop dma request for cursor!\n");
    314 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
    315 			  box->srcx, box->srcy, box->srcz,
    316 			  box->x, box->y, box->z,
    317 			  box->w, box->h, box->d, box_count,
    318 			  cmd->dma.guest.ptr.offset);
    319 		return;
    320 	}
    321 
    322 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
    323 	kmap_num = (64*64*4) >> PAGE_SHIFT;
    324 
    325 	ret = ttm_bo_reserve(bo, true, false, false, NULL);
    326 	if (unlikely(ret != 0)) {
    327 		DRM_ERROR("reserve failed\n");
    328 		return;
    329 	}
    330 
    331 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
    332 	if (unlikely(ret != 0))
    333 		goto err_unreserve;
    334 
    335 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
    336 
    337 	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
    338 		memcpy(srf->snooper.image, virtual, 64*64*4);
    339 	} else {
    340 		/* snooper.image is a u32 pointer, so i * 64 steps one 64-pixel row. */
    341 		for (i = 0; i < box->h; i++)
    342 			memcpy(srf->snooper.image + i * 64,
    343 			       virtual + i * cmd->dma.guest.pitch,
    344 			       box->w * 4);
    345 	}
    346 
    347 	srf->snooper.age++;
    348 
    349 	ttm_bo_kunmap(&map);
    350 err_unreserve:
    351 	ttm_bo_unreserve(bo);
    352 }
    353 
    354 /**
    355  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
    356  *
    357  * @dev_priv: Pointer to the device private struct.
    358  *
    359  * Clears all legacy hotspots.
    360  */
    361 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
    362 {
    363 	struct drm_device *dev = dev_priv->dev;
    364 	struct vmw_display_unit *du;
    365 	struct drm_crtc *crtc;
    366 
    367 	drm_modeset_lock_all(dev);
    368 	drm_for_each_crtc(crtc, dev) {
    369 		du = vmw_crtc_to_du(crtc);
    370 
    371 		du->hotspot_x = 0;
    372 		du->hotspot_y = 0;
    373 	}
    374 	drm_modeset_unlock_all(dev);
    375 }
    376 
    377 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
    378 {
    379 	struct drm_device *dev = dev_priv->dev;
    380 	struct vmw_display_unit *du;
    381 	struct drm_crtc *crtc;
    382 
    383 	mutex_lock(&dev->mode_config.mutex);
    384 
    385 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
    386 		du = vmw_crtc_to_du(crtc);
    387 		if (!du->cursor_surface ||
    388 		    du->cursor_age == du->cursor_surface->snooper.age)
    389 			continue;
    390 
    391 		du->cursor_age = du->cursor_surface->snooper.age;
    392 		vmw_cursor_update_image(dev_priv,
    393 					du->cursor_surface->snooper.image,
    394 					64, 64,
    395 					du->hotspot_x + du->core_hotspot_x,
    396 					du->hotspot_y + du->core_hotspot_y);
    397 	}
    398 
    399 	mutex_unlock(&dev->mode_config.mutex);
    400 }
    401 
    402 /*
    403  * Generic framebuffer code
    404  */
    405 
    406 /*
    407  * Surface framebuffer code
    408  */
    409 
    410 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
    411 {
    412 	struct vmw_framebuffer_surface *vfbs =
    413 		vmw_framebuffer_to_vfbs(framebuffer);
    414 
    415 	drm_framebuffer_cleanup(framebuffer);
    416 	vmw_surface_unreference(&vfbs->surface);
    417 	if (vfbs->base.user_obj)
    418 		ttm_base_object_unref(&vfbs->base.user_obj);
    419 
    420 	kfree(vfbs);
    421 }
    422 
    423 static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
    424 				  struct drm_file *file_priv,
    425 				  unsigned flags, unsigned color,
    426 				  struct drm_clip_rect *clips,
    427 				  unsigned num_clips)
    428 {
    429 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
    430 	struct vmw_framebuffer_surface *vfbs =
    431 		vmw_framebuffer_to_vfbs(framebuffer);
    432 	struct drm_clip_rect norect;
    433 	int ret, inc = 1;
    434 
    435 	/* Legacy Display Unit does not support 3D */
    436 	if (dev_priv->active_display_unit == vmw_du_legacy)
    437 		return -EINVAL;
    438 
    439 	drm_modeset_lock_all(dev_priv->dev);
    440 
    441 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
    442 	if (unlikely(ret != 0)) {
    443 		drm_modeset_unlock_all(dev_priv->dev);
    444 		return ret;
    445 	}
    446 
    447 	if (!num_clips) {
    448 		num_clips = 1;
    449 		clips = &norect;
    450 		norect.x1 = norect.y1 = 0;
    451 		norect.x2 = framebuffer->width;
    452 		norect.y2 = framebuffer->height;
    453 	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
    454 		num_clips /= 2;
    455 		inc = 2; /* skip source rects */
    456 	}
    457 
    458 	if (dev_priv->active_display_unit == vmw_du_screen_object)
    459 		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
    460 						   clips, NULL, NULL, 0, 0,
    461 						   num_clips, inc, NULL);
    462 	else
    463 		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
    464 						 clips, NULL, NULL, 0, 0,
    465 						 num_clips, inc, NULL);
    466 
    467 	vmw_fifo_flush(dev_priv, false);
    468 	ttm_read_unlock(&dev_priv->reservation_sem);
    469 
    470 	drm_modeset_unlock_all(dev_priv->dev);
    471 
    472 	return ret;
    473 }
    474 
    475 /**
    476  * vmw_kms_readback - Perform a readback from the screen system to
    477  * a dma-buffer backed framebuffer.
    478  *
    479  * @dev_priv: Pointer to the device private structure.
    480  * @file_priv: Pointer to a struct drm_file identifying the caller.
    481  * Must be set to NULL if @user_fence_rep is NULL.
    482  * @vfb: Pointer to the dma-buffer backed framebuffer.
    483  * @user_fence_rep: User-space provided structure for fence information.
    484  * Must be set to non-NULL if @file_priv is non-NULL.
    485  * @vclips: Array of clip rects.
    486  * @num_clips: Number of clip rects in @vclips.
    487  *
    488  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
    489  * interrupted.
    490  */
    491 int vmw_kms_readback(struct vmw_private *dev_priv,
    492 		     struct drm_file *file_priv,
    493 		     struct vmw_framebuffer *vfb,
    494 		     struct drm_vmw_fence_rep __user *user_fence_rep,
    495 		     struct drm_vmw_rect *vclips,
    496 		     uint32_t num_clips)
    497 {
    498 	switch (dev_priv->active_display_unit) {
    499 	case vmw_du_screen_object:
    500 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
    501 					    user_fence_rep, vclips, num_clips);
    502 	case vmw_du_screen_target:
    503 		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
    504 					user_fence_rep, NULL, vclips, num_clips,
    505 					1, false, true);
    506 	default:
    507 		WARN_ONCE(true,
    508 			  "Readback called with invalid display system.\n");
    509 	}
    510 
    511 	return -ENOSYS;
    512 }
    513 
    514 
    515 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
    516 	.destroy = vmw_framebuffer_surface_destroy,
    517 	.dirty = vmw_framebuffer_surface_dirty,
    518 };
    519 
    520 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
    521 					   struct vmw_surface *surface,
    522 					   struct vmw_framebuffer **out,
    523 					   const struct drm_mode_fb_cmd
    524 					   *mode_cmd,
    525 					   bool is_dmabuf_proxy)
    526 
    527 {
    528 	struct drm_device *dev = dev_priv->dev;
    529 	struct vmw_framebuffer_surface *vfbs;
    530 	enum SVGA3dSurfaceFormat format;
    531 	int ret;
    532 
    533 	/* 3D is only supported on HWv8 and newer hosts */
    534 	if (dev_priv->active_display_unit == vmw_du_legacy)
    535 		return -ENOSYS;
    536 
    537 	/*
    538 	 * Sanity checks.
    539 	 */
    540 
    541 	/* Surface must be marked as a scanout. */
    542 	if (unlikely(!surface->scanout))
    543 		return -EINVAL;
    544 
    545 	if (unlikely(surface->mip_levels[0] != 1 ||
    546 		     surface->num_sizes != 1 ||
    547 		     surface->base_size.width < mode_cmd->width ||
    548 		     surface->base_size.height < mode_cmd->height ||
    549 		     surface->base_size.depth != 1)) {
    550 		DRM_ERROR("Incompatible surface dimensions "
    551 			  "for requested mode.\n");
    552 		return -EINVAL;
    553 	}
    554 
    555 	switch (mode_cmd->depth) {
    556 	case 32:
    557 		format = SVGA3D_A8R8G8B8;
    558 		break;
    559 	case 24:
    560 		format = SVGA3D_X8R8G8B8;
    561 		break;
    562 	case 16:
    563 		format = SVGA3D_R5G6B5;
    564 		break;
    565 	case 15:
    566 		format = SVGA3D_A1R5G5B5;
    567 		break;
    568 	default:
    569 		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
    570 		return -EINVAL;
    571 	}
    572 
    573 	/*
    574 	 * For DX, surface format validation is done when surface->scanout
    575 	 * is set.
    576 	 */
    577 	if (!dev_priv->has_dx && format != surface->format) {
    578 		DRM_ERROR("Invalid surface format for requested mode.\n");
    579 		return -EINVAL;
    580 	}
    581 
    582 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
    583 	if (!vfbs) {
    584 		ret = -ENOMEM;
    585 		goto out_err1;
    586 	}
    587 
    588 	/* XXX get the first 3 from the surface info */
    589 	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
    590 	vfbs->base.base.pitches[0] = mode_cmd->pitch;
    591 	vfbs->base.base.depth = mode_cmd->depth;
    592 	vfbs->base.base.width = mode_cmd->width;
    593 	vfbs->base.base.height = mode_cmd->height;
    594 	vfbs->surface = vmw_surface_reference(surface);
    595 	vfbs->base.user_handle = mode_cmd->handle;
    596 	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
    597 
    598 	*out = &vfbs->base;
    599 
    600 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
    601 				   &vmw_framebuffer_surface_funcs);
    602 	if (ret)
    603 		goto out_err2;
    604 
    605 	return 0;
    606 
    607 out_err2:
    608 	vmw_surface_unreference(&surface);
    609 	kfree(vfbs);
    610 out_err1:
    611 	return ret;
    612 }
    613 
    614 /*
    615  * Dmabuf framebuffer code
    616  */
    617 
    618 static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
    619 {
    620 	struct vmw_framebuffer_dmabuf *vfbd =
    621 		vmw_framebuffer_to_vfbd(framebuffer);
    622 
    623 	drm_framebuffer_cleanup(framebuffer);
    624 	vmw_dmabuf_unreference(&vfbd->buffer);
    625 	if (vfbd->base.user_obj)
    626 		ttm_base_object_unref(&vfbd->base.user_obj);
    627 
    628 	kfree(vfbd);
    629 }
    630 
    631 static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
    632 				 struct drm_file *file_priv,
    633 				 unsigned flags, unsigned color,
    634 				 struct drm_clip_rect *clips,
    635 				 unsigned num_clips)
    636 {
    637 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
    638 	struct vmw_framebuffer_dmabuf *vfbd =
    639 		vmw_framebuffer_to_vfbd(framebuffer);
    640 	struct drm_clip_rect norect;
    641 	int ret, increment = 1;
    642 
    643 	drm_modeset_lock_all(dev_priv->dev);
    644 
    645 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
    646 	if (unlikely(ret != 0)) {
    647 		drm_modeset_unlock_all(dev_priv->dev);
    648 		return ret;
    649 	}
    650 
    651 	if (!num_clips) {
    652 		num_clips = 1;
    653 		clips = &norect;
    654 		norect.x1 = norect.y1 = 0;
    655 		norect.x2 = framebuffer->width;
    656 		norect.y2 = framebuffer->height;
    657 	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
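		/*
		 * ANNOTATE_COPY passes the rects as dst/src pairs; only
		 * every other rect (the destinations) is dirtied.
		 */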
    658 		num_clips /= 2;
    659 		increment = 2;
    660 	}
    661 
    662 	switch (dev_priv->active_display_unit) {
    663 	case vmw_du_screen_target:
    664 		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
    665 				       clips, NULL, num_clips, increment,
    666 				       true, true);
    667 		break;
    668 	case vmw_du_screen_object:
    669 		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
    670 						  clips, num_clips, increment,
    671 						  true,
    672 						  NULL);
    673 		break;
    674 	case vmw_du_legacy:
    675 		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
    676 						  clips, num_clips, increment);
    677 		break;
    678 	default:
    679 		ret = -EINVAL;
    680 		WARN_ONCE(true, "Dirty called with invalid display system.\n");
    681 		break;
    682 	}
    683 
    684 	vmw_fifo_flush(dev_priv, false);
    685 	ttm_read_unlock(&dev_priv->reservation_sem);
    686 
    687 	drm_modeset_unlock_all(dev_priv->dev);
    688 
    689 	return ret;
    690 }
    691 
    692 static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
    693 	.destroy = vmw_framebuffer_dmabuf_destroy,
    694 	.dirty = vmw_framebuffer_dmabuf_dirty,
    695 };
    696 
    697 /**
    698  * Pin the buffer backing @vfb (at the start of VRAM for the legacy unit).
    699  */
    700 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
    701 {
    702 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
    703 	struct vmw_dma_buffer *buf;
    704 	int ret;
    705 
    706 	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
    707 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
    708 
    709 	if (!buf)
    710 		return 0;
    711 
    712 	switch (dev_priv->active_display_unit) {
    713 	case vmw_du_legacy:
    714 		vmw_overlay_pause_all(dev_priv);
    715 		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
    716 		vmw_overlay_resume_all(dev_priv);
    717 		break;
    718 	case vmw_du_screen_object:
    719 	case vmw_du_screen_target:
    720 		if (vfb->dmabuf)
    721 			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
    722 							     false);
    723 
    724 		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
    725 						   &vmw_mob_placement, false);
    726 	default:
    727 		return -EINVAL;
    728 	}
    729 
    730 	return ret;
    731 }
    732 
    733 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
    734 {
    735 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
    736 	struct vmw_dma_buffer *buf;
    737 
    738 	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
    739 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
    740 
    741 	if (WARN_ON(!buf))
    742 		return 0;
    743 
    744 	return vmw_dmabuf_unpin(dev_priv, buf, false);
    745 }
    746 
    747 /**
    748  * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
    749  *
    750  * @dev: DRM device
    751  * @mode_cmd: parameters for the new surface
    752  * @dmabuf_mob: MOB backing the DMA buf
    753  * @srf_out: newly created surface
    754  *
    755  * When the content FB is a DMA buf, we create a surface as a proxy to the
    756  * same buffer.  This way we can do a surface copy rather than a surface DMA.
    757  * This is a more efficient approach.
    758  *
    759  * RETURNS:
    760  * 0 on success, error code otherwise
    761  */
    762 static int vmw_create_dmabuf_proxy(struct drm_device *dev,
    763 				   const struct drm_mode_fb_cmd *mode_cmd,
    764 				   struct vmw_dma_buffer *dmabuf_mob,
    765 				   struct vmw_surface **srf_out)
    766 {
    767 	uint32_t format;
    768 	struct drm_vmw_size content_base_size;
    769 	struct vmw_resource *res;
    770 	unsigned int bytes_pp;
    771 	int ret;
    772 
    773 	switch (mode_cmd->depth) {
    774 	case 32:
    775 	case 24:
    776 		format = SVGA3D_X8R8G8B8;
    777 		bytes_pp = 4;
    778 		break;
    779 
    780 	case 16:
    781 	case 15:
    782 		format = SVGA3D_R5G6B5;
    783 		bytes_pp = 2;
    784 		break;
    785 
    786 	case 8:
    787 		format = SVGA3D_P8;
    788 		bytes_pp = 1;
    789 		break;
    790 
    791 	default:
    792 		DRM_ERROR("Invalid framebuffer color depth %d\n", mode_cmd->depth);
    793 		return -EINVAL;
    794 	}
    795 
    796 	content_base_size.width  = mode_cmd->pitch / bytes_pp;
    797 	content_base_size.height = mode_cmd->height;
    798 	content_base_size.depth  = 1;
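	/*
	 * Example: a 32 bpp mode with a pitch of 5120 bytes yields a
	 * proxy surface width of 5120 / 4 = 1280 pixels.
	 */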
    799 
    800 	ret = vmw_surface_gb_priv_define(dev,
    801 			0, /* kernel visible only */
    802 			0, /* flags */
    803 			format,
    804 			true, /* can be a scanout buffer */
    805 			1, /* num of mip levels */
    806 			0,
    807 			0,
    808 			content_base_size,
    809 			srf_out);
    810 	if (ret) {
    811 		DRM_ERROR("Failed to allocate proxy content buffer\n");
    812 		return ret;
    813 	}
    814 
    815 	res = &(*srf_out)->res;
    816 
    817 	/* Reserve and switch the backing mob. */
    818 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
    819 	(void) vmw_resource_reserve(res, false, true);
    820 	vmw_dmabuf_unreference(&res->backup);
    821 	res->backup = vmw_dmabuf_reference(dmabuf_mob);
    822 	res->backup_offset = 0;
    823 	vmw_resource_unreserve(res, false, NULL, 0);
    824 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
    825 
    826 	return 0;
    827 }
    828 
    829 
    830 
    831 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
    832 					  struct vmw_dma_buffer *dmabuf,
    833 					  struct vmw_framebuffer **out,
    834 					  const struct drm_mode_fb_cmd
    835 					  *mode_cmd)
    836 
    837 {
    838 	struct drm_device *dev = dev_priv->dev;
    839 	struct vmw_framebuffer_dmabuf *vfbd;
    840 	unsigned int requested_size;
    841 	int ret;
    842 
    843 	requested_size = mode_cmd->height * mode_cmd->pitch;
    844 	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
    845 		DRM_ERROR("Screen buffer object size is too small "
    846 			  "for requested mode.\n");
    847 		return -EINVAL;
    848 	}
    849 
    850 	/* Limited framebuffer color depth support for screen objects */
    851 	if (dev_priv->active_display_unit == vmw_du_screen_object) {
    852 		switch (mode_cmd->depth) {
    853 		case 32:
    854 		case 24:
    855 			/* Only support 32 bpp for 32 and 24 depth fbs */
    856 			if (mode_cmd->bpp == 32)
    857 				break;
    858 
    859 			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
    860 				  mode_cmd->depth, mode_cmd->bpp);
    861 			return -EINVAL;
    862 		case 16:
    863 		case 15:
    864 			/* Only support 16 bpp for 16 and 15 depth fbs */
    865 			if (mode_cmd->bpp == 16)
    866 				break;
    867 
    868 			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
    869 				  mode_cmd->depth, mode_cmd->bpp);
    870 			return -EINVAL;
    871 		default:
    872 			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
    873 			return -EINVAL;
    874 		}
    875 	}
    876 
    877 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
    878 	if (!vfbd) {
    879 		ret = -ENOMEM;
    880 		goto out_err1;
    881 	}
    882 
    883 	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
    884 	vfbd->base.base.pitches[0] = mode_cmd->pitch;
    885 	vfbd->base.base.depth = mode_cmd->depth;
    886 	vfbd->base.base.width = mode_cmd->width;
    887 	vfbd->base.base.height = mode_cmd->height;
    888 	vfbd->base.dmabuf = true;
    889 	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
    890 	vfbd->base.user_handle = mode_cmd->handle;
    891 	*out = &vfbd->base;
    892 
    893 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
    894 				   &vmw_framebuffer_dmabuf_funcs);
    895 	if (ret)
    896 		goto out_err2;
    897 
    898 	return 0;
    899 
    900 out_err2:
    901 	vmw_dmabuf_unreference(&dmabuf);
    902 	kfree(vfbd);
    903 out_err1:
    904 	return ret;
    905 }
    906 
    907 /**
    908  * vmw_kms_new_framebuffer - Create a new framebuffer.
    909  *
    910  * @dev_priv: Pointer to device private struct.
    911  * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
    912  * Either @dmabuf or @surface must be NULL.
    913  * @surface: Pointer to a surface to wrap the kms framebuffer around.
    914  * Either @dmabuf or @surface must be NULL.
    915  * @only_2d: No presents will occur to this dma-buffer-backed framebuffer.
    916  * This helps the code to do some important optimizations.
    917  * @mode_cmd: Frame-buffer metadata.
    918  */
    919 struct vmw_framebuffer *
    920 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
    921 			struct vmw_dma_buffer *dmabuf,
    922 			struct vmw_surface *surface,
    923 			bool only_2d,
    924 			const struct drm_mode_fb_cmd *mode_cmd)
    925 {
    926 	struct vmw_framebuffer *vfb = NULL;
    927 	bool is_dmabuf_proxy = false;
    928 	int ret;
    929 
    930 	/*
    931 	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
    932 	 * therefore, wrap the DMA buf in a surface so we can use the
    933 	 * SurfaceCopy command.
    934 	 */
    935 	if (dmabuf && only_2d &&
    936 	    dev_priv->active_display_unit == vmw_du_screen_target) {
    937 		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
    938 					      dmabuf, &surface);
    939 		if (ret)
    940 			return ERR_PTR(ret);
    941 
    942 		is_dmabuf_proxy = true;
    943 	}
    944 
    945 	/* Create the new framebuffer depending on what we have */
    946 	if (surface) {
    947 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
    948 						      mode_cmd,
    949 						      is_dmabuf_proxy);
    950 
    951 		/*
    952 		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
    953 		 * needed
    954 		 */
    955 		if (is_dmabuf_proxy)
    956 			vmw_surface_unreference(&surface);
    957 	} else if (dmabuf) {
    958 		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
    959 						     mode_cmd);
    960 	} else {
    961 		BUG();
    962 	}
    963 
    964 	if (ret)
    965 		return ERR_PTR(ret);
    966 
    967 	vfb->pin = vmw_framebuffer_pin;
    968 	vfb->unpin = vmw_framebuffer_unpin;
    969 
    970 	return vfb;
    971 }
    972 
    973 /*
    974  * Generic Kernel modesetting functions
    975  */
    976 
    977 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
    978 						 struct drm_file *file_priv,
    979 						 struct drm_mode_fb_cmd2 *mode_cmd2)
    980 {
    981 	struct vmw_private *dev_priv = vmw_priv(dev);
    982 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
    983 	struct vmw_framebuffer *vfb = NULL;
    984 	struct vmw_surface *surface = NULL;
    985 	struct vmw_dma_buffer *bo = NULL;
    986 	struct ttm_base_object *user_obj;
    987 	struct drm_mode_fb_cmd mode_cmd;
    988 	int ret;
    989 
    990 	mode_cmd.width = mode_cmd2->width;
    991 	mode_cmd.height = mode_cmd2->height;
    992 	mode_cmd.pitch = mode_cmd2->pitches[0];
    993 	mode_cmd.handle = mode_cmd2->handles[0];
    994 	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
    995 				    &mode_cmd.bpp);
    996 
    997 	/*
    998 	 * This code should be conditioned on Screen Objects not being used.
    999 	 * If screen objects are used, we can allocate a GMR to hold the
   1000 	 * requested framebuffer.
   1001 	 */
   1002 
   1003 	if (!vmw_kms_validate_mode_vram(dev_priv,
   1004 					mode_cmd.pitch,
   1005 					mode_cmd.height)) {
   1006 		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
   1007 		return ERR_PTR(-ENOMEM);
   1008 	}
   1009 
   1010 	/*
   1011 	 * Take a reference on the user object of the resource
   1012 	 * backing the kms fb. This ensures that user-space handle
   1013 	 * lookups on that resource will always work as long as
   1014 	 * it's registered with a kms framebuffer. This is important,
   1015 	 * since vmw_execbuf_process identifies resources in the
   1016 	 * command stream using user-space handles.
   1017 	 */
   1018 
   1019 	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
   1020 	if (unlikely(user_obj == NULL)) {
   1021 		DRM_ERROR("Could not locate requested kms frame buffer.\n");
   1022 		return ERR_PTR(-ENOENT);
   1023 	}
   1024 
   1025 	/*
   1026 	 * End conditioned code.
   1027 	 */
   1028 
   1029 	/* returns either a dmabuf or surface */
   1030 	ret = vmw_user_lookup_handle(dev_priv, tfile,
   1031 				     mode_cmd.handle,
   1032 				     &surface, &bo);
   1033 	if (ret)
   1034 		goto err_out;
   1035 
   1036 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
   1037 				      !(dev_priv->capabilities & SVGA_CAP_3D),
   1038 				      &mode_cmd);
   1039 	if (IS_ERR(vfb)) {
   1040 		ret = PTR_ERR(vfb);
   1041 		goto err_out;
   1042 	}
   1043 
   1044 err_out:
   1045 	/* vmw_user_lookup_handle takes one ref; so does new_fb */
   1046 	if (bo)
   1047 		vmw_dmabuf_unreference(&bo);
   1048 	if (surface)
   1049 		vmw_surface_unreference(&surface);
   1050 
   1051 	if (ret) {
   1052 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
   1053 		ttm_base_object_unref(&user_obj);
   1054 		return ERR_PTR(ret);
   1055 	} else
   1056 		vfb->user_obj = user_obj;
   1057 
   1058 	return &vfb->base;
   1059 }
   1060 
   1061 static const struct drm_mode_config_funcs vmw_kms_funcs = {
   1062 	.fb_create = vmw_kms_fb_create,
   1063 };
   1064 
   1065 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
   1066 				   struct drm_file *file_priv,
   1067 				   struct vmw_framebuffer *vfb,
   1068 				   struct vmw_surface *surface,
   1069 				   uint32_t sid,
   1070 				   int32_t destX, int32_t destY,
   1071 				   struct drm_vmw_rect *clips,
   1072 				   uint32_t num_clips)
   1073 {
   1074 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
   1075 					    &surface->res, destX, destY,
   1076 					    num_clips, 1, NULL);
   1077 }
   1078 
   1079 
   1080 int vmw_kms_present(struct vmw_private *dev_priv,
   1081 		    struct drm_file *file_priv,
   1082 		    struct vmw_framebuffer *vfb,
   1083 		    struct vmw_surface *surface,
   1084 		    uint32_t sid,
   1085 		    int32_t destX, int32_t destY,
   1086 		    struct drm_vmw_rect *clips,
   1087 		    uint32_t num_clips)
   1088 {
   1089 	int ret;
   1090 
   1091 	switch (dev_priv->active_display_unit) {
   1092 	case vmw_du_screen_target:
   1093 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
   1094 						 &surface->res, destX, destY,
   1095 						 num_clips, 1, NULL);
   1096 		break;
   1097 	case vmw_du_screen_object:
   1098 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
   1099 					      sid, destX, destY, clips,
   1100 					      num_clips);
   1101 		break;
   1102 	default:
   1103 		WARN_ONCE(true,
   1104 			  "Present called with invalid display system.\n");
   1105 		ret = -ENOSYS;
   1106 		break;
   1107 	}
   1108 	if (ret)
   1109 		return ret;
   1110 
   1111 	vmw_fifo_flush(dev_priv, false);
   1112 
   1113 	return 0;
   1114 }
   1115 
   1116 int vmw_kms_init(struct vmw_private *dev_priv)
   1117 {
   1118 	struct drm_device *dev = dev_priv->dev;
   1119 	int ret;
   1120 
   1121 	drm_mode_config_init(dev);
   1122 	dev->mode_config.funcs = &vmw_kms_funcs;
   1123 	dev->mode_config.min_width = 1;
   1124 	dev->mode_config.min_height = 1;
   1125 	dev->mode_config.max_width = dev_priv->texture_max_width;
   1126 	dev->mode_config.max_height = dev_priv->texture_max_height;
   1127 
   1128 	ret = vmw_kms_stdu_init_display(dev_priv);
   1129 	if (ret) {
   1130 		ret = vmw_kms_sou_init_display(dev_priv);
   1131 		if (ret) /* Fallback */
   1132 			ret = vmw_kms_ldu_init_display(dev_priv);
   1133 	}
   1134 
   1135 	return ret;
   1136 }
   1137 
   1138 int vmw_kms_close(struct vmw_private *dev_priv)
   1139 {
   1140 	int ret;
   1141 
   1142 	/*
   1143 	 * The docs say we should take the lock before calling this function,
   1144 	 * but since it destroys encoders, and our destructor calls
   1145 	 * drm_encoder_cleanup() which takes the lock, we would deadlock.
   1146 	 */
   1147 	drm_mode_config_cleanup(dev_priv->dev);
   1148 	if (dev_priv->active_display_unit == vmw_du_screen_object)
   1149 		ret = vmw_kms_sou_close_display(dev_priv);
   1150 	else if (dev_priv->active_display_unit == vmw_du_screen_target)
   1151 		ret = vmw_kms_stdu_close_display(dev_priv);
   1152 	else
   1153 		ret = vmw_kms_ldu_close_display(dev_priv);
   1154 
   1155 	return ret;
   1156 }
   1157 
   1158 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
   1159 				struct drm_file *file_priv)
   1160 {
   1161 	struct drm_vmw_cursor_bypass_arg *arg = data;
   1162 	struct vmw_display_unit *du;
   1163 	struct drm_crtc *crtc;
   1164 	int ret = 0;
   1165 
   1166 
   1167 	mutex_lock(&dev->mode_config.mutex);
   1168 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
   1169 
   1170 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1171 			du = vmw_crtc_to_du(crtc);
   1172 			du->hotspot_x = arg->xhot;
   1173 			du->hotspot_y = arg->yhot;
   1174 		}
   1175 
   1176 		mutex_unlock(&dev->mode_config.mutex);
   1177 		return 0;
   1178 	}
   1179 
   1180 	crtc = drm_crtc_find(dev, arg->crtc_id);
   1181 	if (!crtc) {
   1182 		ret = -ENOENT;
   1183 		goto out;
   1184 	}
   1185 
   1186 	du = vmw_crtc_to_du(crtc);
   1187 
   1188 	du->hotspot_x = arg->xhot;
   1189 	du->hotspot_y = arg->yhot;
   1190 
   1191 out:
   1192 	mutex_unlock(&dev->mode_config.mutex);
   1193 
   1194 	return ret;
   1195 }
   1196 
   1197 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
   1198 			unsigned width, unsigned height, unsigned pitch,
   1199 			unsigned bpp, unsigned depth)
   1200 {
   1201 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1202 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
   1203 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1204 		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
   1205 			       SVGA_FIFO_PITCHLOCK);
   1206 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
   1207 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
   1208 	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
   1209 
   1210 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
   1211 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
   1212 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
   1213 		return -EINVAL;
   1214 	}
   1215 
   1216 	return 0;
   1217 }
   1218 
   1219 int vmw_kms_save_vga(struct vmw_private *vmw_priv)
   1220 {
   1221 	struct vmw_vga_topology_state *save;
   1222 	uint32_t i;
   1223 
   1224 	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
   1225 	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
   1226 	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
   1227 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1228 		vmw_priv->vga_pitchlock =
   1229 		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
   1230 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1231 		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
   1232 							SVGA_FIFO_PITCHLOCK);
   1233 
   1234 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
   1235 		return 0;
   1236 
   1237 	vmw_priv->num_displays = vmw_read(vmw_priv,
   1238 					  SVGA_REG_NUM_GUEST_DISPLAYS);
   1239 
   1240 	if (vmw_priv->num_displays == 0)
   1241 		vmw_priv->num_displays = 1;
   1242 
   1243 	for (i = 0; i < vmw_priv->num_displays; ++i) {
   1244 		save = &vmw_priv->vga_save[i];
   1245 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
   1246 		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
   1247 		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
   1248 		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
   1249 		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
   1250 		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
   1251 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
   1252 		if (i == 0 && vmw_priv->num_displays == 1 &&
   1253 		    save->width == 0 && save->height == 0) {
   1254 
   1255 			/*
   1256 			 * It should be fairly safe to assume that these
   1257 			 * values are uninitialized.
   1258 			 */
   1259 
   1260 			save->width = vmw_priv->vga_width - save->pos_x;
   1261 			save->height = vmw_priv->vga_height - save->pos_y;
   1262 		}
   1263 	}
   1264 
   1265 	return 0;
   1266 }
   1267 
   1268 int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
   1269 {
   1270 	struct vmw_vga_topology_state *save;
   1271 	uint32_t i;
   1272 
   1273 	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
   1274 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
   1275 	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
   1276 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1277 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
   1278 			  vmw_priv->vga_pitchlock);
   1279 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1280 		vmw_mmio_write(vmw_priv->vga_pitchlock,
   1281 			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
   1282 
   1283 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
   1284 		return 0;
   1285 
   1286 	for (i = 0; i < vmw_priv->num_displays; ++i) {
   1287 		save = &vmw_priv->vga_save[i];
   1288 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
   1289 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
   1290 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
   1291 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
   1292 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
   1293 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
   1294 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
   1295 	}
   1296 
   1297 	return 0;
   1298 }
   1299 
   1300 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
   1301 				uint32_t pitch,
   1302 				uint32_t height)
   1303 {
   1304 	return ((u64) pitch * (u64) height) < (u64)
   1305 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
   1306 		 dev_priv->prim_bb_mem : dev_priv->vram_size);
   1307 }
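/*
 * Worked example: a 1920x1200 mode at 32 bpp has a 7680-byte pitch, so
 * it needs 7680 * 1200 = 9216000 bytes (~8.8 MiB), which must fit in
 * VRAM (or in prim_bb_mem for screen targets) to validate.
 */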
   1308 
   1309 
   1310 /**
   1311  * Function called by DRM code with vbl_lock held.
   1312  */
   1313 u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
   1314 {
   1315 	return 0;
   1316 }
   1317 
   1318 /**
   1319  * Function called by DRM code with vbl_lock held.
   1320  */
   1321 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
   1322 {
   1323 	return -ENOSYS;
   1324 }
   1325 
   1326 /**
   1327  * Function called by DRM code with vbl_lock held.
   1328  */
   1329 void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
   1330 {
   1331 }
   1332 
   1333 
   1334 /*
   1335  * Small shared kms functions.
   1336  */
   1337 
   1338 static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
   1339 			 struct drm_vmw_rect *rects)
   1340 {
   1341 	struct drm_device *dev = dev_priv->dev;
   1342 	struct vmw_display_unit *du;
   1343 	struct drm_connector *con;
   1344 
   1345 	mutex_lock(&dev->mode_config.mutex);
   1346 
   1347 #if 0
   1348 	{
   1349 		unsigned int i;
   1350 
   1351 		DRM_INFO("%s: new layout ", __func__);
   1352 		for (i = 0; i < num; i++)
   1353 			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
   1354 				 rects[i].w, rects[i].h);
   1355 		DRM_INFO("\n");
   1356 	}
   1357 #endif
   1358 
   1359 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
   1360 		du = vmw_connector_to_du(con);
   1361 		if (num > du->unit) {
   1362 			du->pref_width = rects[du->unit].w;
   1363 			du->pref_height = rects[du->unit].h;
   1364 			du->pref_active = true;
   1365 			du->gui_x = rects[du->unit].x;
   1366 			du->gui_y = rects[du->unit].y;
   1367 		} else {
   1368 			du->pref_width = 800;
   1369 			du->pref_height = 600;
   1370 			du->pref_active = false;
   1371 		}
   1372 		con->status = vmw_du_connector_detect(con, true);
   1373 	}
   1374 
   1375 	mutex_unlock(&dev->mode_config.mutex);
   1376 
   1377 	return 0;
   1378 }
   1379 
   1380 void vmw_du_crtc_save(struct drm_crtc *crtc)
   1381 {
   1382 }
   1383 
   1384 void vmw_du_crtc_restore(struct drm_crtc *crtc)
   1385 {
   1386 }
   1387 
   1388 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
   1389 			   u16 *r, u16 *g, u16 *b,
   1390 			   uint32_t start, uint32_t size)
   1391 {
   1392 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
   1393 	int i;
   1394 
   1395 	for (i = 0; i < size; i++) {
   1396 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
   1397 			  r[i], g[i], b[i]);
   1398 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
   1399 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
   1400 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
   1401 	}
   1402 }
   1403 
   1404 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
   1405 {
   1406 	return 0;
   1407 }
   1408 
   1409 void vmw_du_connector_save(struct drm_connector *connector)
   1410 {
   1411 }
   1412 
   1413 void vmw_du_connector_restore(struct drm_connector *connector)
   1414 {
   1415 }
   1416 
   1417 enum drm_connector_status
   1418 vmw_du_connector_detect(struct drm_connector *connector, bool force)
   1419 {
   1420 	uint32_t num_displays;
   1421 	struct drm_device *dev = connector->dev;
   1422 	struct vmw_private *dev_priv = vmw_priv(dev);
   1423 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
   1424 
   1425 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
   1426 
   1427 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
   1428 		 du->pref_active) ?
   1429 		connector_status_connected : connector_status_disconnected);
   1430 }
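/*
 * For example, if SVGA_REG_NUM_DISPLAYS reads 2, units 0 and 1 report
 * connected while their pref_active flag is set, and unit 2 always
 * reports disconnected.
 */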
   1431 
   1432 static struct drm_display_mode vmw_kms_connector_builtin[] = {
   1433 	/* 640x480@60Hz */
   1434 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
   1435 		   752, 800, 0, 480, 489, 492, 525, 0,
   1436 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
   1437 	/* 800x600@60Hz */
   1438 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
   1439 		   968, 1056, 0, 600, 601, 605, 628, 0,
   1440 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1441 	/* 1024x768@60Hz */
   1442 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
   1443 		   1184, 1344, 0, 768, 771, 777, 806, 0,
   1444 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
   1445 	/* 1152x864@75Hz */
   1446 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
   1447 		   1344, 1600, 0, 864, 865, 868, 900, 0,
   1448 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1449 	/* 1280x768@60Hz */
   1450 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
   1451 		   1472, 1664, 0, 768, 771, 778, 798, 0,
   1452 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1453 	/* 1280x800@60Hz */
   1454 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
   1455 		   1480, 1680, 0, 800, 803, 809, 831, 0,
   1456 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
   1457 	/* 1280x960@60Hz */
   1458 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
   1459 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
   1460 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1461 	/* 1280x1024@60Hz */
   1462 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
   1463 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
   1464 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1465 	/* 1360x768@60Hz */
   1466 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
   1467 		   1536, 1792, 0, 768, 771, 777, 795, 0,
   1468 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1469 	/* 1400x1050@60Hz */
   1470 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
   1471 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
   1472 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1473 	/* 1440x900@60Hz */
   1474 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
   1475 		   1672, 1904, 0, 900, 903, 909, 934, 0,
   1476 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1477 	/* 1600x1200@60Hz */
   1478 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
   1479 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
   1480 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1481 	/* 1680x1050@60Hz */
   1482 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
   1483 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
   1484 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1485 	/* 1792x1344@60Hz */
   1486 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
   1487 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
   1488 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1489 	/* 1856x1392@60Hz */
   1490 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
   1491 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
   1492 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1493 	/* 1920x1200@60Hz */
   1494 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
   1495 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
   1496 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1497 	/* 1920x1440@60Hz */
   1498 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
   1499 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
   1500 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1501 	/* 2560x1600@60Hz */
   1502 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
   1503 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
   1504 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   1505 	/* Terminate */
   1506 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
   1507 };
   1508 
   1509 /**
   1510  * vmw_guess_mode_timing - Provide fake timings for a
   1511  * 60Hz vrefresh mode.
   1512  *
   1513  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
   1514  * members filled in.
   1515  */
   1516 void vmw_guess_mode_timing(struct drm_display_mode *mode)
   1517 {
   1518 	mode->hsync_start = mode->hdisplay + 50;
   1519 	mode->hsync_end = mode->hsync_start + 50;
   1520 	mode->htotal = mode->hsync_end + 50;
   1521 
   1522 	mode->vsync_start = mode->vdisplay + 50;
   1523 	mode->vsync_end = mode->vsync_start + 50;
   1524 	mode->vtotal = mode->vsync_end + 50;
   1525 
   1526 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
   1527 	mode->vrefresh = drm_mode_vrefresh(mode);
   1528 }
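/*
 * Worked example: for a 1024x768 mode, htotal = 1174 and vtotal = 918,
 * so clock = 1174 * 918 / 100 * 6 = 64662 kHz; 64662000 / (1174 * 918)
 * then recovers the intended ~60 Hz vrefresh.
 */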
   1529 
   1530 
   1531 int vmw_du_connector_fill_modes(struct drm_connector *connector,
   1532 				uint32_t max_width, uint32_t max_height)
   1533 {
   1534 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
   1535 	struct drm_device *dev = connector->dev;
   1536 	struct vmw_private *dev_priv = vmw_priv(dev);
   1537 	struct drm_display_mode *mode = NULL;
   1538 	struct drm_display_mode *bmode;
   1539 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
   1540 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
   1541 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1542 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
   1543 	};
   1544 	int i;
   1545 	u32 assumed_bpp = 4;
   1546 
   1547 	if (dev_priv->assume_16bpp)
   1548 		assumed_bpp = 2;
   1549 
   1550 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
   1551 		max_width  = min(max_width,  dev_priv->stdu_max_width);
   1552 		max_height = min(max_height, dev_priv->stdu_max_height);
   1553 	}
   1554 
   1555 	/* Add preferred mode */
   1556 	mode = drm_mode_duplicate(dev, &prefmode);
   1557 	if (!mode)
   1558 		return 0;
   1559 	mode->hdisplay = du->pref_width;
   1560 	mode->vdisplay = du->pref_height;
   1561 	vmw_guess_mode_timing(mode);
   1562 
   1563 	if (vmw_kms_validate_mode_vram(dev_priv,
   1564 					mode->hdisplay * assumed_bpp,
   1565 					mode->vdisplay)) {
   1566 		drm_mode_probed_add(connector, mode);
   1567 	} else {
   1568 		drm_mode_destroy(dev, mode);
   1569 		mode = NULL;
   1570 	}
   1571 
   1572 	if (du->pref_mode) {
   1573 		list_del_init(&du->pref_mode->head);
   1574 		drm_mode_destroy(dev, du->pref_mode);
   1575 	}
   1576 
   1577 	/* mode might be NULL here; this is intended */
   1578 	du->pref_mode = mode;
   1579 
   1580 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
   1581 		bmode = &vmw_kms_connector_builtin[i];
   1582 		if (bmode->hdisplay > max_width ||
   1583 		    bmode->vdisplay > max_height)
   1584 			continue;
   1585 
   1586 		if (!vmw_kms_validate_mode_vram(dev_priv,
   1587 						bmode->hdisplay * assumed_bpp,
   1588 						bmode->vdisplay))
   1589 			continue;
   1590 
   1591 		mode = drm_mode_duplicate(dev, bmode);
   1592 		if (!mode)
   1593 			return 0;
   1594 		mode->vrefresh = drm_mode_vrefresh(mode);
   1595 
   1596 		drm_mode_probed_add(connector, mode);
   1597 	}
   1598 
   1599 	drm_mode_connector_list_update(connector, true);
   1600 	/* Move the preferred mode first to help apps pick the right mode. */
   1601 	drm_mode_sort(&connector->modes);
   1602 
   1603 	return 1;
   1604 }
   1605 
   1606 int vmw_du_connector_set_property(struct drm_connector *connector,
   1607 				  struct drm_property *property,
   1608 				  uint64_t val)
   1609 {
   1610 	return 0;
   1611 }
   1612 
   1613 
   1614 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
   1615 				struct drm_file *file_priv)
   1616 {
   1617 	struct vmw_private *dev_priv = vmw_priv(dev);
   1618 	struct drm_vmw_update_layout_arg *arg =
   1619 		(struct drm_vmw_update_layout_arg *)data;
   1620 	void __user *user_rects;
   1621 	struct drm_vmw_rect *rects;
   1622 	unsigned rects_size;
   1623 	int ret;
   1624 	int i;
   1625 	u64 total_pixels = 0;
   1626 	struct drm_mode_config *mode_config = &dev->mode_config;
   1627 	struct drm_vmw_rect bounding_box = {0};
   1628 
   1629 	if (!arg->num_outputs) {
   1630 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
   1631 		vmw_du_update_layout(dev_priv, 1, &def_rect);
   1632 		return 0;
   1633 	}
   1634 
   1635 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
   1636 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
   1637 			GFP_KERNEL);
   1638 	if (unlikely(!rects))
   1639 		return -ENOMEM;
   1640 
   1641 	user_rects = (void __user *)(unsigned long)arg->rects;
   1642 	ret = copy_from_user(rects, user_rects, rects_size);
   1643 	if (unlikely(ret != 0)) {
   1644 		DRM_ERROR("Failed to get rects.\n");
   1645 		ret = -EFAULT;
   1646 		goto out_free;
   1647 	}
   1648 
   1649 	for (i = 0; i < arg->num_outputs; ++i) {
   1650 		if (rects[i].x < 0 ||
   1651 		    rects[i].y < 0 ||
   1652 		    rects[i].x + rects[i].w > mode_config->max_width ||
   1653 		    rects[i].y + rects[i].h > mode_config->max_height) {
   1654 			DRM_ERROR("Invalid GUI layout.\n");
   1655 			ret = -EINVAL;
   1656 			goto out_free;
   1657 		}
   1658 
   1659 		/*
    1660 		 * bounding_box.w and bounding_box.h are used as
   1661 		 * lower-right coordinates
   1662 		 */
   1663 		if (rects[i].x + rects[i].w > bounding_box.w)
   1664 			bounding_box.w = rects[i].x + rects[i].w;
   1665 
   1666 		if (rects[i].y + rects[i].h > bounding_box.h)
   1667 			bounding_box.h = rects[i].y + rects[i].h;
   1668 
   1669 		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
   1670 	}
   1671 
   1672 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
   1673 		/*
    1674 		 * For Screen Targets, the limits for a topology are:
    1675 		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
    1676 		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
   1677 		 */
    1678 		u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
   1679 		u64 pixel_mem = total_pixels * 4;
   1680 
   1681 		if (bb_mem > dev_priv->prim_bb_mem) {
   1682 			DRM_ERROR("Topology is beyond supported limits.\n");
   1683 			ret = -EINVAL;
   1684 			goto out_free;
   1685 		}
   1686 
   1687 		if (pixel_mem > dev_priv->prim_bb_mem) {
   1688 			DRM_ERROR("Combined output size too large\n");
   1689 			ret = -EINVAL;
   1690 			goto out_free;
   1691 		}
   1692 	}
   1693 
   1694 	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
   1695 
   1696 out_free:
   1697 	kfree(rects);
   1698 	return ret;
   1699 }
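
         /*
          * Worked example (editor annotation, hypothetical prim_bb_mem of
          * 16 MiB = 16777216 bytes): two 1920x1080 outputs side by side give a
          * bounding box of 3840x1080 and 3840 * 1080 * 4 = 16588800 bytes, just
          * under the limit, so the layout is accepted; a third such output
          * pushes both bb_mem and pixel_mem to 24883200 bytes and the topology
          * is rejected.
          */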
   1700 
   1701 /**
   1702  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
   1703  * on a set of cliprects and a set of display units.
   1704  *
   1705  * @dev_priv: Pointer to a device private structure.
   1706  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
    1707  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
   1708  * Cliprects are given in framebuffer coordinates.
   1709  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
   1710  * be NULL. Cliprects are given in source coordinates.
   1711  * @dest_x: X coordinate offset for the crtc / destination clip rects.
   1712  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
   1713  * @num_clips: Number of cliprects in the @clips or @vclips array.
   1714  * @increment: Integer with which to increment the clip counter when looping.
   1715  * Used to skip a predetermined number of clip rects.
   1716  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
   1717  */
   1718 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
   1719 			 struct vmw_framebuffer *framebuffer,
   1720 			 const struct drm_clip_rect *clips,
   1721 			 const struct drm_vmw_rect *vclips,
   1722 			 s32 dest_x, s32 dest_y,
   1723 			 int num_clips,
   1724 			 int increment,
   1725 			 struct vmw_kms_dirty *dirty)
   1726 {
   1727 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
   1728 	struct drm_crtc *crtc;
   1729 	u32 num_units = 0;
   1730 	u32 i, k;
   1731 
   1732 	dirty->dev_priv = dev_priv;
   1733 
   1734 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
   1735 		if (crtc->primary->fb != &framebuffer->base)
   1736 			continue;
   1737 		units[num_units++] = vmw_crtc_to_du(crtc);
   1738 	}
   1739 
   1740 	for (k = 0; k < num_units; k++) {
   1741 		struct vmw_display_unit *unit = units[k];
   1742 		s32 crtc_x = unit->crtc.x;
   1743 		s32 crtc_y = unit->crtc.y;
   1744 		s32 crtc_width = unit->crtc.mode.hdisplay;
   1745 		s32 crtc_height = unit->crtc.mode.vdisplay;
   1746 		const struct drm_clip_rect *clips_ptr = clips;
   1747 		const struct drm_vmw_rect *vclips_ptr = vclips;
   1748 
   1749 		dirty->unit = unit;
   1750 		if (dirty->fifo_reserve_size > 0) {
   1751 			dirty->cmd = vmw_fifo_reserve(dev_priv,
   1752 						      dirty->fifo_reserve_size);
   1753 			if (!dirty->cmd) {
   1754 				DRM_ERROR("Couldn't reserve fifo space "
   1755 					  "for dirty blits.\n");
   1756 				return -ENOMEM;
   1757 			}
   1758 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
   1759 		}
   1760 		dirty->num_hits = 0;
   1761 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
   1762 		       vclips_ptr += increment) {
   1763 			s32 clip_left;
   1764 			s32 clip_top;
   1765 
   1766 			/*
   1767 			 * Select clip array type. Note that integer type
   1768 			 * in @clips is unsigned short, whereas in @vclips
   1769 			 * it's 32-bit.
   1770 			 */
   1771 			if (clips) {
   1772 				dirty->fb_x = (s32) clips_ptr->x1;
   1773 				dirty->fb_y = (s32) clips_ptr->y1;
   1774 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
   1775 					crtc_x;
   1776 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
   1777 					crtc_y;
   1778 			} else {
   1779 				dirty->fb_x = vclips_ptr->x;
   1780 				dirty->fb_y = vclips_ptr->y;
   1781 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
   1782 					dest_x - crtc_x;
   1783 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
   1784 					dest_y - crtc_y;
   1785 			}
   1786 
   1787 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
   1788 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
   1789 
   1790 			/* Skip this clip if it's outside the crtc region */
   1791 			if (dirty->unit_x1 >= crtc_width ||
   1792 			    dirty->unit_y1 >= crtc_height ||
   1793 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
   1794 				continue;
   1795 
   1796 			/* Clip right and bottom to crtc limits */
   1797 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
   1798 					       crtc_width);
   1799 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
   1800 					       crtc_height);
   1801 
   1802 			/* Clip left and top to crtc limits */
   1803 			clip_left = min_t(s32, dirty->unit_x1, 0);
   1804 			clip_top = min_t(s32, dirty->unit_y1, 0);
   1805 			dirty->unit_x1 -= clip_left;
   1806 			dirty->unit_y1 -= clip_top;
   1807 			dirty->fb_x -= clip_left;
   1808 			dirty->fb_y -= clip_top;
   1809 
   1810 			dirty->clip(dirty);
   1811 		}
   1812 
   1813 		dirty->fifo_commit(dirty);
   1814 	}
   1815 
   1816 	return 0;
   1817 }
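
         /*
          * Illustrative sketch (editor annotation): a minimal vmw_kms_dirty
          * closure for the helper above. The callback bodies, the reserve size
          * and the example_* names are hypothetical; real callers encode
          * device-specific blit commands in clip() and commit them in
          * fifo_commit().
          */
         #if 0
         static void example_clip(struct vmw_kms_dirty *dirty)
         {
         	/* Called once per clip that survives crtc clipping; unit-space
         	 * coords are in dirty->unit_x1/y1/x2/y2, framebuffer coords in
         	 * dirty->fb_x/fb_y. Encode one blit command here. */
         	dirty->num_hits++;
         }

         static void example_fifo_commit(struct vmw_kms_dirty *dirty)
         {
         	/* Commit the fifo space reserved by vmw_kms_helper_dirty();
         	 * real implementations commit only what was actually encoded. */
         	vmw_fifo_commit(dirty->dev_priv, dirty->fifo_reserve_size);
         }

         static int example_do_dirty(struct vmw_private *dev_priv,
         			    struct vmw_framebuffer *vfb,
         			    const struct drm_clip_rect *clips,
         			    int num_clips)
         {
         	struct vmw_kms_dirty dirty = {
         		.clip = example_clip,
         		.fifo_commit = example_fifo_commit,
         		.fifo_reserve_size = 64,	/* hypothetical size */
         	};

         	return vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
         				    0, 0, num_clips, 1, &dirty);
         }
         #endif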
   1818 
   1819 /**
   1820  * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
   1821  * command submission.
   1822  *
    1823  * @dev_priv: Pointer to a device private structure.
    1824  * @buf: The buffer object.
    1825  * @interruptible: Whether to perform waits as interruptible.
    1826  * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
    1827  * the buffer will be validated as a GMR. Already pinned buffers will not be
    1828  * validated.
   1829  *
   1830  * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
   1831  * interrupted by a signal.
   1832  */
   1833 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
   1834 				  struct vmw_dma_buffer *buf,
   1835 				  bool interruptible,
   1836 				  bool validate_as_mob)
   1837 {
   1838 	struct ttm_buffer_object *bo = &buf->base;
   1839 	int ret;
   1840 
   1841 	ttm_bo_reserve(bo, false, false, interruptible, NULL);
   1842 	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
   1843 					 validate_as_mob);
   1844 	if (ret)
   1845 		ttm_bo_unreserve(bo);
   1846 
   1847 	return ret;
   1848 }
   1849 
   1850 /**
   1851  * vmw_kms_helper_buffer_revert - Undo the actions of
   1852  * vmw_kms_helper_buffer_prepare.
   1853  *
    1854  * @buf: Pointer to the buffer object.
   1855  *
   1856  * Helper to be used if an error forces the caller to undo the actions of
   1857  * vmw_kms_helper_buffer_prepare.
   1858  */
   1859 void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
   1860 {
   1861 	if (buf)
   1862 		ttm_bo_unreserve(&buf->base);
   1863 }
   1864 
   1865 /**
   1866  * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
   1867  * kms command submission.
   1868  *
   1869  * @dev_priv: Pointer to a device private structure.
   1870  * @file_priv: Pointer to a struct drm_file representing the caller's
    1871  * connection. Must be set to NULL if @user_fence_rep is NULL; conversely,
    1872  * if @file_priv is non-NULL, @user_fence_rep must be non-NULL too.
   1873  * @buf: The buffer object.
   1874  * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
   1875  * ref-counted fence pointer is returned here.
   1876  * @user_fence_rep: Optional pointer to a user-space provided struct
   1877  * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
   1878  * function copies fence data to user-space in a fail-safe manner.
   1879  */
   1880 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
   1881 				  struct drm_file *file_priv,
   1882 				  struct vmw_dma_buffer *buf,
   1883 				  struct vmw_fence_obj **out_fence,
   1884 				  struct drm_vmw_fence_rep __user *
   1885 				  user_fence_rep)
   1886 {
   1887 	struct vmw_fence_obj *fence;
   1888 	uint32_t handle;
   1889 	int ret;
   1890 
   1891 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
   1892 					 file_priv ? &handle : NULL);
   1893 	if (buf)
   1894 		vmw_fence_single_bo(&buf->base, fence);
   1895 	if (file_priv)
   1896 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
   1897 					    ret, user_fence_rep, fence,
   1898 					    handle);
   1899 	if (out_fence)
   1900 		*out_fence = fence;
   1901 	else
   1902 		vmw_fence_obj_unreference(&fence);
   1903 
   1904 	vmw_kms_helper_buffer_revert(buf);
   1905 }
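
         /*
          * Illustrative call sequence (editor annotation): the buffer helpers
          * above bracket command submission. The fifo commands themselves are
          * elided, and @buf, @ret and the flag values shown are hypothetical.
          */
         #if 0
         	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
         	if (ret)
         		return ret;

         	/* ... reserve, build and commit fifo commands referencing buf ... */

         	/* Fences the buffer and unreserves it; no user-space fence rep. */
         	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
         #endif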
   1906 
   1907 
   1908 /**
   1909  * vmw_kms_helper_resource_revert - Undo the actions of
   1910  * vmw_kms_helper_resource_prepare.
   1911  *
    1912  * @ctx: Pointer to the validation context from vmw_kms_helper_resource_prepare.
   1913  *
   1914  * Helper to be used if an error forces the caller to undo the actions of
   1915  * vmw_kms_helper_resource_prepare.
   1916  */
   1917 void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
   1918 {
   1919 	struct vmw_resource *res = ctx->res;
   1920 
   1921 	vmw_kms_helper_buffer_revert(ctx->buf);
   1922 	vmw_dmabuf_unreference(&ctx->buf);
   1923 	vmw_resource_unreserve(res, false, NULL, 0);
   1924 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
   1925 }
   1926 
   1927 /**
   1928  * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
   1929  * command submission.
   1930  *
   1931  * @res: Pointer to the resource. Typically a surface.
    1932  * @interruptible: Whether to perform waits as interruptible.
         * @ctx: Pointer to a validation context, filled in on successful return.
    1933  *
    1934  * Also reserves and validates the backup buffer if the resource is
    1935  * guest-backed. Returns 0 on success, negative error code on failure,
    1936  * -ERESTARTSYS if interrupted by a signal.
   1937  */
   1938 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
   1939 				    bool interruptible,
   1940 				    struct vmw_validation_ctx *ctx)
   1941 {
   1942 	int ret = 0;
   1943 
   1944 	ctx->buf = NULL;
   1945 	ctx->res = res;
   1946 
   1947 	if (interruptible)
   1948 		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
   1949 	else
   1950 		mutex_lock(&res->dev_priv->cmdbuf_mutex);
   1951 
   1952 	if (unlikely(ret != 0))
   1953 		return -ERESTARTSYS;
   1954 
   1955 	ret = vmw_resource_reserve(res, interruptible, false);
   1956 	if (ret)
   1957 		goto out_unlock;
   1958 
   1959 	if (res->backup) {
   1960 		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
   1961 						    interruptible,
   1962 						    res->dev_priv->has_mob);
   1963 		if (ret)
   1964 			goto out_unreserve;
   1965 
   1966 		ctx->buf = vmw_dmabuf_reference(res->backup);
   1967 	}
   1968 	ret = vmw_resource_validate(res);
   1969 	if (ret)
   1970 		goto out_revert;
   1971 	return 0;
   1972 
   1973 out_revert:
   1974 	vmw_kms_helper_buffer_revert(ctx->buf);
   1975 out_unreserve:
   1976 	vmw_resource_unreserve(res, false, NULL, 0);
   1977 out_unlock:
   1978 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
   1979 	return ret;
   1980 }
   1981 
   1982 /**
   1983  * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
   1984  * kms command submission.
   1985  *
    1986  * @ctx: Pointer to the validation context for the resource, typically a surface.
   1987  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
   1988  * ref-counted fence pointer is returned here.
   1989  */
   1990 void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
   1991 				    struct vmw_fence_obj **out_fence)
   1992 {
   1993 	struct vmw_resource *res = ctx->res;
   1994 
   1995 	if (ctx->buf || out_fence)
   1996 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
   1997 					     out_fence, NULL);
   1998 
   1999 	vmw_dmabuf_unreference(&ctx->buf);
   2000 	vmw_resource_unreserve(res, false, NULL, 0);
   2001 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
   2002 }
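
         /*
          * Illustrative call sequence (editor annotation): the resource helpers
          * pair the same way as the buffer helpers, with the validation context
          * carrying the reserved backup buffer between the two calls. @srf and
          * @ret are hypothetical.
          */
         #if 0
         	struct vmw_validation_ctx ctx;

         	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
         	if (ret)
         		return ret;

         	/* ... build and commit fifo commands referencing the surface ... */

         	vmw_kms_helper_resource_finish(&ctx, NULL);
         #endif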
   2003 
   2004 /**
   2005  * vmw_kms_update_proxy - Helper function to update a proxy surface from
   2006  * its backing MOB.
   2007  *
   2008  * @res: Pointer to the surface resource
   2009  * @clips: Clip rects in framebuffer (surface) space.
   2010  * @num_clips: Number of clips in @clips.
   2011  * @increment: Integer with which to increment the clip counter when looping.
   2012  * Used to skip a predetermined number of clip rects.
   2013  *
   2014  * This function makes sure the proxy surface is updated from its backing MOB
    2015  * using the region given by @clips. The surface resource @res and its backing
    2016  * MOB need to be reserved and validated when this function is called.
   2017  */
   2018 int vmw_kms_update_proxy(struct vmw_resource *res,
   2019 			 const struct drm_clip_rect *clips,
   2020 			 unsigned num_clips,
   2021 			 int increment)
   2022 {
   2023 	struct vmw_private *dev_priv = res->dev_priv;
   2024 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
   2025 	struct {
   2026 		SVGA3dCmdHeader header;
   2027 		SVGA3dCmdUpdateGBImage body;
   2028 	} *cmd;
   2029 	SVGA3dBox *box;
   2030 	size_t copy_size = 0;
   2031 	int i;
   2032 
   2033 	if (!clips)
   2034 		return 0;
   2035 
   2036 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
   2037 	if (!cmd) {
   2038 		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
   2039 			  "update.\n");
   2040 		return -ENOMEM;
   2041 	}
   2042 
   2043 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
   2044 		box = &cmd->body.box;
   2045 
   2046 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
   2047 		cmd->header.size = sizeof(cmd->body);
   2048 		cmd->body.image.sid = res->id;
   2049 		cmd->body.image.face = 0;
   2050 		cmd->body.image.mipmap = 0;
   2051 
   2052 		if (clips->x1 > size->width || clips->x2 > size->width ||
   2053 		    clips->y1 > size->height || clips->y2 > size->height) {
    2054 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
   2055 			return -EINVAL;
   2056 		}
   2057 
   2058 		box->x = clips->x1;
   2059 		box->y = clips->y1;
   2060 		box->z = 0;
   2061 		box->w = clips->x2 - clips->x1;
   2062 		box->h = clips->y2 - clips->y1;
   2063 		box->d = 1;
   2064 
   2065 		copy_size += sizeof(*cmd);
   2066 	}
   2067 
   2068 	vmw_fifo_commit(dev_priv, copy_size);
   2069 
   2070 	return 0;
   2071 }
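
         /*
          * Worked example (editor annotation): a clip of (x1,y1)-(x2,y2) =
          * (16,32)-(48,64) is encoded as box x=16 y=32 z=0 w=32 h=32 d=1, one
          * SVGA_3D_CMD_UPDATE_GB_IMAGE command per clip, and only copy_size
          * bytes (the commands actually built) are committed.
          */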
   2072 
   2073 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
   2074 			    unsigned unit,
   2075 			    u32 max_width,
   2076 			    u32 max_height,
   2077 			    struct drm_connector **p_con,
   2078 			    struct drm_crtc **p_crtc,
   2079 			    struct drm_display_mode **p_mode)
   2080 {
   2081 	struct drm_connector *con;
   2082 	struct vmw_display_unit *du;
   2083 	struct drm_display_mode *mode;
   2084 	int i = 0;
   2085 
   2086 	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
   2087 			    head) {
   2088 		if (i == unit)
   2089 			break;
   2090 
   2091 		++i;
   2092 	}
   2093 
   2094 	if (i != unit) {
   2095 		DRM_ERROR("Could not find initial display unit.\n");
   2096 		return -EINVAL;
   2097 	}
   2098 
   2099 	if (list_empty(&con->modes))
   2100 		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
   2101 
   2102 	if (list_empty(&con->modes)) {
   2103 		DRM_ERROR("Could not find initial display mode.\n");
   2104 		return -EINVAL;
   2105 	}
   2106 
   2107 	du = vmw_connector_to_du(con);
   2108 	*p_con = con;
   2109 	*p_crtc = &du->crtc;
   2110 
   2111 	list_for_each_entry(mode, &con->modes, head) {
   2112 		if (mode->type & DRM_MODE_TYPE_PREFERRED)
   2113 			break;
   2114 	}
   2115 
    2116 	if (mode->type & DRM_MODE_TYPE_PREFERRED) {
    2117 		*p_mode = mode;
    2118 	} else {
   2119 		WARN_ONCE(true, "Could not find initial preferred mode.\n");
   2120 		*p_mode = list_first_entry(&con->modes,
   2121 					   struct drm_display_mode,
   2122 					   head);
   2123 	}
   2124 
   2125 	return 0;
   2126 }
   2127