/*	$NetBSD: vmwgfx_kms.c,v 1.7 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_kms.c,v 1.7 2021/12/18 23:45:45 riastradh Exp $");

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

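/**
 * vmw_cursor_update_image - Send a new cursor image to the device
 *
 * @dev_priv: Pointer to the device private struct.
 * @image: Pointer to a 32-bpp cursor image of @width x @height pixels.
 * @width: Width of the cursor image in pixels.
 * @height: Height of the cursor image in pixels.
 * @hotspotX: Hotspot X coordinate within the image.
 * @hotspotY: Hotspot Y coordinate within the image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the image data, and commits it to the device.
 *
 * Returns 0 on success, -EINVAL if @image is NULL or -ENOMEM if FIFO space
 * could not be reserved.
 */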
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

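/**
 * vmw_cursor_update_bo - Send a cursor image from a buffer object
 *
 * @dev_priv: Pointer to the device private struct.
 * @bo: Buffer object holding the cursor image.
 * @width: Width of the cursor image in pixels.
 * @height: Height of the cursor image in pixels.
 * @hotspotX: Hotspot X coordinate within the image.
 * @hotspotY: Hotspot Y coordinate within the image.
 *
 * Reserves and maps @bo, then forwards the mapped image to
 * vmw_cursor_update_image().
 *
 * Returns 0 on success or a negative error code on failure.
 */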
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}


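/**
 * vmw_cursor_update_position - Show, hide or move the cursor
 *
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be visible.
 * @x: Cursor X position in screen coordinates.
 * @y: Cursor Y position in screen coordinates.
 *
 * Writes the new cursor state to the FIFO cursor registers and bumps the
 * cursor update count, all under the cursor spinlock.
 */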
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


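/**
 * vmw_kms_cursor_snoop - Snoop a cursor image from a surface DMA command
 *
 * @srf: The surface to snoop the cursor image into.
 * @tfile: Identifies the caller (currently unused here).
 * @bo: The guest backing buffer of the DMA command.
 * @header: Header of the SVGA3D surface DMA command.
 *
 * Copies the 64x64 cursor image referenced by the DMA command into
 * @srf->snooper.image and bumps the snooper age, so that later cursor
 * updates can be served from the snooped copy. Only simple, page-aligned,
 * single-box copies are handled; anything else is rejected with an error.
 */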
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more cases of dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

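/**
 * vmw_kms_cursor_post_execbuf - Refresh snooped cursor images
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * For each display unit whose cursor surface has been snooped into since
 * the last update, re-send the snooped image to the device.
 */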
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


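/**
 * vmw_du_cursor_plane_destroy - Hide the cursor and clean up the plane
 *
 * @plane: The cursor plane to destroy.
 */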
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


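/**
 * vmw_du_primary_plane_destroy - Clean up the primary plane
 *
 * @plane: The primary plane to destroy.
 */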
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free them */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to drop the surface reference.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpin the framebuffer surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface associated with @old_state.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}


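/**
 * vmw_du_cursor_plane_atomic_update - Commit a new cursor plane state
 *
 * @plane: The cursor plane.
 * @old_state: The previous plane state.
 *
 * Sends the new cursor image, taken either from the snooped surface or
 * from a buffer object, to the device, then repositions the cursor, or
 * hides it if the new state carries neither a surface nor a buffer object.
 */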
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vcs = vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}


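/**
 * vmw_du_crtc_atomic_check - Sanity-check a new crtc state
 *
 * @crtc: The crtc.
 * @new_state: The proposed crtc state.
 *
 * Requires an active primary plane exactly when the crtc is enabled,
 * validates the connector mask, and fakes a dot clock for the virtual
 * device, which has none.
 *
 * Returns 0 on success or -EINVAL otherwise.
 */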
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}


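/**
 * vmw_du_crtc_atomic_flush - Finish a crtc state update
 *
 * @crtc: The crtc.
 * @old_crtc_state: The previous crtc state.
 *
 * Delivers the pending vblank event, if any, for the committed state.
 */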
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

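/**
 * vmw_kms_new_framebuffer_surface - Build a framebuffer around a surface
 *
 * @dev_priv: Pointer to the device private struct.
 * @surface: The surface to scan out from.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Framebuffer metadata from user-space.
 * @is_bo_proxy: True if @surface is a proxy for a buffer object.
 *
 * Validates @surface against @mode_cmd and wraps it in a
 * vmw_framebuffer_surface that holds its own surface reference.
 *
 * Returns 0 on success or a negative error code on failure.
 */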
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

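/**
 * vmw_framebuffer_bo_dirty - Flush dirty regions of a bo-backed framebuffer
 *
 * @framebuffer: The framebuffer that has been dirtied.
 * @file_priv: Identifies the caller.
 * @flags: DRM_MODE_FB_DIRTY_* flags.
 * @color: Annotation color (unused here).
 * @clips: Array of dirty clip rects, or NULL for the whole framebuffer.
 * @num_clips: Number of entries in @clips.
 *
 * Forwards the dirty rects to the legacy display unit; other display
 * unit types are rejected.
 *
 * Returns 0 on success or a negative error code on failure.
 */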
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/**
 * vmw_framebuffer_pin - Pin the buffer in a location suitable for
 * access by the display system.
 *
 * @vfb: The framebuffer to pin.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

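/**
 * vmw_framebuffer_unpin - Unpin the framebuffer's backing buffer
 *
 * @vfb: The framebuffer to unpin.
 */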
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA. This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 SVGA3D_MS_PATTERN_NONE,
					 SVGA3D_MS_QUALITY_NONE,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}


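/**
 * vmw_kms_new_framebuffer_bo - Build a framebuffer around a buffer object
 *
 * @dev_priv: Pointer to the device private struct.
 * @bo: The buffer object to scan out from.
 * @out: On success, points to the new vmw_framebuffer.
 * @mode_cmd: Framebuffer metadata from user-space.
 *
 * Checks that @bo is large enough for the requested mode and that the pixel
 * format is acceptable for the active display unit, then wraps the buffer
 * object in a vmw_framebuffer_bo holding its own reference.
 *
 * Returns 0 on success or a negative error code on failure.
 */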
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to the device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be smaller than the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width  > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 && /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);

		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

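/**
 * vmw_kms_fb_create - Implementation of struct drm_mode_config_funcs::fb_create
 *
 * @dev: DRM device
 * @file_priv: Identifies the caller.
 * @mode_cmd: Framebuffer metadata from user-space.
 *
 * Looks up the buffer object or surface backing the user-space handle and
 * wraps it in a new framebuffer, keeping a reference on the user object so
 * handle lookups keep working while the framebuffer exists.
 */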
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}
	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		/* Falling through to err_out with ret == 0 would deref a NULL vfb. */
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate, indexed
 * by crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited by
		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below, prim_bb_mem is the VRAM size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
	 * size is the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 *   Zero on success,
 *   -EINVAL on invalid state,
 *   -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that lets us
 * assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
   1700 static int
   1701 vmw_kms_atomic_check_modeset(struct drm_device *dev,
   1702 			     struct drm_atomic_state *state)
   1703 {
   1704 	struct drm_crtc *crtc;
   1705 	struct drm_crtc_state *crtc_state;
   1706 	bool need_modeset = false;
   1707 	int i, ret;
   1708 
   1709 	ret = drm_atomic_helper_check(dev, state);
   1710 	if (ret)
   1711 		return ret;
   1712 
   1713 	ret = vmw_kms_check_implicit(dev, state);
   1714 	if (ret) {
   1715 		VMW_DEBUG_KMS("Invalid implicit state\n");
   1716 		return ret;
   1717 	}
   1718 
   1719 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
   1720 		if (drm_atomic_crtc_needs_modeset(crtc_state))
   1721 			need_modeset = true;
   1722 	}
   1723 
   1724 	if (need_modeset)
   1725 		return vmw_kms_check_topology(dev, state);
   1726 
   1727 	return ret;
   1728 }
   1729 
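         /*
          * Mode-config entry points: fb_create is vmwgfx-specific,
          * atomic_check adds the implicit-placement and topology checks
          * above, and commit uses the generic atomic helper.
          */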
   1730 static const struct drm_mode_config_funcs vmw_kms_funcs = {
   1731 	.fb_create = vmw_kms_fb_create,
   1732 	.atomic_check = vmw_kms_atomic_check_modeset,
   1733 	.atomic_commit = drm_atomic_helper_commit,
   1734 };
   1735 
   1736 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
   1737 				   struct drm_file *file_priv,
   1738 				   struct vmw_framebuffer *vfb,
   1739 				   struct vmw_surface *surface,
   1740 				   uint32_t sid,
   1741 				   int32_t destX, int32_t destY,
   1742 				   struct drm_vmw_rect *clips,
   1743 				   uint32_t num_clips)
   1744 {
   1745 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
   1746 					    &surface->res, destX, destY,
   1747 					    num_clips, 1, NULL, NULL);
   1748 }
   1749 
   1750 
   1751 int vmw_kms_present(struct vmw_private *dev_priv,
   1752 		    struct drm_file *file_priv,
   1753 		    struct vmw_framebuffer *vfb,
   1754 		    struct vmw_surface *surface,
   1755 		    uint32_t sid,
   1756 		    int32_t destX, int32_t destY,
   1757 		    struct drm_vmw_rect *clips,
   1758 		    uint32_t num_clips)
   1759 {
   1760 	int ret;
   1761 
   1762 	switch (dev_priv->active_display_unit) {
   1763 	case vmw_du_screen_target:
   1764 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
   1765 						 &surface->res, destX, destY,
   1766 						 num_clips, 1, NULL, NULL);
   1767 		break;
   1768 	case vmw_du_screen_object:
   1769 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
   1770 					      sid, destX, destY, clips,
   1771 					      num_clips);
   1772 		break;
   1773 	default:
   1774 		WARN_ONCE(true,
   1775 			  "Present called with invalid display system.\n");
   1776 		ret = -ENOSYS;
   1777 		break;
   1778 	}
   1779 	if (ret)
   1780 		return ret;
   1781 
   1782 	vmw_fifo_flush(dev_priv, false);
   1783 
   1784 	return 0;
   1785 }
   1786 
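         /*
          * The immutable "hotplug_mode_update" connector property (range
          * 0..1) hints to user-space that preferred modes on this device
          * change on hotplug, so mode lists should be re-probed rather
          * than cached. (Semantics inferred from the property name and
          * its use alongside the suggested-offset properties.)
          */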
   1787 static void
   1788 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
   1789 {
   1790 	if (dev_priv->hotplug_mode_update_property)
   1791 		return;
   1792 
   1793 	dev_priv->hotplug_mode_update_property =
   1794 		drm_property_create_range(dev_priv->dev,
   1795 					  DRM_MODE_PROP_IMMUTABLE,
   1796 					  "hotplug_mode_update", 0, 1);
    1801 }
   1802 
   1803 int vmw_kms_init(struct vmw_private *dev_priv)
   1804 {
   1805 	struct drm_device *dev = dev_priv->dev;
   1806 	int ret;
   1807 
   1808 	drm_mode_config_init(dev);
   1809 	dev->mode_config.funcs = &vmw_kms_funcs;
   1810 	dev->mode_config.min_width = 1;
   1811 	dev->mode_config.min_height = 1;
   1812 	dev->mode_config.max_width = dev_priv->texture_max_width;
   1813 	dev->mode_config.max_height = dev_priv->texture_max_height;
   1814 
   1815 	drm_mode_create_suggested_offset_properties(dev);
   1816 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
   1817 
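         	/*
         	 * Prefer Screen Target display units, then Screen Objects,
         	 * with the legacy display unit as the final fallback.
         	 */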
   1818 	ret = vmw_kms_stdu_init_display(dev_priv);
   1819 	if (ret) {
   1820 		ret = vmw_kms_sou_init_display(dev_priv);
   1821 		if (ret) /* Fallback */
   1822 			ret = vmw_kms_ldu_init_display(dev_priv);
   1823 	}
   1824 
   1825 	return ret;
   1826 }
   1827 
   1828 int vmw_kms_close(struct vmw_private *dev_priv)
   1829 {
   1830 	int ret = 0;
   1831 
   1832 	/*
    1833 	 * The docs say we should take the lock before calling this function,
    1834 	 * but since it destroys encoders, and our destructor calls
    1835 	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
   1836 	 */
   1837 	drm_mode_config_cleanup(dev_priv->dev);
   1838 	if (dev_priv->active_display_unit == vmw_du_legacy)
   1839 		ret = vmw_kms_ldu_close_display(dev_priv);
   1840 
   1841 	return ret;
   1842 }
   1843 
   1844 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
   1845 				struct drm_file *file_priv)
   1846 {
   1847 	struct drm_vmw_cursor_bypass_arg *arg = data;
   1848 	struct vmw_display_unit *du;
   1849 	struct drm_crtc *crtc;
   1850 	int ret = 0;
    1851 
   1853 	mutex_lock(&dev->mode_config.mutex);
   1854 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
   1855 
   1856 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
   1857 			du = vmw_crtc_to_du(crtc);
   1858 			du->hotspot_x = arg->xhot;
   1859 			du->hotspot_y = arg->yhot;
   1860 		}
   1861 
   1862 		mutex_unlock(&dev->mode_config.mutex);
   1863 		return 0;
   1864 	}
   1865 
   1866 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
   1867 	if (!crtc) {
   1868 		ret = -ENOENT;
   1869 		goto out;
   1870 	}
   1871 
   1872 	du = vmw_crtc_to_du(crtc);
   1873 
   1874 	du->hotspot_x = arg->xhot;
   1875 	du->hotspot_y = arg->yhot;
   1876 
   1877 out:
   1878 	mutex_unlock(&dev->mode_config.mutex);
   1879 
   1880 	return ret;
   1881 }
   1882 
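         /*
          * vmw_kms_write_svga - Program the host for a given framebuffer
          * geometry. The pitch goes through the pitchlock register when
          * SVGA_CAP_PITCHLOCK is present, or through the FIFO pitchlock
          * otherwise. The host-reported depth must match the requested one.
          */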
   1883 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
   1884 			unsigned width, unsigned height, unsigned pitch,
   1885 			unsigned bpp, unsigned depth)
   1886 {
   1887 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1888 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
   1889 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1890 		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
   1891 			       SVGA_FIFO_PITCHLOCK);
   1892 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
   1893 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
   1894 	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
   1895 
   1896 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
   1897 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
   1898 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
   1899 		return -EINVAL;
   1900 	}
   1901 
   1902 	return 0;
   1903 }
   1904 
   1905 int vmw_kms_save_vga(struct vmw_private *vmw_priv)
   1906 {
   1907 	struct vmw_vga_topology_state *save;
   1908 	uint32_t i;
   1909 
   1910 	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
   1911 	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
   1912 	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
   1913 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1914 		vmw_priv->vga_pitchlock =
   1915 		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
   1916 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1917 		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
   1918 							SVGA_FIFO_PITCHLOCK);
   1919 
   1920 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
   1921 		return 0;
   1922 
   1923 	vmw_priv->num_displays = vmw_read(vmw_priv,
   1924 					  SVGA_REG_NUM_GUEST_DISPLAYS);
   1925 
   1926 	if (vmw_priv->num_displays == 0)
   1927 		vmw_priv->num_displays = 1;
   1928 
   1929 	for (i = 0; i < vmw_priv->num_displays; ++i) {
   1930 		save = &vmw_priv->vga_save[i];
   1931 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
   1932 		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
   1933 		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
   1934 		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
   1935 		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
   1936 		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
   1937 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
   1938 		if (i == 0 && vmw_priv->num_displays == 1 &&
   1939 		    save->width == 0 && save->height == 0) {
   1940 
   1941 			/*
   1942 			 * It should be fairly safe to assume that these
   1943 			 * values are uninitialized.
   1944 			 */
   1945 
   1946 			save->width = vmw_priv->vga_width - save->pos_x;
   1947 			save->height = vmw_priv->vga_height - save->pos_y;
   1948 		}
   1949 	}
   1950 
   1951 	return 0;
   1952 }
   1953 
   1954 int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
   1955 {
   1956 	struct vmw_vga_topology_state *save;
   1957 	uint32_t i;
   1958 
   1959 	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
   1960 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
   1961 	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
   1962 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
   1963 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
   1964 			  vmw_priv->vga_pitchlock);
   1965 	else if (vmw_fifo_have_pitchlock(vmw_priv))
   1966 		vmw_mmio_write(vmw_priv->vga_pitchlock,
   1967 			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
   1968 
   1969 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
   1970 		return 0;
   1971 
   1972 	for (i = 0; i < vmw_priv->num_displays; ++i) {
   1973 		save = &vmw_priv->vga_save[i];
   1974 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
   1975 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
   1976 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
   1977 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
   1978 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
   1979 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
   1980 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
   1981 	}
   1982 
   1983 	return 0;
   1984 }
   1985 
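         /*
          * vmw_kms_validate_mode_vram - Check that a mode fits in display
          * memory: prim_bb_mem for screen targets, VRAM otherwise. @pitch
          * is in bytes per scanline, so pitch * height is the framebuffer
          * size in bytes.
          */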
   1986 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
   1987 				uint32_t pitch,
   1988 				uint32_t height)
   1989 {
   1990 	return ((u64) pitch * (u64) height) < (u64)
   1991 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
   1992 		 dev_priv->prim_bb_mem : dev_priv->vram_size);
   1993 }
   1994 
   1995 
   1996 /**
    1997  * Function called by DRM code with vbl_lock held.
   1998  */
   1999 u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
   2000 {
   2001 	return 0;
   2002 }
   2003 
   2004 /**
    2005  * Function called by DRM code with vbl_lock held.
   2006  */
   2007 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
   2008 {
   2009 	return -EINVAL;
   2010 }
   2011 
   2012 /**
    2013  * Function called by DRM code with vbl_lock held.
   2014  */
   2015 void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
   2016 {
   2017 }
   2018 
   2019 /**
   2020  * vmw_du_update_layout - Update the display unit with topology from resolution
   2021  * plugin and generate DRM uevent
   2022  * @dev_priv: device private
   2023  * @num_rects: number of drm_rect in rects
    2024  * @rects: topology to update
   2025  */
   2026 static int vmw_du_update_layout(struct vmw_private *dev_priv,
   2027 				unsigned int num_rects, struct drm_rect *rects)
   2028 {
   2029 	struct drm_device *dev = dev_priv->dev;
   2030 	struct vmw_display_unit *du;
   2031 	struct drm_connector *con;
   2032 	struct drm_connector_list_iter conn_iter;
   2033 	struct drm_modeset_acquire_ctx ctx;
   2034 	struct drm_crtc *crtc;
   2035 	int ret;
   2036 
   2037 	/* Currently gui_x/y is protected with the crtc mutex */
   2038 	mutex_lock(&dev->mode_config.mutex);
   2039 	drm_modeset_acquire_init(&ctx, 0);
   2040 retry:
   2041 	drm_for_each_crtc(crtc, dev) {
   2042 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
   2043 		if (ret < 0) {
   2044 			if (ret == -EDEADLK) {
   2045 				drm_modeset_backoff(&ctx);
   2046 				goto retry;
    2047 			}
   2048 			goto out_fini;
   2049 		}
   2050 	}
   2051 
   2052 	drm_connector_list_iter_begin(dev, &conn_iter);
   2053 	drm_for_each_connector_iter(con, &conn_iter) {
   2054 		du = vmw_connector_to_du(con);
   2055 		if (num_rects > du->unit) {
   2056 			du->pref_width = drm_rect_width(&rects[du->unit]);
   2057 			du->pref_height = drm_rect_height(&rects[du->unit]);
   2058 			du->pref_active = true;
   2059 			du->gui_x = rects[du->unit].x1;
   2060 			du->gui_y = rects[du->unit].y1;
   2061 		} else {
   2062 			du->pref_width = 800;
   2063 			du->pref_height = 600;
   2064 			du->pref_active = false;
   2065 			du->gui_x = 0;
   2066 			du->gui_y = 0;
   2067 		}
   2068 	}
   2069 	drm_connector_list_iter_end(&conn_iter);
   2070 
   2071 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
   2072 		du = vmw_connector_to_du(con);
   2073 		if (num_rects > du->unit) {
   2074 			drm_object_property_set_value
   2075 			  (&con->base, dev->mode_config.suggested_x_property,
   2076 			   du->gui_x);
   2077 			drm_object_property_set_value
   2078 			  (&con->base, dev->mode_config.suggested_y_property,
   2079 			   du->gui_y);
   2080 		} else {
   2081 			drm_object_property_set_value
   2082 			  (&con->base, dev->mode_config.suggested_x_property,
   2083 			   0);
   2084 			drm_object_property_set_value
   2085 			  (&con->base, dev->mode_config.suggested_y_property,
   2086 			   0);
   2087 		}
   2088 		con->status = vmw_du_connector_detect(con, true);
   2089 	}
   2090 
   2091 	drm_sysfs_hotplug_event(dev);
   2092 out_fini:
   2093 	drm_modeset_drop_locks(&ctx);
   2094 	drm_modeset_acquire_fini(&ctx);
   2095 	mutex_unlock(&dev->mode_config.mutex);
   2096 
   2097 	return 0;
   2098 }
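
         /*
          * Example (illustrative): with num_rects = 2 and
          * rects = { [0 0 1920 1080], [1920 0 3840 1080] }, unit 0 prefers
          * 1920x1080 at gui (0, 0), unit 1 prefers 1920x1080 at gui
          * (1920, 0), and any remaining units fall back to an inactive
          * 800x600.
          */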
   2099 
   2100 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
   2101 			  u16 *r, u16 *g, u16 *b,
   2102 			  uint32_t size,
   2103 			  struct drm_modeset_acquire_ctx *ctx)
   2104 {
   2105 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
   2106 	int i;
   2107 
   2108 	for (i = 0; i < size; i++) {
   2109 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
   2110 			  r[i], g[i], b[i]);
   2111 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
   2112 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
   2113 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
   2114 	}
   2115 
   2116 	return 0;
   2117 }
   2118 
   2119 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
   2120 {
   2121 	return 0;
   2122 }
   2123 
   2124 enum drm_connector_status
   2125 vmw_du_connector_detect(struct drm_connector *connector, bool force)
   2126 {
   2127 	uint32_t num_displays;
   2128 	struct drm_device *dev = connector->dev;
   2129 	struct vmw_private *dev_priv = vmw_priv(dev);
   2130 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
   2131 
   2132 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
   2133 
    2134 	return ((du->unit < num_displays && du->pref_active) ?
    2135 		connector_status_connected : connector_status_disconnected);
   2137 }
   2138 
   2139 static struct drm_display_mode vmw_kms_connector_builtin[] = {
   2140 	/* 640x480@60Hz */
   2141 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
   2142 		   752, 800, 0, 480, 489, 492, 525, 0,
   2143 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
   2144 	/* 800x600@60Hz */
   2145 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
   2146 		   968, 1056, 0, 600, 601, 605, 628, 0,
   2147 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2148 	/* 1024x768@60Hz */
   2149 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
   2150 		   1184, 1344, 0, 768, 771, 777, 806, 0,
   2151 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
   2152 	/* 1152x864@75Hz */
   2153 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
   2154 		   1344, 1600, 0, 864, 865, 868, 900, 0,
   2155 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2156 	/* 1280x768@60Hz */
   2157 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
   2158 		   1472, 1664, 0, 768, 771, 778, 798, 0,
   2159 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2160 	/* 1280x800@60Hz */
   2161 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
   2162 		   1480, 1680, 0, 800, 803, 809, 831, 0,
   2163 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
   2164 	/* 1280x960@60Hz */
   2165 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
   2166 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
   2167 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2168 	/* 1280x1024@60Hz */
   2169 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
   2170 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
   2171 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2172 	/* 1360x768@60Hz */
   2173 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
   2174 		   1536, 1792, 0, 768, 771, 777, 795, 0,
   2175 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
    2176 	/* 1400x1050@60Hz */
   2177 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
   2178 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
   2179 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2180 	/* 1440x900@60Hz */
   2181 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
   2182 		   1672, 1904, 0, 900, 903, 909, 934, 0,
   2183 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2184 	/* 1600x1200@60Hz */
   2185 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
   2186 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
   2187 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2188 	/* 1680x1050@60Hz */
   2189 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
   2190 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
   2191 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2192 	/* 1792x1344@60Hz */
   2193 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
   2194 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
   2195 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
    2196 	/* 1856x1392@60Hz */
   2197 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
   2198 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
   2199 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2200 	/* 1920x1200@60Hz */
   2201 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
   2202 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
   2203 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2204 	/* 1920x1440@60Hz */
   2205 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
   2206 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
   2207 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2208 	/* 2560x1600@60Hz */
   2209 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
   2210 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
   2211 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
   2212 	/* Terminate */
   2213 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
   2214 };
   2215 
   2216 /**
   2217  * vmw_guess_mode_timing - Provide fake timings for a
   2218  * 60Hz vrefresh mode.
   2219  *
    2220  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
   2221  * members filled in.
   2222  */
   2223 void vmw_guess_mode_timing(struct drm_display_mode *mode)
   2224 {
   2225 	mode->hsync_start = mode->hdisplay + 50;
   2226 	mode->hsync_end = mode->hsync_start + 50;
   2227 	mode->htotal = mode->hsync_end + 50;
   2228 
   2229 	mode->vsync_start = mode->vdisplay + 50;
   2230 	mode->vsync_end = mode->vsync_start + 50;
   2231 	mode->vtotal = mode->vsync_end + 50;
   2232 
   2233 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
   2234 	mode->vrefresh = drm_mode_vrefresh(mode);
   2235 }
   2236 
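         /*
          * Worked example (illustrative only): for hdisplay = 1024 and
          * vdisplay = 768 the code above yields htotal = 1174 and
          * vtotal = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz,
          * i.e. 64662000 / (1174 * 918) ~= 60 Hz, matching the intended
          * vrefresh.
          */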
   2237 
   2238 int vmw_du_connector_fill_modes(struct drm_connector *connector,
   2239 				uint32_t max_width, uint32_t max_height)
   2240 {
   2241 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
   2242 	struct drm_device *dev = connector->dev;
   2243 	struct vmw_private *dev_priv = vmw_priv(dev);
   2244 	struct drm_display_mode *mode = NULL;
   2245 	struct drm_display_mode *bmode;
   2246 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
   2247 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
   2248 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   2249 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
   2250 	};
   2251 	int i;
   2252 	u32 assumed_bpp = 4;
   2253 
   2254 	if (dev_priv->assume_16bpp)
   2255 		assumed_bpp = 2;
   2256 
   2257 	max_width  = min(max_width,  dev_priv->texture_max_width);
   2258 	max_height = min(max_height, dev_priv->texture_max_height);
   2259 
   2260 	/*
    2261 	 * For STDU, a mode is additionally limited by the
    2262 	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
   2263 	 */
   2264 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
   2265 		max_width  = min(max_width,  dev_priv->stdu_max_width);
   2266 		max_height = min(max_height, dev_priv->stdu_max_height);
   2267 	}
   2268 
   2269 	/* Add preferred mode */
   2270 	mode = drm_mode_duplicate(dev, &prefmode);
   2271 	if (!mode)
   2272 		return 0;
   2273 	mode->hdisplay = du->pref_width;
   2274 	mode->vdisplay = du->pref_height;
   2275 	vmw_guess_mode_timing(mode);
   2276 
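         	/*
         	 * The VRAM check below uses pitch = hdisplay * assumed_bpp;
         	 * e.g. a 1920x1200 mode at 4 bytes per pixel needs
         	 * 1920 * 4 * 1200 = 9216000 bytes.
         	 */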
   2277 	if (vmw_kms_validate_mode_vram(dev_priv,
   2278 					mode->hdisplay * assumed_bpp,
   2279 					mode->vdisplay)) {
   2280 		drm_mode_probed_add(connector, mode);
   2281 	} else {
   2282 		drm_mode_destroy(dev, mode);
   2283 		mode = NULL;
   2284 	}
   2285 
   2286 	if (du->pref_mode) {
   2287 		list_del_init(&du->pref_mode->head);
   2288 		drm_mode_destroy(dev, du->pref_mode);
   2289 	}
   2290 
    2291 	/* mode might be NULL here, this is intended */
   2292 	du->pref_mode = mode;
   2293 
   2294 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
   2295 		bmode = &vmw_kms_connector_builtin[i];
   2296 		if (bmode->hdisplay > max_width ||
   2297 		    bmode->vdisplay > max_height)
   2298 			continue;
   2299 
   2300 		if (!vmw_kms_validate_mode_vram(dev_priv,
   2301 						bmode->hdisplay * assumed_bpp,
   2302 						bmode->vdisplay))
   2303 			continue;
   2304 
   2305 		mode = drm_mode_duplicate(dev, bmode);
   2306 		if (!mode)
   2307 			return 0;
   2308 		mode->vrefresh = drm_mode_vrefresh(mode);
   2309 
   2310 		drm_mode_probed_add(connector, mode);
   2311 	}
   2312 
   2313 	drm_connector_list_update(connector);
    2314 	/* Move the preferred mode first, help apps pick the right mode. */
   2315 	drm_mode_sort(&connector->modes);
   2316 
   2317 	return 1;
   2318 }
   2319 
   2320 /**
   2321  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
   2322  * @dev: drm device for the ioctl
   2323  * @data: data pointer for the ioctl
   2324  * @file_priv: drm file for the ioctl call
   2325  *
   2326  * Update preferred topology of display unit as per ioctl request. The topology
   2327  * is expressed as array of drm_vmw_rect.
   2328  * e.g.
   2329  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
   2330  *
   2331  * NOTE:
    2332  * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
    2333  * Besides the device limit on topology, x + w and y + h (lower right) cannot
    2334  * exceed INT_MAX; a topology beyond these limits is rejected with an error.
   2335  *
   2336  * Returns:
   2337  * Zero on success, negative errno on failure.
   2338  */
   2339 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
   2340 				struct drm_file *file_priv)
   2341 {
   2342 	struct vmw_private *dev_priv = vmw_priv(dev);
   2343 	struct drm_mode_config *mode_config = &dev->mode_config;
   2344 	struct drm_vmw_update_layout_arg *arg =
   2345 		(struct drm_vmw_update_layout_arg *)data;
   2346 	void __user *user_rects;
   2347 	struct drm_vmw_rect *rects;
   2348 	struct drm_rect *drm_rects;
   2349 	unsigned rects_size;
   2350 	int ret, i;
   2351 
   2352 	if (!arg->num_outputs) {
   2353 		struct drm_rect def_rect = {0, 0, 800, 600};
   2354 		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
   2355 			      def_rect.x1, def_rect.y1,
   2356 			      def_rect.x2, def_rect.y2);
   2357 		vmw_du_update_layout(dev_priv, 1, &def_rect);
   2358 		return 0;
   2359 	}
   2360 
   2361 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
   2362 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
   2363 			GFP_KERNEL);
   2364 	if (unlikely(!rects))
   2365 		return -ENOMEM;
   2366 
   2367 	user_rects = (void __user *)(unsigned long)arg->rects;
   2368 	ret = copy_from_user(rects, user_rects, rects_size);
   2369 	if (unlikely(ret != 0)) {
   2370 		DRM_ERROR("Failed to get rects.\n");
   2371 		ret = -EFAULT;
   2372 		goto out_free;
   2373 	}
   2374 
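         	/*
         	 * The drm_vmw_rect array is converted to drm_rect in place:
         	 * each entry is copied into curr_rect below before its
         	 * drm_rect form is written back, so the aliasing is safe.
         	 */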
   2375 	drm_rects = (struct drm_rect *)rects;
   2376 
   2377 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
   2378 	for (i = 0; i < arg->num_outputs; i++) {
   2379 		struct drm_vmw_rect curr_rect;
   2380 
    2381 		/* Verify user-space rects for overflow as the kernel uses drm_rect */
   2382 		if ((rects[i].x + rects[i].w > INT_MAX) ||
   2383 		    (rects[i].y + rects[i].h > INT_MAX)) {
   2384 			ret = -ERANGE;
   2385 			goto out_free;
   2386 		}
   2387 
   2388 		curr_rect = rects[i];
   2389 		drm_rects[i].x1 = curr_rect.x;
   2390 		drm_rects[i].y1 = curr_rect.y;
   2391 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
   2392 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
   2393 
   2394 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
   2395 			      drm_rects[i].x1, drm_rects[i].y1,
   2396 			      drm_rects[i].x2, drm_rects[i].y2);
   2397 
   2398 		/*
    2399 		 * Currently this check limits the topology to
    2400 		 * mode_config->max_width/height (which actually is the
    2401 		 * maximum texture size supported by the virtual device).
    2402 		 * The limit is here to accommodate window managers that
    2403 		 * create one big framebuffer for the whole topology.
   2404 		 */
   2405 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
   2406 		    drm_rects[i].x2 > mode_config->max_width ||
   2407 		    drm_rects[i].y2 > mode_config->max_height) {
   2408 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
   2409 				      drm_rects[i].x1, drm_rects[i].y1,
   2410 				      drm_rects[i].x2, drm_rects[i].y2);
   2411 			ret = -EINVAL;
   2412 			goto out_free;
   2413 		}
   2414 	}
   2415 
   2416 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
   2417 
   2418 	if (ret == 0)
   2419 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
   2420 
   2421 out_free:
   2422 	kfree(rects);
   2423 	return ret;
   2424 }
   2425 
   2426 /**
   2427  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
   2428  * on a set of cliprects and a set of display units.
   2429  *
   2430  * @dev_priv: Pointer to a device private structure.
   2431  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
    2432  * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
   2433  * Cliprects are given in framebuffer coordinates.
   2434  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
   2435  * be NULL. Cliprects are given in source coordinates.
   2436  * @dest_x: X coordinate offset for the crtc / destination clip rects.
   2437  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
   2438  * @num_clips: Number of cliprects in the @clips or @vclips array.
   2439  * @increment: Integer with which to increment the clip counter when looping.
   2440  * Used to skip a predetermined number of clip rects.
   2441  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
   2442  */
   2443 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
   2444 			 struct vmw_framebuffer *framebuffer,
   2445 			 const struct drm_clip_rect *clips,
   2446 			 const struct drm_vmw_rect *vclips,
   2447 			 s32 dest_x, s32 dest_y,
   2448 			 int num_clips,
   2449 			 int increment,
   2450 			 struct vmw_kms_dirty *dirty)
   2451 {
   2452 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
   2453 	struct drm_crtc *crtc;
   2454 	u32 num_units = 0;
   2455 	u32 i, k;
   2456 
   2457 	dirty->dev_priv = dev_priv;
   2458 
   2459 	/* If crtc is passed, no need to iterate over other display units */
   2460 	if (dirty->crtc) {
   2461 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
   2462 	} else {
   2463 		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
   2464 				    head) {
   2465 			struct drm_plane *plane = crtc->primary;
   2466 
   2467 			if (plane->state->fb == &framebuffer->base)
   2468 				units[num_units++] = vmw_crtc_to_du(crtc);
   2469 		}
   2470 	}
   2471 
   2472 	for (k = 0; k < num_units; k++) {
   2473 		struct vmw_display_unit *unit = units[k];
   2474 		s32 crtc_x = unit->crtc.x;
   2475 		s32 crtc_y = unit->crtc.y;
   2476 		s32 crtc_width = unit->crtc.mode.hdisplay;
   2477 		s32 crtc_height = unit->crtc.mode.vdisplay;
   2478 		const struct drm_clip_rect *clips_ptr = clips;
   2479 		const struct drm_vmw_rect *vclips_ptr = vclips;
   2480 
   2481 		dirty->unit = unit;
   2482 		if (dirty->fifo_reserve_size > 0) {
   2483 			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
   2484 						      dirty->fifo_reserve_size);
   2485 			if (!dirty->cmd)
   2486 				return -ENOMEM;
   2487 
   2488 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
   2489 		}
   2490 		dirty->num_hits = 0;
   2491 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
   2492 		       vclips_ptr += increment) {
   2493 			s32 clip_left;
   2494 			s32 clip_top;
   2495 
   2496 			/*
   2497 			 * Select clip array type. Note that integer type
   2498 			 * in @clips is unsigned short, whereas in @vclips
   2499 			 * it's 32-bit.
   2500 			 */
   2501 			if (clips) {
   2502 				dirty->fb_x = (s32) clips_ptr->x1;
   2503 				dirty->fb_y = (s32) clips_ptr->y1;
   2504 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
   2505 					crtc_x;
   2506 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
   2507 					crtc_y;
   2508 			} else {
   2509 				dirty->fb_x = vclips_ptr->x;
   2510 				dirty->fb_y = vclips_ptr->y;
   2511 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
   2512 					dest_x - crtc_x;
   2513 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
   2514 					dest_y - crtc_y;
   2515 			}
   2516 
   2517 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
   2518 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
   2519 
   2520 			/* Skip this clip if it's outside the crtc region */
   2521 			if (dirty->unit_x1 >= crtc_width ||
   2522 			    dirty->unit_y1 >= crtc_height ||
   2523 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
   2524 				continue;
   2525 
   2526 			/* Clip right and bottom to crtc limits */
   2527 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
   2528 					       crtc_width);
   2529 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
   2530 					       crtc_height);
   2531 
   2532 			/* Clip left and top to crtc limits */
   2533 			clip_left = min_t(s32, dirty->unit_x1, 0);
   2534 			clip_top = min_t(s32, dirty->unit_y1, 0);
   2535 			dirty->unit_x1 -= clip_left;
   2536 			dirty->unit_y1 -= clip_top;
   2537 			dirty->fb_x -= clip_left;
   2538 			dirty->fb_y -= clip_top;
   2539 
   2540 			dirty->clip(dirty);
   2541 		}
   2542 
   2543 		dirty->fifo_commit(dirty);
   2544 	}
   2545 
   2546 	return 0;
   2547 }
   2548 
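         /*
          * Illustrative usage sketch (hypothetical callback names, not part
          * of the driver): a caller fills the closure with per-clip and
          * commit callbacks and an optional FIFO reservation size:
          *
          *	static void example_clip(struct vmw_kms_dirty *dirty)
          *	{
          *		record dirty->unit_x1/y1/x2/y2 into dirty->cmd
          *		and bump dirty->num_hits;
          *	}
          *
          *	struct vmw_kms_dirty dirty = {
          *		.clip = example_clip,
          *		.fifo_commit = example_commit,
          *		.fifo_reserve_size = sizeof(*cmd) * num_clips,
          *	};
          *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
          *				   0, 0, num_clips, 1, &dirty);
          */
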
   2549 /**
   2550  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
   2551  * cleanup and fencing
   2552  * @dev_priv: Pointer to the device-private struct
   2553  * @file_priv: Pointer identifying the client when user-space fencing is used
   2554  * @ctx: Pointer to the validation context
   2555  * @out_fence: If non-NULL, returned refcounted fence-pointer
   2556  * @user_fence_rep: If non-NULL, pointer to user-space address area
   2557  * in which to copy user-space fence info
   2558  */
   2559 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
   2560 				      struct drm_file *file_priv,
   2561 				      struct vmw_validation_context *ctx,
   2562 				      struct vmw_fence_obj **out_fence,
   2563 				      struct drm_vmw_fence_rep __user *
   2564 				      user_fence_rep)
   2565 {
   2566 	struct vmw_fence_obj *fence = NULL;
   2567 	uint32_t handle = 0;
   2568 	int ret = 0;
   2569 
   2570 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
   2571 	    out_fence)
   2572 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
   2573 						 file_priv ? &handle : NULL);
   2574 	vmw_validation_done(ctx, fence);
   2575 	if (file_priv)
   2576 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
   2577 					    ret, user_fence_rep, fence,
   2578 					    handle, -1, NULL);
   2579 	if (out_fence)
   2580 		*out_fence = fence;
   2581 	else
   2582 		vmw_fence_obj_unreference(&fence);
   2583 }
   2584 
   2585 /**
   2586  * vmw_kms_update_proxy - Helper function to update a proxy surface from
   2587  * its backing MOB.
   2588  *
   2589  * @res: Pointer to the surface resource
   2590  * @clips: Clip rects in framebuffer (surface) space.
   2591  * @num_clips: Number of clips in @clips.
   2592  * @increment: Integer with which to increment the clip counter when looping.
   2593  * Used to skip a predetermined number of clip rects.
   2594  *
   2595  * This function makes sure the proxy surface is updated from its backing MOB
   2596  * using the region given by @clips. The surface resource @res and its backing
    2597  * MOB need to be reserved and validated when this function is called.
   2598  */
   2599 int vmw_kms_update_proxy(struct vmw_resource *res,
   2600 			 const struct drm_clip_rect *clips,
   2601 			 unsigned num_clips,
   2602 			 int increment)
   2603 {
   2604 	struct vmw_private *dev_priv = res->dev_priv;
   2605 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
   2606 	struct {
   2607 		SVGA3dCmdHeader header;
   2608 		SVGA3dCmdUpdateGBImage body;
   2609 	} *cmd;
   2610 	SVGA3dBox *box;
   2611 	size_t copy_size = 0;
   2612 	int i;
   2613 
   2614 	if (!clips)
   2615 		return 0;
   2616 
   2617 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
   2618 	if (!cmd)
   2619 		return -ENOMEM;
   2620 
   2621 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
   2622 		box = &cmd->body.box;
   2623 
   2624 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
   2625 		cmd->header.size = sizeof(cmd->body);
   2626 		cmd->body.image.sid = res->id;
   2627 		cmd->body.image.face = 0;
   2628 		cmd->body.image.mipmap = 0;
   2629 
   2630 		if (clips->x1 > size->width || clips->x2 > size->width ||
   2631 		    clips->y1 > size->height || clips->y2 > size->height) {
    2632 			DRM_ERROR("Invalid clips outside of framebuffer.\n");
   2633 			return -EINVAL;
   2634 		}
   2635 
   2636 		box->x = clips->x1;
   2637 		box->y = clips->y1;
   2638 		box->z = 0;
   2639 		box->w = clips->x2 - clips->x1;
   2640 		box->h = clips->y2 - clips->y1;
   2641 		box->d = 1;
   2642 
   2643 		copy_size += sizeof(*cmd);
   2644 	}
   2645 
   2646 	vmw_fifo_commit(dev_priv, copy_size);
   2647 
   2648 	return 0;
   2649 }
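
         /*
          * Example (illustrative): a clip rect { x1 = 16, y1 = 32, x2 = 48,
          * y2 = 64 } becomes the box { x = 16, y = 32, z = 0, w = 32,
          * h = 32, d = 1 }, i.e. a single-layer 2D region of the surface
          * refreshed from the MOB.
          */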
   2650 
   2651 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
   2652 			    unsigned unit,
   2653 			    u32 max_width,
   2654 			    u32 max_height,
   2655 			    struct drm_connector **p_con,
   2656 			    struct drm_crtc **p_crtc,
   2657 			    struct drm_display_mode **p_mode)
   2658 {
   2659 	struct drm_connector *con;
   2660 	struct vmw_display_unit *du;
   2661 	struct drm_display_mode *mode;
   2662 	int i = 0;
   2663 	int ret = 0;
   2664 
   2665 	mutex_lock(&dev_priv->dev->mode_config.mutex);
   2666 	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
   2667 			    head) {
   2668 		if (i == unit)
   2669 			break;
   2670 
   2671 		++i;
   2672 	}
   2673 
   2674 	if (i != unit) {
   2675 		DRM_ERROR("Could not find initial display unit.\n");
   2676 		ret = -EINVAL;
   2677 		goto out_unlock;
   2678 	}
   2679 
   2680 	if (list_empty(&con->modes))
   2681 		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
   2682 
   2683 	if (list_empty(&con->modes)) {
   2684 		DRM_ERROR("Could not find initial display mode.\n");
   2685 		ret = -EINVAL;
   2686 		goto out_unlock;
   2687 	}
   2688 
   2689 	du = vmw_connector_to_du(con);
   2690 	*p_con = con;
   2691 	*p_crtc = &du->crtc;
   2692 
    2693 	*p_mode = NULL;
    2694 	list_for_each_entry(mode, &con->modes, head) {
    2695 		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
    2696 			*p_mode = mode;
    2697 			break;
    2698 		}
    2699 	}
    2700 
    2701 	if (!*p_mode) {
    2702 		WARN_ONCE(true, "Could not find initial preferred mode.\n");
    2703 		*p_mode = list_first_entry(&con->modes,
    2704 					   struct drm_display_mode, head);
    2705 	}
   2706 
   2707  out_unlock:
   2708 	mutex_unlock(&dev_priv->dev->mode_config.mutex);
   2709 
   2710 	return ret;
   2711 }
   2712 
   2713 /**
    2714  * vmw_kms_create_implicit_placement_property - Set up the implicit
    2715  * placement property.
   2716  *
   2717  * @dev_priv: Pointer to a device private struct.
   2718  *
   2719  * Sets up the implicit placement property unless it's already set up.
   2720  */
   2721 void
   2722 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
   2723 {
   2724 	if (dev_priv->implicit_placement_property)
   2725 		return;
   2726 
   2727 	dev_priv->implicit_placement_property =
   2728 		drm_property_create_range(dev_priv->dev,
   2729 					  DRM_MODE_PROP_IMMUTABLE,
   2730 					  "implicit_placement", 0, 1);
   2731 }
   2732 
   2733 /**
   2734  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
   2735  *
   2736  * @dev: Pointer to the drm device
   2737  * Return: 0 on success. Negative error code on failure.
   2738  */
   2739 int vmw_kms_suspend(struct drm_device *dev)
   2740 {
   2741 	struct vmw_private *dev_priv = vmw_priv(dev);
   2742 
   2743 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
   2744 	if (IS_ERR(dev_priv->suspend_state)) {
   2745 		int ret = PTR_ERR(dev_priv->suspend_state);
   2746 
   2747 		DRM_ERROR("Failed kms suspend: %d\n", ret);
   2748 		dev_priv->suspend_state = NULL;
   2749 
   2750 		return ret;
   2751 	}
   2752 
   2753 	return 0;
   2754 }
   2755 
   2756 
   2757 /**
   2758  * vmw_kms_resume - Re-enable modesetting and restore state
   2759  *
   2760  * @dev: Pointer to the drm device
   2761  * Return: 0 on success. Negative error code on failure.
   2762  *
   2763  * State is resumed from a previous vmw_kms_suspend(). It's illegal
   2764  * to call this function without a previous vmw_kms_suspend().
   2765  */
   2766 int vmw_kms_resume(struct drm_device *dev)
   2767 {
   2768 	struct vmw_private *dev_priv = vmw_priv(dev);
   2769 	int ret;
   2770 
   2771 	if (WARN_ON(!dev_priv->suspend_state))
   2772 		return 0;
   2773 
   2774 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
   2775 	dev_priv->suspend_state = NULL;
   2776 
   2777 	return ret;
   2778 }
   2779 
   2780 /**
   2781  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
   2782  *
   2783  * @dev: Pointer to the drm device
   2784  */
   2785 void vmw_kms_lost_device(struct drm_device *dev)
   2786 {
   2787 	drm_atomic_helper_shutdown(dev);
   2788 }
   2789 
   2790 /**
   2791  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
   2792  * @update: The closure structure.
   2793  *
   2794  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
   2795  * update on display unit.
   2796  *
   2797  * Return: 0 on success or a negative error code on failure.
   2798  */
   2799 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
   2800 {
   2801 	struct drm_plane_state *state = update->plane->state;
   2802 	struct drm_plane_state *old_state = update->old_state;
   2803 	struct drm_atomic_helper_damage_iter iter;
   2804 	struct drm_rect clip;
   2805 	struct drm_rect bb;
   2806 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
   2807 	uint32_t reserved_size = 0;
   2808 	uint32_t submit_size = 0;
   2809 	uint32_t curr_size = 0;
   2810 	uint32_t num_hits = 0;
   2811 	void *cmd_start;
   2812 	char *cmd_next;
   2813 	int ret;
   2814 
   2815 	/*
    2816 	 * Iterate in advance to check whether a plane update is really needed
    2817 	 * and to count the clips inside the plane src, for fifo allocation.
   2818 	 */
   2819 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
   2820 	drm_atomic_for_each_plane_damage(&iter, &clip)
   2821 		num_hits++;
   2822 
   2823 	if (num_hits == 0)
   2824 		return 0;
   2825 
   2826 	if (update->vfb->bo) {
   2827 		struct vmw_framebuffer_bo *vfbbo =
   2828 			container_of(update->vfb, typeof(*vfbbo), base);
   2829 
   2830 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
   2831 					    update->cpu_blit);
   2832 	} else {
   2833 		struct vmw_framebuffer_surface *vfbs =
   2834 			container_of(update->vfb, typeof(*vfbs), base);
   2835 
   2836 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
   2837 						  0, VMW_RES_DIRTY_NONE, NULL,
   2838 						  NULL);
   2839 	}
   2840 
   2841 	if (ret)
   2842 		return ret;
   2843 
   2844 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
   2845 	if (ret)
   2846 		goto out_unref;
   2847 
   2848 	reserved_size = update->calc_fifo_size(update, num_hits);
   2849 	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
   2850 	if (!cmd_start) {
   2851 		ret = -ENOMEM;
   2852 		goto out_revert;
   2853 	}
   2854 
   2855 	cmd_next = cmd_start;
   2856 
   2857 	if (update->post_prepare) {
   2858 		curr_size = update->post_prepare(update, cmd_next);
   2859 		cmd_next += curr_size;
   2860 		submit_size += curr_size;
   2861 	}
   2862 
   2863 	if (update->pre_clip) {
   2864 		curr_size = update->pre_clip(update, cmd_next, num_hits);
   2865 		cmd_next += curr_size;
   2866 		submit_size += curr_size;
   2867 	}
   2868 
   2869 	bb.x1 = INT_MAX;
   2870 	bb.y1 = INT_MAX;
   2871 	bb.x2 = INT_MIN;
   2872 	bb.y2 = INT_MIN;
   2873 
   2874 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
   2875 	drm_atomic_for_each_plane_damage(&iter, &clip) {
   2876 		uint32_t fb_x = clip.x1;
   2877 		uint32_t fb_y = clip.y1;
   2878 
   2879 		vmw_du_translate_to_crtc(state, &clip);
   2880 		if (update->clip) {
   2881 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
   2882 						 fb_y);
   2883 			cmd_next += curr_size;
   2884 			submit_size += curr_size;
   2885 		}
   2886 		bb.x1 = min_t(int, bb.x1, clip.x1);
   2887 		bb.y1 = min_t(int, bb.y1, clip.y1);
   2888 		bb.x2 = max_t(int, bb.x2, clip.x2);
   2889 		bb.y2 = max_t(int, bb.y2, clip.y2);
   2890 	}
   2891 
   2892 	curr_size = update->post_clip(update, cmd_next, &bb);
   2893 	submit_size += curr_size;
   2894 
   2895 	if (reserved_size < submit_size)
   2896 		submit_size = 0;
   2897 
   2898 	vmw_fifo_commit(update->dev_priv, submit_size);
   2899 
   2900 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
   2901 					 update->out_fence, NULL);
   2902 	return ret;
   2903 
   2904 out_revert:
   2905 	vmw_validation_revert(&val_ctx);
   2906 
   2907 out_unref:
   2908 	vmw_validation_unref_lists(&val_ctx);
   2909 	return ret;
   2910 }
   2911
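
         /*
          * Illustrative call-flow sketch (hypothetical callback names): a
          * display unit sets up a struct vmw_du_update_plane and then calls
          * the helper above. post_clip is mandatory, the others optional:
          *
          *	update.calc_fifo_size = example_calc_fifo_size;
          *	update.post_prepare = example_post_prepare;
          *	update.pre_clip = example_pre_clip;
          *	update.clip = example_clip;		(per damage rect)
          *	update.post_clip = example_post_clip;	(final command)
          *	ret = vmw_du_helper_plane_update(&update);
          */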