/*	$NetBSD: vmwgfx_fb.c,v 1.1.1.3 2018/08/27 01:35:00 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fb.c,v 1.1.1.3 2018/08/27 01:35:00 riastradh Exp $");

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)

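/*
 * Per-device fbdev state: the vmalloc shadow buffer fbcon draws into, the
 * buffer object backing the KMS framebuffer, the framebuffer and mode
 * currently set, and the accumulated dirty rectangle.
 */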
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;
	void *bo_ptr;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

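/* Set a truecolor pseudo-palette entry; only 24 and 32 bit depths are supported. */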
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
			  par->set_fb->bits_per_pixel);
		return 1;
	}

	return 0;
}

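/*
 * Validate a requested mode: only 32 bpp is accepted, and the geometry must
 * fit both the fbdev framebuffer and the available VRAM.
 */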
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

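/*
 * Delayed-work handler that copies the accumulated dirty region from the
 * vmalloc shadow buffer into the mapped buffer object and then reports the
 * dirty rectangle to KMS.
 */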
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;

	if (vmw_priv->suspended)
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unlock;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = (cur_fb->bits_per_pixel + 7) / 8;
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)par->bo_ptr  +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;

		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

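/*
 * Grow the pending dirty rectangle to include the given area and, if dirty
 * tracking is active and nothing was pending, schedule the flush worker.
 */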
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share this work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

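/* Record a new panning offset and mark the whole displayed area dirty. */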
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

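/*
 * fb_deferred_io callback: translate the list of written pages into a dirty
 * scanline range and schedule an immediate flush.
 */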
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
}

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

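/*
 * The drawing ops render into the shadow buffer through the generic cfb_*
 * helpers and then mark the touched rectangle dirty.
 */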
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

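/* Allocate a buffer object in system memory to back the fbdev framebuffer. */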
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &vmw_sys_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;
	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

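/*
 * Undo the current KMS setup: unset the mode, drop the framebuffer and,
 * when requested, unmap and unpin or unreference the backing buffer object.
 */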
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = drm_mode_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_unreference(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo) {
		if (par->bo_ptr) {
			ttm_bo_kunmap(&par->map);
			par->bo_ptr = NULL;
		}
		if (unref_bo)
			vmw_dmabuf_unreference(&par->vmw_bo);
		else
			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
	}

	return 0;
}

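/*
 * (Re)create the KMS framebuffer for the current variable screen info,
 * reusing the existing buffer object unless its size is no longer suitable.
 */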
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.bpp = var->bits_per_pixel;
	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->bits_per_pixel == mode_cmd.bpp &&
	    cur_fb->depth == mode_cmd.depth &&
	    cur_fb->pitches[0] == mode_cmd.pitch)
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

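/*
 * Apply the current variable screen info: build a matching display mode,
 * (re)create the framebuffer, set the mode on the crtc, and make sure the
 * buffer object is pinned and mapped before dirty flushing resumes.
 */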
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *old_mode;
	struct drm_display_mode *mode;
	int ret;

	old_mode = par->set_mode;
	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (old_mode && drm_mode_equal(old_mode, mode)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		mode = old_mode;
		old_mode = NULL;
	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = drm_mode_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	if (!par->bo_ptr) {
		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);

		/*
		 * Pin before mapping. Since we don't know in what placement
		 * to pin, call into KMS to do it for us.
		 */
		ret = vfb->pin(vfb);
		if (ret) {
			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
			goto out_unlock;
		}

		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
				  par->vmw_bo->base.num_pages, &par->map);
		if (ret) {
			vfb->unpin(vfb);
			DRM_ERROR("Could not map the fbdev framebuffer.\n");
			goto out_unlock;
		}

		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	}


	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we won't
	 * schedule new work, so let's do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (old_mode)
		drm_mode_destroy(vmw_priv->dev, old_mode);
	par->set_mode = mode;

	drm_modeset_unlock_all(vmw_priv->dev);
	mutex_unlock(&par->bo_mutex);

	return ret;
}


static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

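/*
 * Create, set up and register the fbdev device, including the vmalloc
 * shadow buffer and the deferred I/O dirty tracking.
 */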
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	drm_modeset_lock_all(vmw_priv->dev);
	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret) {
		drm_modeset_unlock_all(vmw_priv->dev);
		goto err_kms;
	}

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;
	drm_modeset_unlock_all(vmw_priv->dev);

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

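/* Unregister the fbdev device and release the resources acquired in vmw_fb_init(). */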
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	(void) vmw_fb_kms_detach(par, true, true);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

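/* Disable dirty tracking, flush pending work and detach the framebuffer from KMS. */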
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	mutex_lock(&par->bo_mutex);
	drm_modeset_lock_all(vmw_priv->dev);
	(void) vmw_fb_kms_detach(par, true, false);
	drm_modeset_unlock_all(vmw_priv->dev);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

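/* Reapply the current fbdev mode and re-enable dirty tracking. */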
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	vmw_fb_set_par(info);
	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	return 0;
}
    856