vmwgfx_fb.c revision 1.1.1.2
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

#define VMW_DIRTY_DELAY (HZ / 30)

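/*
 * Per-fbdev state.  The framebuffer contents live twice: in a
 * system-memory shadow ("vmalloc") that the fbdev drawing ops render
 * into, and in a VRAM-backed TTM buffer ("vmw_bo", mapped at "bo_ptr")
 * that the device scans out.  A dirty rectangle accumulates under
 * "dirty.lock" until the deferred-I/O worker copies it from the
 * shadow into VRAM.
 */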
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

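/*
 * Store one truecolor palette entry.  Only the 16 base console colors
 * are kept; each 16-bit component is packed down to 8:8:8 XRGB in the
 * pseudo palette.
 */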
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue  & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

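/*
 * Validate a requested mode.  Only 32 bits per pixel is accepted; the
 * effective depth (24 or 32) is inferred from whether the caller asked
 * for an alpha channel.
 */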
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry cannot fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry does not fit in available VRAM\n");
		return -EINVAL;
	}

	return 0;
}

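/*
 * Program the device for the current mode.  When the host supports
 * display topology we also (re)describe display 0 explicitly through
 * the SVGA_REG_DISPLAY_* registers.
 */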
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO: check whether pitch and offset have changed */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Warn loudly here: if the framebuffer is not at offset 0 the
	 * user probably cannot see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */

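/*
 * Copy the accumulated dirty region from the shadow buffer into VRAM,
 * then tell the device to redraw it with an SVGA_CMD_UPDATE FIFO
 * command.
 */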
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

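	/*
	 * Copy the dirty columns from the shadow into VRAM.  Note that
	 * this intentionally over-flushes: it walks every scanline from
	 * y down to the end of the shadow buffer, not just the h dirty
	 * rows.
	 */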
	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

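/*
 * Grow the pending dirty rectangle to cover (x1, y1)-(x2, y2).  The
 * first mark after a flush also schedules the deferred-I/O worker
 * that will eventually perform the flush.
 */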
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work; we share the
		 * delayed work with the defio system. */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

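/*
 * Deferred-I/O callback: fbdefio hands us the list of pages that were
 * written through the mmap'd shadow buffer.  Convert that to a
 * scanline span, mark the full width of those lines dirty and flush
 * immediately.
 */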
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

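/*
 * The drawing ops simply wrap the generic cfb_* implementations,
 * which render into the shadow buffer, and then mark the touched
 * rectangle dirty.
 */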
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

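/*
 * Allocate the VRAM buffer object that backs the framebuffer.  The
 * placement's lpfn is capped to the buffer size so the bo ends up at
 * the very start of VRAM, which is where the device scans out from
 * (see the SVGA_REG_FB_OFFSET check in vmw_fb_set_par()).
 */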
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->reservation_sem);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}

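/*
 * Bring up the fbdev emulation: allocate the shadow buffer and the
 * VRAM bo, fill in the fixed and variable screen info, and register
 * the framebuffer with deferred I/O enabled.
 */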
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX Like bpp and depth above, shouldn't these be configurable? */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

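/*
 * Tear down the fbdev emulation and release the buffers created by
 * vmw_fb_init().
 */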
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

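/*
 * Stop fbdev scanout, typically while another master takes over:
 * disable dirty tracking, drain the deferred-I/O worker, then unmap
 * and unpin the VRAM buffer.
 */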
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}

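/*
 * Resume fbdev scanout: pin the buffer back at the start of VRAM,
 * remap it, re-enable dirty tracking and force a full-screen refresh.
 */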
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

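/*
 * If pinning failed, dirty tracking was never re-enabled, so the flush
 * scheduled below is a no-op; we still restore the mode registers.
 */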
err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there was already something dirty we won't schedule new
	 * work, so let's do it now. */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}
    658