/* intel_fbdev.c, NetBSD revision 1.2 */
      1 /*	$NetBSD: intel_fbdev.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright  2007 David Airlie
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     23  * DEALINGS IN THE SOFTWARE.
     24  *
     25  * Authors:
     26  *     David Airlie
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: intel_fbdev.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");
     31 
     32 #include <linux/async.h>
     33 #include <linux/console.h>
     34 #include <linux/delay.h>
     35 #include <linux/errno.h>
     36 #include <linux/init.h>
     37 #include <linux/kernel.h>
     38 #include <linux/mm.h>
     39 #include <linux/module.h>
     40 #include <linux/string.h>
     41 #include <linux/sysrq.h>
     42 #include <linux/tty.h>
     43 #include <linux/vga_switcheroo.h>
     44 
     45 #include <drm/drm_crtc.h>
     46 #include <drm/drm_fb_helper.h>
     47 #include <drm/drm_fourcc.h>
     48 #include <drm/i915_drm.h>
     49 
     50 #include "i915_drv.h"
     51 #include "intel_display_types.h"
     52 #include "intel_fbdev.h"
     53 #include "intel_frontbuffer.h"
     54 
     55 static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
     56 {
     57 	return ifbdev->fb->frontbuffer;
     58 }
     59 
     60 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
     61 {
     62 	intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
     63 }
     64 
     65 #ifdef __NetBSD__
     66 #include "intelfb.h"
     67 #include <linux/nbsd-namespace.h>
     68 #endif
     69 
     70 #ifndef __NetBSD__
     71 static int intel_fbdev_set_par(struct fb_info *info)
     72 {
     73 	struct drm_fb_helper *fb_helper = info->par;
     74 	struct intel_fbdev *ifbdev =
     75 		container_of(fb_helper, struct intel_fbdev, helper);
     76 	int ret;
     77 
     78 	ret = drm_fb_helper_set_par(info);
     79 	if (ret == 0)
     80 		intel_fbdev_invalidate(ifbdev);
     81 
     82 	return ret;
     83 }
     84 
     85 static int intel_fbdev_blank(int blank, struct fb_info *info)
     86 {
     87 	struct drm_fb_helper *fb_helper = info->par;
     88 	struct intel_fbdev *ifbdev =
     89 		container_of(fb_helper, struct intel_fbdev, helper);
     90 	int ret;
     91 
     92 	ret = drm_fb_helper_blank(blank, info);
     93 	if (ret == 0)
     94 		intel_fbdev_invalidate(ifbdev);
     95 
     96 	return ret;
     97 }
     98 
     99 static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
    100 				   struct fb_info *info)
    101 {
    102 	struct drm_fb_helper *fb_helper = info->par;
    103 	struct intel_fbdev *ifbdev =
    104 		container_of(fb_helper, struct intel_fbdev, helper);
    105 	int ret;
    106 
    107 	ret = drm_fb_helper_pan_display(var, info);
    108 	if (ret == 0)
    109 		intel_fbdev_invalidate(ifbdev);
    110 
    111 	return ret;
    112 }
    113 
/*
 * fbdev ops for the i915 console (Linux path only; on NetBSD the
 * console is handled by the intelfb driver instead).  Drawing uses the
 * generic DRM cfb helpers; set_par/blank/pan are wrapped above to add
 * frontbuffer invalidation.
 */
static const struct fb_ops intelfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_set_par = intel_fbdev_set_par,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = intel_fbdev_pan_display,
	.fb_blank = intel_fbdev_blank,
};
    124 #endif
    125 
/*
 * Allocate a GEM object and DRM framebuffer for the fbdev console,
 * sized for the helper-chosen surface, and store it in ifbdev->fb.
 * Prefers stolen memory, falling back to shmem.  Returns 0 on success
 * or a negative errno.
 */
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size;	/* NOTE(review): pitch*height in an int may overflow for
			 * very large modes -- confirm upper bounds. */

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* Stride in bytes, rounded up to a 64-byte multiple. */
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = PAGE_ALIGN(size);

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	obj = ERR_PTR(-ENODEV);
	if (size * 2 < dev_priv->stolen_usable_size)
		obj = i915_gem_object_create_stolen(dev_priv, size);
	/* Fall back to shmem if stolen allocation failed or was skipped. */
	if (IS_ERR(obj))
		obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("failed to allocate framebuffer\n");
		return PTR_ERR(obj);
	}

	fb = intel_framebuffer_create(obj, &mode_cmd);
	/* Drop our object reference; on success the framebuffer presumably
	 * keeps the object alive -- see intel_framebuffer_create. */
	i915_gem_object_put(obj);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	ifbdev->fb = to_intel_framebuffer(fb);
	return 0;
}
    174 
    175 static int intelfb_create(struct drm_fb_helper *helper,
    176 			  struct drm_fb_helper_surface_size *sizes)
    177 {
    178 	struct intel_fbdev *ifbdev =
    179 		container_of(helper, struct intel_fbdev, helper);
    180 	struct intel_framebuffer *intel_fb = ifbdev->fb;
    181 	struct drm_device *dev = helper->dev;
    182 	struct drm_i915_private *dev_priv = to_i915(dev);
    183 	struct pci_dev *pdev = dev_priv->drm.pdev;
    184 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
    185 	const struct i915_ggtt_view view = {
    186 		.type = I915_GGTT_VIEW_NORMAL,
    187 	};
    188 	intel_wakeref_t wakeref;
    189 #ifndef __NetBSD__
    190 	struct fb_info *info;
    191 #endif
    192 	struct i915_vma *vma;
    193 	unsigned long flags = 0;
    194 	bool prealloc = false;
    195 	void __iomem *vaddr;
    196 	int ret;
    197 
    198 	if (intel_fb &&
    199 	    (sizes->fb_width > intel_fb->base.width ||
    200 	     sizes->fb_height > intel_fb->base.height)) {
    201 		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
    202 			      " releasing it\n",
    203 			      intel_fb->base.width, intel_fb->base.height,
    204 			      sizes->fb_width, sizes->fb_height);
    205 		drm_framebuffer_put(&intel_fb->base);
    206 		intel_fb = ifbdev->fb = NULL;
    207 	}
    208 	if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
    209 		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
    210 		ret = intelfb_alloc(helper, sizes);
    211 		if (ret)
    212 			return ret;
    213 		intel_fb = ifbdev->fb;
    214 	} else {
    215 		DRM_DEBUG_KMS("re-using BIOS fb\n");
    216 		prealloc = true;
    217 		sizes->fb_width = intel_fb->base.width;
    218 		sizes->fb_height = intel_fb->base.height;
    219 	}
    220 
    221 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
    222 
    223 	/* Pin the GGTT vma for our access via info->screen_base.
    224 	 * This also validates that any existing fb inherited from the
    225 	 * BIOS is suitable for own access.
    226 	 */
    227 	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base,
    228 					 &view, false, &flags);
    229 	if (IS_ERR(vma)) {
    230 		ret = PTR_ERR(vma);
    231 		goto out_unlock;
    232 	}
    233 
    234 	intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
    235 
    236 #ifdef __NetBSD__
    237     {
    238 	static const struct intelfb_attach_args zero_ifa;
    239 	struct intelfb_attach_args ifa = zero_ifa;
    240 
    241 	ifa.ifa_drm_dev = dev;
    242 	ifa.ifa_fb_helper = helper;
    243 	ifa.ifa_fb_sizes = *sizes;
    244 	ifa.ifa_fb_bst = dev->pdev->pd_pa.pa_memt;
    245 	ifa.ifa_fb_addr = (dev_priv->gtt.mappable_base +
    246 	    i915_gem_obj_ggtt_offset(obj));
    247 	ifa.ifa_fb_size = size;
    248 	ifa.ifa_fb_zero = (ifbdev->fb->obj->stolen && !prealloc);
    249 
    250 	/*
    251 	 * XXX Should do this asynchronously, since we hold
    252 	 * dev->struct_mutex.
    253 	 */
    254 	helper->fbdev = config_found(dev->dev, &ifa, NULL,
    255 	    CFARGS(.iattr = "intelfbbus"));
    256 	if (helper->fbdev == NULL) {
    257 		DRM_ERROR("unable to attach intelfb\n");
    258 		ret = -ENXIO;
    259 		goto out_unpin;
    260 	}
    261 	fb = &ifbdev->fb->base;
    262 	ifbdev->helper.fb = fb;
    263     }
    264 #else
    265 	info = drm_fb_helper_alloc_fbi(helper);
    266 	if (IS_ERR(info)) {
    267 		DRM_ERROR("Failed to allocate fb_info\n");
    268 		ret = PTR_ERR(info);
    269 		goto out_unpin;
    270 	}
    271 
    272 	ifbdev->helper.fb = &ifbdev->fb->base;
    273 
    274 	info->fbops = &intelfb_ops;
    275 
    276 	/* setup aperture base/size for vesafb takeover */
    277 	info->apertures->ranges[0].base = ggtt->gmadr.start;
    278 	info->apertures->ranges[0].size = ggtt->mappable_end;
    279 
    280 	/* Our framebuffer is the entirety of fbdev's system memory */
    281 	info->fix.smem_start =
    282 		(unsigned long)(ggtt->gmadr.start + vma->node.start);
    283 	info->fix.smem_len = vma->node.size;
    284 
    285 	vaddr = i915_vma_pin_iomap(vma);
    286 	if (IS_ERR(vaddr)) {
    287 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
    288 		ret = PTR_ERR(vaddr);
    289 		goto out_unpin;
    290 	}
    291 	info->screen_base = vaddr;
    292 	info->screen_size = vma->node.size;
    293 
    294 	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
    295 
    296 	/* If the object is shmemfs backed, it will have given us zeroed pages.
    297 	 * If the object is stolen however, it will be full of whatever
    298 	 * garbage was left in there.
    299 	 */
    300 	if (vma->obj->stolen && !prealloc)
    301 		memset_io(info->screen_base, 0, info->screen_size);
    302 #endif
    303 
    304 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
    305 
    306 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08"PRIx64"\n",
    307 		      ifbdev->fb->base.width, ifbdev->fb->base.height,
    308 		      i915_ggtt_offset(vma));
    309 	ifbdev->vma = vma;
    310 	ifbdev->vma_flags = flags;
    311 
    312 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
    313 #ifndef __NetBSD__
    314 	vga_switcheroo_client_fb_set(pdev, info);
    315 #endif
    316 	return 0;
    317 
    318 out_unpin:
    319 	intel_unpin_fb_vma(vma, flags);
    320 out_unlock:
    321 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
    322 	return ret;
    323 }
    324 
/* fb_probe is called by the DRM fb helper to create the console surface. */
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.fb_probe = intelfb_create,
};
    328 
/*
 * Tear down the fbdev: detach the intelfb console (NetBSD) or finalize
 * the fb helper (Linux), unpin the framebuffer vma, drop the
 * framebuffer, and free the ifbdev itself.
 */
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
	/* We rely on the object-free to release the VMA pinning for
	 * the info->screen_base mmaping. Leaking the VMA is simpler than
	 * trying to rectify all the possible error paths leading here.
	 */

#ifdef __NetBSD__
    {
	int ret;
	/* XXX errno NetBSD->Linux */
	ret = -config_detach(ifbdev->helper.fbdev, DETACH_FORCE);
	if (ret)
		DRM_ERROR("failed to detach intelfb: %d\n", ret);
	ifbdev->helper.fbdev = NULL;
    }
#else
	drm_fb_helper_fini(&ifbdev->helper);
#endif

	/* Release the pin taken by intelfb_create, if any. */
	if (ifbdev->vma)
		intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);

	if (ifbdev->fb)
		drm_framebuffer_remove(&ifbdev->fb->base);

	kfree(ifbdev);
}
    357 
    358 /*
    359  * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
    360  * The core display code will have read out the current plane configuration,
    361  * so we use that to figure out if there's an object for us to use as the
    362  * fb, and if so, we re-use it for the fbdev configuration.
    363  *
    364  * Note we only support a single fb shared across pipes for boot (mostly for
    365  * fbcon), so we just find the biggest and use that.
    366  */
static bool intel_fbdev_init_bios(struct drm_device *dev,
				 struct intel_fbdev *ifbdev)
{
	struct intel_framebuffer *fb = NULL;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned int max_size = 0;

	/* Find the largest fb */
	for_each_crtc(dev, crtc) {
		struct drm_i915_gem_object *obj =
			intel_fb_obj(crtc->primary->state->fb);
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc->state->active || !obj) {
			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

		if (obj->base.size > max_size) {
			DRM_DEBUG_KMS("found possible fb from plane %c\n",
				      pipe_name(intel_crtc->pipe));
			fb = to_intel_framebuffer(crtc->primary->state->fb);
			max_size = obj->base.size;
		}
	}

	if (!fb) {
		DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
		goto out;
	}

	/* Now make sure all the pipes will fit into it */
	for_each_crtc(dev, crtc) {
		unsigned int cur_size;

		intel_crtc = to_intel_crtc(crtc);

		if (!crtc->state->active) {
			DRM_DEBUG_KMS("pipe %c not active, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

		DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
			      pipe_name(intel_crtc->pipe));

		/*
		 * See if the plane fb we found above will fit on this
		 * pipe.  Note we need to use the selected fb's pitch and bpp
		 * rather than the current pipe's, since they differ.
		 */
		cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
		cur_size = cur_size * fb->base.format->cpp[0];
		if (fb->base.pitches[0] < cur_size) {
			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, fb->base.pitches[0]);
			fb = NULL;
			break;
		}

		/* Height check: bytes required for this pipe vs fb size. */
		cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
		cur_size *= fb->base.pitches[0];
		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
			      pipe_name(intel_crtc->pipe),
			      crtc->state->adjusted_mode.crtc_hdisplay,
			      crtc->state->adjusted_mode.crtc_vdisplay,
			      fb->base.format->cpp[0] * 8,
			      cur_size);

		if (cur_size > max_size) {
			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, max_size);
			fb = NULL;
			break;
		}

		DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
			      pipe_name(intel_crtc->pipe),
			      max_size, cur_size);
	}

	if (!fb) {
		DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
		goto out;
	}

	ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
	ifbdev->fb = fb;

	/* Keep the BIOS fb alive for the fbdev's lifetime. */
	drm_framebuffer_get(&ifbdev->fb->base);

	/* Final pass to check if any active pipes don't have fbs */
	for_each_crtc(dev, crtc) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc->state->active)
			continue;

		WARN(!crtc->primary->state->fb,
		     "re-used BIOS config but lost an fb on crtc %d\n",
		     crtc->base.id);
	}


	DRM_DEBUG_KMS("using BIOS fb for initial console\n");
	return true;

out:

	return false;
}
    483 
    484 static void intel_fbdev_suspend_worker(struct work_struct *work)
    485 {
    486 #ifndef __NetBSD__		/* XXX fb suspend */
    487 	intel_fbdev_set_suspend(&container_of(work,
    488 					      struct drm_i915_private,
    489 					      fbdev_suspend_work)->drm,
    490 				FBINFO_STATE_RUNNING,
    491 				true);
    492 #endif
    493 }
    494 
    495 int intel_fbdev_init(struct drm_device *dev)
    496 {
    497 	struct drm_i915_private *dev_priv = to_i915(dev);
    498 	struct intel_fbdev *ifbdev;
    499 	int ret;
    500 
    501 	if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
    502 		return -ENODEV;
    503 
    504 	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
    505 	if (ifbdev == NULL)
    506 		return -ENOMEM;
    507 
    508 	mutex_init(&ifbdev->hpd_lock);
    509 	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
    510 
    511 	if (!intel_fbdev_init_bios(dev, ifbdev))
    512 		ifbdev->preferred_bpp = 32;
    513 
    514 	ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
    515 	if (ret) {
    516 		kfree(ifbdev);
    517 		return ret;
    518 	}
    519 
    520 	dev_priv->fbdev = ifbdev;
    521 	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
    522 
    523 	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
    524 
    525 	return 0;
    526 }
    527 
    528 static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
    529 {
    530 	struct intel_fbdev *ifbdev = data;
    531 
    532 	/* Due to peculiar init order wrt to hpd handling this is separate. */
    533 	if (drm_fb_helper_initial_config(&ifbdev->helper,
    534 					 ifbdev->preferred_bpp))
    535 		intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
    536 }
    537 
    538 void intel_fbdev_initial_config_async(struct drm_device *dev)
    539 {
    540 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
    541 
    542 	if (!ifbdev)
    543 		return;
    544 
    545 	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
    546 }
    547 
    548 static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
    549 {
    550 	if (!ifbdev->cookie)
    551 		return;
    552 
    553 	/* Only serialises with all preceding async calls, hence +1 */
    554 	async_synchronize_cookie(ifbdev->cookie + 1);
    555 	ifbdev->cookie = 0;
    556 }
    557 
    558 void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
    559 {
    560 	struct intel_fbdev *ifbdev = dev_priv->fbdev;
    561 
    562 	if (!ifbdev)
    563 		return;
    564 
    565 	cancel_work_sync(&dev_priv->fbdev_suspend_work);
    566 	if (!current_is_async())
    567 		intel_fbdev_sync(ifbdev);
    568 
    569 	drm_fb_helper_unregister_fbi(&ifbdev->helper);
    570 }
    571 
    572 void intel_fbdev_fini(struct drm_i915_private *dev_priv)
    573 {
    574 	struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
    575 
    576 	if (!ifbdev)
    577 		return;
    578 
    579 	intel_fbdev_destroy(ifbdev);
    580 }
    581 
    582 /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
    583  * processing, fbdev will perform a full connector reprobe if a hotplug event
    584  * was received while HPD was suspended.
    585  */
    586 static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
    587 {
    588 	bool send_hpd = false;
    589 
    590 	mutex_lock(&ifbdev->hpd_lock);
    591 	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
    592 	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
    593 	ifbdev->hpd_waiting = false;
    594 	mutex_unlock(&ifbdev->hpd_lock);
    595 
    596 	if (send_hpd) {
    597 		DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
    598 		drm_fb_helper_hotplug_event(&ifbdev->helper);
    599 	}
    600 }
    601 
/*
 * Suspend or resume the fbdev console.  `state' is FBINFO_STATE_RUNNING
 * or FBINFO_STATE_SUSPENDED; `synchronous' controls whether we may
 * defer the work to avoid contending for the console lock on resume.
 * No-op on NetBSD (fb suspend not wired up -- see XXX below).
 */
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
#ifndef __NetBSD__		/* XXX fb suspend */
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct fb_info *info;

	/* Nothing to do until intelfb_create has pinned a framebuffer. */
	if (!ifbdev || !ifbdev->vma)
		return;

	info = ifbdev->helper.fbdev;

	if (synchronous) {
		/* Flush any pending work to turn the console on, and then
		 * wait to turn it off. It must be synchronous as we are
		 * about to suspend or unload the driver.
		 *
		 * Note that from within the work-handler, we cannot flush
		 * ourselves, so only flush outstanding work upon suspend!
		 */
		if (state != FBINFO_STATE_RUNNING)
			flush_work(&dev_priv->fbdev_suspend_work);

		console_lock();
	} else {
		/*
		 * The console lock can be pretty contented on resume due
		 * to all the printk activity.  Try to keep it out of the hot
		 * path of resume if possible.
		 */
		WARN_ON(state != FBINFO_STATE_RUNNING);
		if (!console_trylock()) {
			/* Don't block our own workqueue as this can
			 * be run in parallel with other i915.ko tasks.
			 */
			schedule_work(&dev_priv->fbdev_suspend_work);
			return;
		}
	}

	/* On resume from hibernation: If the object is shmemfs backed, it has
	 * been restored from swap. If the object is stolen however, it will be
	 * full of whatever garbage was left in there.
	 */
	if (state == FBINFO_STATE_RUNNING &&
	    intel_fb_obj(&ifbdev->fb->base)->stolen)
		memset_io(info->screen_base, 0, info->screen_size);

	drm_fb_helper_set_suspend(&ifbdev->helper, state);
	console_unlock();

	/* Resume (or suspend) delayed HPD handling last. */
	intel_fbdev_hpd_set_suspend(ifbdev, state);
#endif
}
    656 
    657 void intel_fbdev_output_poll_changed(struct drm_device *dev)
    658 {
    659 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
    660 	bool send_hpd;
    661 
    662 	if (!ifbdev)
    663 		return;
    664 
    665 	intel_fbdev_sync(ifbdev);
    666 
    667 	mutex_lock(&ifbdev->hpd_lock);
    668 	send_hpd = !ifbdev->hpd_suspended;
    669 	ifbdev->hpd_waiting = true;
    670 	mutex_unlock(&ifbdev->hpd_lock);
    671 
    672 	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
    673 		drm_fb_helper_hotplug_event(&ifbdev->helper);
    674 }
    675 
    676 void intel_fbdev_restore_mode(struct drm_device *dev)
    677 {
    678 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
    679 
    680 	if (!ifbdev)
    681 		return;
    682 
    683 	intel_fbdev_sync(ifbdev);
    684 	if (!ifbdev->vma)
    685 		return;
    686 
    687 	if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
    688 		intel_fbdev_invalidate(ifbdev);
    689 }
    690