Home | History | Annotate | Line # | Download | only in gvt
      1 /*	$NetBSD: handlers.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice (including the next
     14  * paragraph) shall be included in all copies or substantial portions of the
     15  * Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     23  * SOFTWARE.
     24  *
     25  * Authors:
     26  *    Kevin Tian <kevin.tian (at) intel.com>
     27  *    Eddie Dong <eddie.dong (at) intel.com>
     28  *    Zhiyuan Lv <zhiyuan.lv (at) intel.com>
     29  *
     30  * Contributors:
     31  *    Min He <min.he (at) intel.com>
     32  *    Tina Zhang <tina.zhang (at) intel.com>
     33  *    Pei Zhang <pei.zhang (at) intel.com>
     34  *    Niu Bing <bing.niu (at) intel.com>
     35  *    Ping Gao <ping.a.gao (at) intel.com>
     36  *    Zhi Wang <zhi.a.wang (at) intel.com>
     37  *
     38 
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 __KERNEL_RCSID(0, "$NetBSD: handlers.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
     43 
     44 #include "i915_drv.h"
     45 #include "gvt.h"
     46 #include "i915_pvinfo.h"
     47 
     48 /* XXX FIXME i915 has changed PP_XXX definition */
     49 #define PCH_PP_STATUS  _MMIO(0xc7200)
     50 #define PCH_PP_CONTROL _MMIO(0xc7204)
     51 #define PCH_PP_ON_DELAYS _MMIO(0xc7208)
     52 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
     53 #define PCH_PP_DIVISOR _MMIO(0xc7210)
     54 
     55 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
     56 {
     57 	if (IS_BROADWELL(gvt->dev_priv))
     58 		return D_BDW;
     59 	else if (IS_SKYLAKE(gvt->dev_priv))
     60 		return D_SKL;
     61 	else if (IS_KABYLAKE(gvt->dev_priv))
     62 		return D_KBL;
     63 	else if (IS_BROXTON(gvt->dev_priv))
     64 		return D_BXT;
     65 	else if (IS_COFFEELAKE(gvt->dev_priv))
     66 		return D_CFL;
     67 
     68 	return 0;
     69 }
     70 
     71 bool intel_gvt_match_device(struct intel_gvt *gvt,
     72 		unsigned long device)
     73 {
     74 	return intel_gvt_get_device_type(gvt) & device;
     75 }
     76 
     77 static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
     78 	void *p_data, unsigned int bytes)
     79 {
     80 	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
     81 }
     82 
     83 static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
     84 	void *p_data, unsigned int bytes)
     85 {
     86 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
     87 }
     88 
     89 static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
     90 						  unsigned int offset)
     91 {
     92 	struct intel_gvt_mmio_info *e;
     93 
     94 	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
     95 		if (e->offset == offset)
     96 			return e;
     97 	}
     98 	return NULL;
     99 }
    100 
    101 static int new_mmio_info(struct intel_gvt *gvt,
    102 		u32 offset, u8 flags, u32 size,
    103 		u32 addr_mask, u32 ro_mask, u32 device,
    104 		gvt_mmio_func read, gvt_mmio_func write)
    105 {
    106 	struct intel_gvt_mmio_info *info, *p;
    107 	u32 start, end, i;
    108 
    109 	if (!intel_gvt_match_device(gvt, device))
    110 		return 0;
    111 
    112 	if (WARN_ON(!IS_ALIGNED(offset, 4)))
    113 		return -EINVAL;
    114 
    115 	start = offset;
    116 	end = offset + size;
    117 
    118 	for (i = start; i < end; i += 4) {
    119 		info = kzalloc(sizeof(*info), GFP_KERNEL);
    120 		if (!info)
    121 			return -ENOMEM;
    122 
    123 		info->offset = i;
    124 		p = find_mmio_info(gvt, info->offset);
    125 		if (p) {
    126 			WARN(1, "dup mmio definition offset %x\n",
    127 				info->offset);
    128 			kfree(info);
    129 
    130 			/* We return -EEXIST here to make GVT-g load fail.
    131 			 * So duplicated MMIO can be found as soon as
    132 			 * possible.
    133 			 */
    134 			return -EEXIST;
    135 		}
    136 
    137 		info->ro_mask = ro_mask;
    138 		info->device = device;
    139 		info->read = read ? read : intel_vgpu_default_mmio_read;
    140 		info->write = write ? write : intel_vgpu_default_mmio_write;
    141 		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
    142 		INIT_HLIST_NODE(&info->node);
    143 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
    144 		gvt->mmio.num_tracked_mmio++;
    145 	}
    146 	return 0;
    147 }
    148 
    149 /**
    150  * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
    151  * @gvt: a GVT device
    152  * @offset: register offset
    153  *
    154  * Returns:
    155  * Ring ID on success, negative error code if failed.
    156  */
    157 int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
    158 		unsigned int offset)
    159 {
    160 	enum intel_engine_id id;
    161 	struct intel_engine_cs *engine;
    162 
    163 	offset &= ~GENMASK(11, 0);
    164 	for_each_engine(engine, gvt->dev_priv, id) {
    165 		if (engine->mmio_base == offset)
    166 			return id;
    167 	}
    168 	return -ENODEV;
    169 }
    170 
    171 #define offset_to_fence_num(offset) \
    172 	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
    173 
    174 #define fence_num_to_offset(num) \
    175 	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
    176 
    177 
    178 void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
    179 {
    180 	switch (reason) {
    181 	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
    182 		pr_err("Detected your guest driver doesn't support GVT-g.\n");
    183 		break;
    184 	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
    185 		pr_err("Graphics resource is not enough for the guest\n");
    186 		break;
    187 	case GVT_FAILSAFE_GUEST_ERR:
    188 		pr_err("GVT Internal error  for the guest\n");
    189 		break;
    190 	default:
    191 		break;
    192 	}
    193 	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
    194 	vgpu->failsafe = true;
    195 }
    196 
    197 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
    198 		unsigned int fence_num, void *p_data, unsigned int bytes)
    199 {
    200 	unsigned int max_fence = vgpu_fence_sz(vgpu);
    201 
    202 	if (fence_num >= max_fence) {
    203 		gvt_vgpu_err("access oob fence reg %d/%d\n",
    204 			     fence_num, max_fence);
    205 
    206 		/* When guest access oob fence regs without access
    207 		 * pv_info first, we treat guest not supporting GVT,
    208 		 * and we will let vgpu enter failsafe mode.
    209 		 */
    210 		if (!vgpu->pv_notified)
    211 			enter_failsafe_mode(vgpu,
    212 					GVT_FAILSAFE_UNSUPPORTED_GUEST);
    213 
    214 		memset(p_data, 0, bytes);
    215 		return -EINVAL;
    216 	}
    217 	return 0;
    218 }
    219 
    220 static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
    221 		unsigned int offset, void *p_data, unsigned int bytes)
    222 {
    223 	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
    224 
    225 	if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
    226 		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
    227 			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
    228 		else if (!ips)
    229 			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
    230 		else {
    231 			/* All engines must be enabled together for vGPU,
    232 			 * since we don't know which engine the ppgtt will
    233 			 * bind to when shadowing.
    234 			 */
    235 			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
    236 				     ips);
    237 			return -EINVAL;
    238 		}
    239 	}
    240 
    241 	write_vreg(vgpu, offset, p_data, bytes);
    242 	return 0;
    243 }
    244 
    245 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
    246 		void *p_data, unsigned int bytes)
    247 {
    248 	int ret;
    249 
    250 	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
    251 			p_data, bytes);
    252 	if (ret)
    253 		return ret;
    254 	read_vreg(vgpu, off, p_data, bytes);
    255 	return 0;
    256 }
    257 
    258 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
    259 		void *p_data, unsigned int bytes)
    260 {
    261 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    262 	unsigned int fence_num = offset_to_fence_num(off);
    263 	int ret;
    264 
    265 	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
    266 	if (ret)
    267 		return ret;
    268 	write_vreg(vgpu, off, p_data, bytes);
    269 
    270 	mmio_hw_access_pre(dev_priv);
    271 	intel_vgpu_write_fence(vgpu, fence_num,
    272 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
    273 	mmio_hw_access_post(dev_priv);
    274 	return 0;
    275 }
    276 
    277 #define CALC_MODE_MASK_REG(old, new) \
    278 	(((new) & GENMASK(31, 16)) \
    279 	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
    280 	 | ((new) & ((new) >> 16))))
    281 
    282 static int mul_force_wake_write(struct intel_vgpu *vgpu,
    283 		unsigned int offset, void *p_data, unsigned int bytes)
    284 {
    285 	u32 old, new;
    286 	u32 ack_reg_offset;
    287 
    288 	old = vgpu_vreg(vgpu, offset);
    289 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
    290 
    291 	if (INTEL_GEN(vgpu->gvt->dev_priv)  >=  9) {
    292 		switch (offset) {
    293 		case FORCEWAKE_RENDER_GEN9_REG:
    294 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
    295 			break;
    296 		case FORCEWAKE_BLITTER_GEN9_REG:
    297 			ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
    298 			break;
    299 		case FORCEWAKE_MEDIA_GEN9_REG:
    300 			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
    301 			break;
    302 		default:
    303 			/*should not hit here*/
    304 			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
    305 			return -EINVAL;
    306 		}
    307 	} else {
    308 		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
    309 	}
    310 
    311 	vgpu_vreg(vgpu, offset) = new;
    312 	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
    313 	return 0;
    314 }
    315 
    316 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    317 			    void *p_data, unsigned int bytes)
    318 {
    319 	intel_engine_mask_t engine_mask = 0;
    320 	u32 data;
    321 
    322 	write_vreg(vgpu, offset, p_data, bytes);
    323 	data = vgpu_vreg(vgpu, offset);
    324 
    325 	if (data & GEN6_GRDOM_FULL) {
    326 		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
    327 		engine_mask = ALL_ENGINES;
    328 	} else {
    329 		if (data & GEN6_GRDOM_RENDER) {
    330 			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
    331 			engine_mask |= BIT(RCS0);
    332 		}
    333 		if (data & GEN6_GRDOM_MEDIA) {
    334 			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
    335 			engine_mask |= BIT(VCS0);
    336 		}
    337 		if (data & GEN6_GRDOM_BLT) {
    338 			gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
    339 			engine_mask |= BIT(BCS0);
    340 		}
    341 		if (data & GEN6_GRDOM_VECS) {
    342 			gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
    343 			engine_mask |= BIT(VECS0);
    344 		}
    345 		if (data & GEN8_GRDOM_MEDIA2) {
    346 			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
    347 			engine_mask |= BIT(VCS1);
    348 		}
    349 		if (data & GEN9_GRDOM_GUC) {
    350 			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
    351 			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
    352 		}
    353 		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
    354 	}
    355 
    356 	/* vgpu_lock already hold by emulate mmio r/w */
    357 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
    358 
    359 	/* sw will wait for the device to ack the reset request */
    360 	vgpu_vreg(vgpu, offset) = 0;
    361 
    362 	return 0;
    363 }
    364 
    365 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
    366 		void *p_data, unsigned int bytes)
    367 {
    368 	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
    369 }
    370 
    371 static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    372 		void *p_data, unsigned int bytes)
    373 {
    374 	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
    375 }
    376 
    377 static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
    378 		unsigned int offset, void *p_data, unsigned int bytes)
    379 {
    380 	write_vreg(vgpu, offset, p_data, bytes);
    381 
    382 	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
    383 		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
    384 		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
    385 		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
    386 		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
    387 
    388 	} else
    389 		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
    390 			~(PP_ON | PP_SEQUENCE_POWER_DOWN
    391 					| PP_CYCLE_DELAY_ACTIVE);
    392 	return 0;
    393 }
    394 
    395 static int transconf_mmio_write(struct intel_vgpu *vgpu,
    396 		unsigned int offset, void *p_data, unsigned int bytes)
    397 {
    398 	write_vreg(vgpu, offset, p_data, bytes);
    399 
    400 	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
    401 		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
    402 	else
    403 		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
    404 	return 0;
    405 }
    406 
    407 static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    408 		void *p_data, unsigned int bytes)
    409 {
    410 	write_vreg(vgpu, offset, p_data, bytes);
    411 
    412 	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
    413 		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
    414 	else
    415 		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
    416 
    417 	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
    418 		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
    419 	else
    420 		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
    421 
    422 	return 0;
    423 }
    424 
    425 static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
    426 		void *p_data, unsigned int bytes)
    427 {
    428 	switch (offset) {
    429 	case 0xe651c:
    430 	case 0xe661c:
    431 	case 0xe671c:
    432 	case 0xe681c:
    433 		vgpu_vreg(vgpu, offset) = 1 << 17;
    434 		break;
    435 	case 0xe6c04:
    436 		vgpu_vreg(vgpu, offset) = 0x3;
    437 		break;
    438 	case 0xe6e1c:
    439 		vgpu_vreg(vgpu, offset) = 0x2f << 16;
    440 		break;
    441 	default:
    442 		return -EINVAL;
    443 	}
    444 
    445 	read_vreg(vgpu, offset, p_data, bytes);
    446 	return 0;
    447 }
    448 
    449 static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    450 		void *p_data, unsigned int bytes)
    451 {
    452 	u32 data;
    453 
    454 	write_vreg(vgpu, offset, p_data, bytes);
    455 	data = vgpu_vreg(vgpu, offset);
    456 
    457 	if (data & PIPECONF_ENABLE)
    458 		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
    459 	else
    460 		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
    461 	/* vgpu_lock already hold by emulate mmio r/w */
    462 	mutex_unlock(&vgpu->vgpu_lock);
    463 	intel_gvt_check_vblank_emulation(vgpu->gvt);
    464 	mutex_lock(&vgpu->vgpu_lock);
    465 	return 0;
    466 }
    467 
    468 /* ascendingly sorted */
    469 static i915_reg_t force_nonpriv_white_list[] = {
    470 	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
    471 	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
    472 	PS_INVOCATION_COUNT,//_MMIO(0x2348)
    473 	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
    474 	_MMIO(0x2690),
    475 	_MMIO(0x2694),
    476 	_MMIO(0x2698),
    477 	_MMIO(0x2754),
    478 	_MMIO(0x28a0),
    479 	_MMIO(0x4de0),
    480 	_MMIO(0x4de4),
    481 	_MMIO(0x4dfc),
    482 	GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
    483 	_MMIO(0x7014),
    484 	HDC_CHICKEN0,//_MMIO(0x7300)
    485 	GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
    486 	_MMIO(0x7700),
    487 	_MMIO(0x7704),
    488 	_MMIO(0x7708),
    489 	_MMIO(0x770c),
    490 	_MMIO(0x83a8),
    491 	_MMIO(0xb110),
    492 	GEN8_L3SQCREG4,//_MMIO(0xb118)
    493 	_MMIO(0xe100),
    494 	_MMIO(0xe18c),
    495 	_MMIO(0xe48c),
    496 	_MMIO(0xe5f4),
    497 };
    498 
    499 /* a simple bsearch */
    500 static inline bool in_whitelist(unsigned int reg)
    501 {
    502 	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
    503 	i915_reg_t *array = force_nonpriv_white_list;
    504 
    505 	while (left < right) {
    506 		int mid = (left + right)/2;
    507 
    508 		if (reg > array[mid].reg)
    509 			left = mid + 1;
    510 		else if (reg < array[mid].reg)
    511 			right = mid;
    512 		else
    513 			return true;
    514 	}
    515 	return false;
    516 }
    517 
    518 static int force_nonpriv_write(struct intel_vgpu *vgpu,
    519 	unsigned int offset, void *p_data, unsigned int bytes)
    520 {
    521 	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
    522 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
    523 	u32 ring_base;
    524 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    525 	int ret = -EINVAL;
    526 
    527 	if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
    528 		gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
    529 			vgpu->id, ring_id, offset, bytes);
    530 		return ret;
    531 	}
    532 
    533 	ring_base = dev_priv->engine[ring_id]->mmio_base;
    534 
    535 	if (in_whitelist(reg_nonpriv) ||
    536 		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
    537 		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
    538 			bytes);
    539 	} else
    540 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
    541 			vgpu->id, *(u32 *)p_data, offset);
    542 
    543 	return 0;
    544 }
    545 
    546 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    547 		void *p_data, unsigned int bytes)
    548 {
    549 	write_vreg(vgpu, offset, p_data, bytes);
    550 
    551 	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
    552 		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
    553 	} else {
    554 		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
    555 		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
    556 			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
    557 				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
    558 	}
    559 	return 0;
    560 }
    561 
    562 static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
    563 		unsigned int offset, void *p_data, unsigned int bytes)
    564 {
    565 	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
    566 	return 0;
    567 }
    568 
    569 #define FDI_LINK_TRAIN_PATTERN1         0
    570 #define FDI_LINK_TRAIN_PATTERN2         1
    571 
    572 static int fdi_auto_training_started(struct intel_vgpu *vgpu)
    573 {
    574 	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
    575 	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
    576 	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
    577 
    578 	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
    579 			(rx_ctl & FDI_RX_ENABLE) &&
    580 			(rx_ctl & FDI_AUTO_TRAINING) &&
    581 			(tx_ctl & DP_TP_CTL_ENABLE) &&
    582 			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
    583 		return 1;
    584 	else
    585 		return 0;
    586 }
    587 
    588 static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
    589 		enum pipe pipe, unsigned int train_pattern)
    590 {
    591 	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
    592 	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
    593 	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
    594 	unsigned int fdi_iir_check_bits;
    595 
    596 	fdi_rx_imr = FDI_RX_IMR(pipe);
    597 	fdi_tx_ctl = FDI_TX_CTL(pipe);
    598 	fdi_rx_ctl = FDI_RX_CTL(pipe);
    599 
    600 	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
    601 		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
    602 		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
    603 		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
    604 	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
    605 		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
    606 		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
    607 		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
    608 	} else {
    609 		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
    610 		return -EINVAL;
    611 	}
    612 
    613 	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
    614 	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
    615 
    616 	/* If imr bit has been masked */
    617 	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
    618 		return 0;
    619 
    620 	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
    621 			== fdi_tx_check_bits)
    622 		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
    623 			== fdi_rx_check_bits))
    624 		return 1;
    625 	else
    626 		return 0;
    627 }
    628 
    629 #define INVALID_INDEX (~0U)
    630 
    631 static unsigned int calc_index(unsigned int offset, unsigned int start,
    632 	unsigned int next, unsigned int end, i915_reg_t i915_end)
    633 {
    634 	unsigned int range = next - start;
    635 
    636 	if (!end)
    637 		end = i915_mmio_reg_offset(i915_end);
    638 	if (offset < start || offset > end)
    639 		return INVALID_INDEX;
    640 	offset -= start;
    641 	return offset / range;
    642 }
    643 
    644 #define FDI_RX_CTL_TO_PIPE(offset) \
    645 	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
    646 
    647 #define FDI_TX_CTL_TO_PIPE(offset) \
    648 	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
    649 
    650 #define FDI_RX_IMR_TO_PIPE(offset) \
    651 	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
    652 
    653 static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
    654 		unsigned int offset, void *p_data, unsigned int bytes)
    655 {
    656 	i915_reg_t fdi_rx_iir;
    657 	unsigned int index;
    658 	int ret;
    659 
    660 	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
    661 		index = FDI_RX_CTL_TO_PIPE(offset);
    662 	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
    663 		index = FDI_TX_CTL_TO_PIPE(offset);
    664 	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
    665 		index = FDI_RX_IMR_TO_PIPE(offset);
    666 	else {
    667 		gvt_vgpu_err("Unsupport registers %x\n", offset);
    668 		return -EINVAL;
    669 	}
    670 
    671 	write_vreg(vgpu, offset, p_data, bytes);
    672 
    673 	fdi_rx_iir = FDI_RX_IIR(index);
    674 
    675 	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
    676 	if (ret < 0)
    677 		return ret;
    678 	if (ret)
    679 		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
    680 
    681 	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
    682 	if (ret < 0)
    683 		return ret;
    684 	if (ret)
    685 		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
    686 
    687 	if (offset == _FDI_RXA_CTL)
    688 		if (fdi_auto_training_started(vgpu))
    689 			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
    690 				DP_TP_STATUS_AUTOTRAIN_DONE;
    691 	return 0;
    692 }
    693 
    694 #define DP_TP_CTL_TO_PORT(offset) \
    695 	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
    696 
    697 static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    698 		void *p_data, unsigned int bytes)
    699 {
    700 	i915_reg_t status_reg;
    701 	unsigned int index;
    702 	u32 data;
    703 
    704 	write_vreg(vgpu, offset, p_data, bytes);
    705 
    706 	index = DP_TP_CTL_TO_PORT(offset);
    707 	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
    708 	if (data == 0x2) {
    709 		status_reg = DP_TP_STATUS(index);
    710 		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
    711 	}
    712 	return 0;
    713 }
    714 
    715 static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
    716 		unsigned int offset, void *p_data, unsigned int bytes)
    717 {
    718 	u32 reg_val;
    719 	u32 sticky_mask;
    720 
    721 	reg_val = *((u32 *)p_data);
    722 	sticky_mask = GENMASK(27, 26) | (1 << 24);
    723 
    724 	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
    725 		(vgpu_vreg(vgpu, offset) & sticky_mask);
    726 	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
    727 	return 0;
    728 }
    729 
    730 static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
    731 		unsigned int offset, void *p_data, unsigned int bytes)
    732 {
    733 	u32 data;
    734 
    735 	write_vreg(vgpu, offset, p_data, bytes);
    736 	data = vgpu_vreg(vgpu, offset);
    737 
    738 	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
    739 		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
    740 	return 0;
    741 }
    742 
    743 static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
    744 		unsigned int offset, void *p_data, unsigned int bytes)
    745 {
    746 	u32 data;
    747 
    748 	write_vreg(vgpu, offset, p_data, bytes);
    749 	data = vgpu_vreg(vgpu, offset);
    750 
    751 	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
    752 		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
    753 	else
    754 		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
    755 	return 0;
    756 }
    757 
    758 #define DSPSURF_TO_PIPE(offset) \
    759 	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
    760 
    761 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    762 		void *p_data, unsigned int bytes)
    763 {
    764 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    765 	u32 pipe = DSPSURF_TO_PIPE(offset);
    766 	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
    767 
    768 	write_vreg(vgpu, offset, p_data, bytes);
    769 	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
    770 
    771 	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
    772 
    773 	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
    774 		intel_vgpu_trigger_virtual_event(vgpu, event);
    775 	else
    776 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
    777 
    778 	return 0;
    779 }
    780 
    781 #define SPRSURF_TO_PIPE(offset) \
    782 	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
    783 
    784 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
    785 		void *p_data, unsigned int bytes)
    786 {
    787 	u32 pipe = SPRSURF_TO_PIPE(offset);
    788 	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
    789 
    790 	write_vreg(vgpu, offset, p_data, bytes);
    791 	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
    792 
    793 	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
    794 		intel_vgpu_trigger_virtual_event(vgpu, event);
    795 	else
    796 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
    797 
    798 	return 0;
    799 }
    800 
    801 static int reg50080_mmio_write(struct intel_vgpu *vgpu,
    802 			       unsigned int offset, void *p_data,
    803 			       unsigned int bytes)
    804 {
    805 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    806 	enum pipe pipe = REG_50080_TO_PIPE(offset);
    807 	enum plane_id plane = REG_50080_TO_PLANE(offset);
    808 	int event = SKL_FLIP_EVENT(pipe, plane);
    809 
    810 	write_vreg(vgpu, offset, p_data, bytes);
    811 	if (plane == PLANE_PRIMARY) {
    812 		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
    813 		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
    814 	} else {
    815 		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
    816 	}
    817 
    818 	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
    819 		intel_vgpu_trigger_virtual_event(vgpu, event);
    820 	else
    821 		set_bit(event, vgpu->irq.flip_done_event[pipe]);
    822 
    823 	return 0;
    824 }
    825 
    826 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
    827 		unsigned int reg)
    828 {
    829 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
    830 	enum intel_gvt_event_type event;
    831 
    832 	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
    833 		event = AUX_CHANNEL_A;
    834 	else if (reg == _PCH_DPB_AUX_CH_CTL ||
    835 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
    836 		event = AUX_CHANNEL_B;
    837 	else if (reg == _PCH_DPC_AUX_CH_CTL ||
    838 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
    839 		event = AUX_CHANNEL_C;
    840 	else if (reg == _PCH_DPD_AUX_CH_CTL ||
    841 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
    842 		event = AUX_CHANNEL_D;
    843 	else {
    844 		WARN_ON(true);
    845 		return -EINVAL;
    846 	}
    847 
    848 	intel_vgpu_trigger_virtual_event(vgpu, event);
    849 	return 0;
    850 }
    851 
    852 static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
    853 		unsigned int reg, int len, bool data_valid)
    854 {
    855 	/* mark transaction done */
    856 	value |= DP_AUX_CH_CTL_DONE;
    857 	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
    858 	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;
    859 
    860 	if (data_valid)
    861 		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
    862 	else
    863 		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;
    864 
    865 	/* message size */
    866 	value &= ~(0xf << 20);
    867 	value |= (len << 20);
    868 	vgpu_vreg(vgpu, reg) = value;
    869 
    870 	if (value & DP_AUX_CH_CTL_INTERRUPT)
    871 		return trigger_aux_channel_interrupt(vgpu, reg);
    872 	return 0;
    873 }
    874 
    875 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
    876 		u8 t)
    877 {
    878 	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
    879 		/* training pattern 1 for CR */
    880 		/* set LANE0_CR_DONE, LANE1_CR_DONE */
    881 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
    882 		/* set LANE2_CR_DONE, LANE3_CR_DONE */
    883 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
    884 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
    885 			DPCD_TRAINING_PATTERN_2) {
    886 		/* training pattern 2 for EQ */
    887 		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane0_1 */
    888 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
    889 		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
    890 		/* Set CHANNEL_EQ_DONE and  SYMBOL_LOCKED for Lane2_3 */
    891 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
    892 		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
    893 		/* set INTERLANE_ALIGN_DONE */
    894 		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
    895 			DPCD_INTERLANE_ALIGN_DONE;
    896 	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
    897 			DPCD_LINK_TRAINING_DISABLED) {
    898 		/* finish link training */
    899 		/* set sink status as synchronized */
    900 		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
    901 	}
    902 }
    903 
    904 #define _REG_HSW_DP_AUX_CH_CTL(dp) \
    905 	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
    906 
    907 #define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
    908 
    909 #define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
    910 
    911 #define dpy_is_valid_port(port)	\
    912 		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
    913 
    914 static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
    915 		unsigned int offset, void *p_data, unsigned int bytes)
    916 {
    917 	struct intel_vgpu_display *display = &vgpu->display;
    918 	int msg, addr, ctrl, op, len;
    919 	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
    920 	struct intel_vgpu_dpcd_data *dpcd = NULL;
    921 	struct intel_vgpu_port *port = NULL;
    922 	u32 data;
    923 
    924 	if (!dpy_is_valid_port(port_index)) {
    925 		gvt_vgpu_err("Unsupported DP port access!\n");
    926 		return 0;
    927 	}
    928 
    929 	write_vreg(vgpu, offset, p_data, bytes);
    930 	data = vgpu_vreg(vgpu, offset);
    931 
    932 	if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
    933 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
    934 		/* SKL DPB/C/D aux ctl register changed */
    935 		return 0;
    936 	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
    937 		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
    938 		/* write to the data registers */
    939 		return 0;
    940 	}
    941 
    942 	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
    943 		/* just want to clear the sticky bits */
    944 		vgpu_vreg(vgpu, offset) = 0;
    945 		return 0;
    946 	}
    947 
    948 	port = &display->ports[port_index];
    949 	dpcd = port->dpcd;
    950 
    951 	/* read out message from DATA1 register */
    952 	msg = vgpu_vreg(vgpu, offset + 4);
    953 	addr = (msg >> 8) & 0xffff;
    954 	ctrl = (msg >> 24) & 0xff;
    955 	len = msg & 0xff;
    956 	op = ctrl >> 4;
    957 
    958 	if (op == GVT_AUX_NATIVE_WRITE) {
    959 		int t;
    960 		u8 buf[16];
    961 
    962 		if ((addr + len + 1) >= DPCD_SIZE) {
    963 			/*
    964 			 * Write request exceeds what we supported,
    965 			 * DCPD spec: When a Source Device is writing a DPCD
    966 			 * address not supported by the Sink Device, the Sink
    967 			 * Device shall reply with AUX NACK and M equal to
    968 			 * zero.
    969 			 */
    970 
    971 			/* NAK the write */
    972 			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
    973 			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
    974 			return 0;
    975 		}
    976 
    977 		/*
    978 		 * Write request format: Headr (command + address + size) occupies
    979 		 * 4 bytes, followed by (len + 1) bytes of data. See details at
    980 		 * intel_dp_aux_transfer().
    981 		 */
    982 		if ((len + 1 + 4) > AUX_BURST_SIZE) {
    983 			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
    984 			return -EINVAL;
    985 		}
    986 
    987 		/* unpack data from vreg to buf */
    988 		for (t = 0; t < 4; t++) {
    989 			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);
    990 
    991 			buf[t * 4] = (r >> 24) & 0xff;
    992 			buf[t * 4 + 1] = (r >> 16) & 0xff;
    993 			buf[t * 4 + 2] = (r >> 8) & 0xff;
    994 			buf[t * 4 + 3] = r & 0xff;
    995 		}
    996 
    997 		/* write to virtual DPCD */
    998 		if (dpcd && dpcd->data_valid) {
    999 			for (t = 0; t <= len; t++) {
   1000 				int p = addr + t;
   1001 
   1002 				dpcd->data[p] = buf[t];
   1003 				/* check for link training */
   1004 				if (p == DPCD_TRAINING_PATTERN_SET)
   1005 					dp_aux_ch_ctl_link_training(dpcd,
   1006 							buf[t]);
   1007 			}
   1008 		}
   1009 
   1010 		/* ACK the write */
   1011 		vgpu_vreg(vgpu, offset + 4) = 0;
   1012 		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
   1013 				dpcd && dpcd->data_valid);
   1014 		return 0;
   1015 	}
   1016 
   1017 	if (op == GVT_AUX_NATIVE_READ) {
   1018 		int idx, i, ret = 0;
   1019 
   1020 		if ((addr + len + 1) >= DPCD_SIZE) {
   1021 			/*
   1022 			 * read request exceeds what we supported
   1023 			 * DPCD spec: A Sink Device receiving a Native AUX CH
   1024 			 * read request for an unsupported DPCD address must
   1025 			 * reply with an AUX ACK and read data set equal to
   1026 			 * zero instead of replying with AUX NACK.
   1027 			 */
   1028 
   1029 			/* ACK the READ*/
   1030 			vgpu_vreg(vgpu, offset + 4) = 0;
   1031 			vgpu_vreg(vgpu, offset + 8) = 0;
   1032 			vgpu_vreg(vgpu, offset + 12) = 0;
   1033 			vgpu_vreg(vgpu, offset + 16) = 0;
   1034 			vgpu_vreg(vgpu, offset + 20) = 0;
   1035 
   1036 			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
   1037 					true);
   1038 			return 0;
   1039 		}
   1040 
   1041 		for (idx = 1; idx <= 5; idx++) {
   1042 			/* clear the data registers */
   1043 			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
   1044 		}
   1045 
   1046 		/*
   1047 		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
   1048 		 */
   1049 		if ((len + 2) > AUX_BURST_SIZE) {
   1050 			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
   1051 			return -EINVAL;
   1052 		}
   1053 
   1054 		/* read from virtual DPCD to vreg */
   1055 		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
   1056 		if (dpcd && dpcd->data_valid) {
   1057 			for (i = 1; i <= (len + 1); i++) {
   1058 				int t;
   1059 
   1060 				t = dpcd->data[addr + i - 1];
   1061 				t <<= (24 - 8 * (i % 4));
   1062 				ret |= t;
   1063 
   1064 				if ((i % 4 == 3) || (i == (len + 1))) {
   1065 					vgpu_vreg(vgpu, offset +
   1066 							(i / 4 + 1) * 4) = ret;
   1067 					ret = 0;
   1068 				}
   1069 			}
   1070 		}
   1071 		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
   1072 				dpcd && dpcd->data_valid);
   1073 		return 0;
   1074 	}
   1075 
   1076 	/* i2c transaction starts */
   1077 	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);
   1078 
   1079 	if (data & DP_AUX_CH_CTL_INTERRUPT)
   1080 		trigger_aux_channel_interrupt(vgpu, offset);
   1081 	return 0;
   1082 }
   1083 
   1084 static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
   1085 		void *p_data, unsigned int bytes)
   1086 {
   1087 	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
   1088 	write_vreg(vgpu, offset, p_data, bytes);
   1089 	return 0;
   1090 }
   1091 
   1092 static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1093 		void *p_data, unsigned int bytes)
   1094 {
   1095 	bool vga_disable;
   1096 
   1097 	write_vreg(vgpu, offset, p_data, bytes);
   1098 	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
   1099 
   1100 	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
   1101 			vga_disable ? "Disable" : "Enable");
   1102 	return 0;
   1103 }
   1104 
   1105 static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
   1106 		unsigned int sbi_offset)
   1107 {
   1108 	struct intel_vgpu_display *display = &vgpu->display;
   1109 	int num = display->sbi.number;
   1110 	int i;
   1111 
   1112 	for (i = 0; i < num; ++i)
   1113 		if (display->sbi.registers[i].offset == sbi_offset)
   1114 			break;
   1115 
   1116 	if (i == num)
   1117 		return 0;
   1118 
   1119 	return display->sbi.registers[i].value;
   1120 }
   1121 
   1122 static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
   1123 		unsigned int offset, u32 value)
   1124 {
   1125 	struct intel_vgpu_display *display = &vgpu->display;
   1126 	int num = display->sbi.number;
   1127 	int i;
   1128 
   1129 	for (i = 0; i < num; ++i) {
   1130 		if (display->sbi.registers[i].offset == offset)
   1131 			break;
   1132 	}
   1133 
   1134 	if (i == num) {
   1135 		if (num == SBI_REG_MAX) {
   1136 			gvt_vgpu_err("SBI caching meets maximum limits\n");
   1137 			return;
   1138 		}
   1139 		display->sbi.number++;
   1140 	}
   1141 
   1142 	display->sbi.registers[i].offset = offset;
   1143 	display->sbi.registers[i].value = value;
   1144 }
   1145 
   1146 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   1147 		void *p_data, unsigned int bytes)
   1148 {
   1149 	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
   1150 				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
   1151 		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
   1152 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
   1153 		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
   1154 				sbi_offset);
   1155 	}
   1156 	read_vreg(vgpu, offset, p_data, bytes);
   1157 	return 0;
   1158 }
   1159 
   1160 static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1161 		void *p_data, unsigned int bytes)
   1162 {
   1163 	u32 data;
   1164 
   1165 	write_vreg(vgpu, offset, p_data, bytes);
   1166 	data = vgpu_vreg(vgpu, offset);
   1167 
   1168 	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
   1169 	data |= SBI_READY;
   1170 
   1171 	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
   1172 	data |= SBI_RESPONSE_SUCCESS;
   1173 
   1174 	vgpu_vreg(vgpu, offset) = data;
   1175 
   1176 	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
   1177 				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
   1178 		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
   1179 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
   1180 
   1181 		write_virtual_sbi_register(vgpu, sbi_offset,
   1182 					   vgpu_vreg_t(vgpu, SBI_DATA));
   1183 	}
   1184 	return 0;
   1185 }
   1186 
   1187 #define _vgtif_reg(x) \
   1188 	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
   1189 
   1190 static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   1191 		void *p_data, unsigned int bytes)
   1192 {
   1193 	bool invalid_read = false;
   1194 
   1195 	read_vreg(vgpu, offset, p_data, bytes);
   1196 
   1197 	switch (offset) {
   1198 	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
   1199 		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
   1200 			invalid_read = true;
   1201 		break;
   1202 	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
   1203 		_vgtif_reg(avail_rs.fence_num):
   1204 		if (offset + bytes >
   1205 			_vgtif_reg(avail_rs.fence_num) + 4)
   1206 			invalid_read = true;
   1207 		break;
   1208 	case 0x78010:	/* vgt_caps */
   1209 	case 0x7881c:
   1210 		break;
   1211 	default:
   1212 		invalid_read = true;
   1213 		break;
   1214 	}
   1215 	if (invalid_read)
   1216 		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
   1217 				offset, bytes, *(u32 *)p_data);
   1218 	vgpu->pv_notified = true;
   1219 	return 0;
   1220 }
   1221 
   1222 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
   1223 {
   1224 	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
   1225 	struct intel_vgpu_mm *mm;
   1226 	u64 *pdps;
   1227 
   1228 	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
   1229 
   1230 	switch (notification) {
   1231 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
   1232 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
   1233 		/* fall through */
   1234 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
   1235 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
   1236 		return PTR_ERR_OR_ZERO(mm);
   1237 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
   1238 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
   1239 		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
   1240 	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
   1241 	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
   1242 	case 1:	/* Remove this in guest driver. */
   1243 		break;
   1244 	default:
   1245 		gvt_vgpu_err("Invalid PV notification %d\n", notification);
   1246 	}
   1247 	return 0;
   1248 }
   1249 
   1250 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
   1251 {
   1252 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
   1253 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
   1254 	char *env[3] = {NULL, NULL, NULL};
   1255 	char vmid_str[20];
   1256 	char display_ready_str[20];
   1257 
   1258 	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
   1259 	env[0] = display_ready_str;
   1260 
   1261 	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
   1262 	env[1] = vmid_str;
   1263 
   1264 	return kobject_uevent_env(kobj, KOBJ_ADD, env);
   1265 }
   1266 
   1267 static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1268 		void *p_data, unsigned int bytes)
   1269 {
   1270 	u32 data = *(u32 *)p_data;
   1271 	bool invalid_write = false;
   1272 
   1273 	switch (offset) {
   1274 	case _vgtif_reg(display_ready):
   1275 		send_display_ready_uevent(vgpu, data ? 1 : 0);
   1276 		break;
   1277 	case _vgtif_reg(g2v_notify):
   1278 		handle_g2v_notification(vgpu, data);
   1279 		break;
   1280 	/* add xhot and yhot to handled list to avoid error log */
   1281 	case _vgtif_reg(cursor_x_hot):
   1282 	case _vgtif_reg(cursor_y_hot):
   1283 	case _vgtif_reg(pdp[0].lo):
   1284 	case _vgtif_reg(pdp[0].hi):
   1285 	case _vgtif_reg(pdp[1].lo):
   1286 	case _vgtif_reg(pdp[1].hi):
   1287 	case _vgtif_reg(pdp[2].lo):
   1288 	case _vgtif_reg(pdp[2].hi):
   1289 	case _vgtif_reg(pdp[3].lo):
   1290 	case _vgtif_reg(pdp[3].hi):
   1291 	case _vgtif_reg(execlist_context_descriptor_lo):
   1292 	case _vgtif_reg(execlist_context_descriptor_hi):
   1293 		break;
   1294 	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
   1295 		invalid_write = true;
   1296 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
   1297 		break;
   1298 	default:
   1299 		invalid_write = true;
   1300 		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
   1301 				offset, bytes, data);
   1302 		break;
   1303 	}
   1304 
   1305 	if (!invalid_write)
   1306 		write_vreg(vgpu, offset, p_data, bytes);
   1307 
   1308 	return 0;
   1309 }
   1310 
   1311 static int pf_write(struct intel_vgpu *vgpu,
   1312 		unsigned int offset, void *p_data, unsigned int bytes)
   1313 {
   1314 	u32 val = *(u32 *)p_data;
   1315 
   1316 	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
   1317 	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
   1318 	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
   1319 		WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
   1320 			  vgpu->id);
   1321 		return 0;
   1322 	}
   1323 
   1324 	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
   1325 }
   1326 
   1327 static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
   1328 		unsigned int offset, void *p_data, unsigned int bytes)
   1329 {
   1330 	write_vreg(vgpu, offset, p_data, bytes);
   1331 
   1332 	if (vgpu_vreg(vgpu, offset) &
   1333 	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
   1334 		vgpu_vreg(vgpu, offset) |=
   1335 			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
   1336 	else
   1337 		vgpu_vreg(vgpu, offset) &=
   1338 			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
   1339 	return 0;
   1340 }
   1341 
   1342 static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
   1343 		unsigned int offset, void *p_data, unsigned int bytes)
   1344 {
   1345 	write_vreg(vgpu, offset, p_data, bytes);
   1346 
   1347 	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
   1348 		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
   1349 	else
   1350 		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
   1351 
   1352 	return 0;
   1353 }
   1354 
   1355 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
   1356 	unsigned int offset, void *p_data, unsigned int bytes)
   1357 {
   1358 	write_vreg(vgpu, offset, p_data, bytes);
   1359 
   1360 	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
   1361 		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
   1362 	return 0;
   1363 }
   1364 
   1365 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
   1366 		void *p_data, unsigned int bytes)
   1367 {
   1368 	u32 mode;
   1369 
   1370 	write_vreg(vgpu, offset, p_data, bytes);
   1371 	mode = vgpu_vreg(vgpu, offset);
   1372 
   1373 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
   1374 		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
   1375 				vgpu->id);
   1376 		return 0;
   1377 	}
   1378 
   1379 	return 0;
   1380 }
   1381 
   1382 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
   1383 		void *p_data, unsigned int bytes)
   1384 {
   1385 	u32 trtte = *(u32 *)p_data;
   1386 
   1387 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
   1388 		WARN(1, "VM(%d): Use physical address for TRTT!\n",
   1389 				vgpu->id);
   1390 		return -EINVAL;
   1391 	}
   1392 	write_vreg(vgpu, offset, p_data, bytes);
   1393 
   1394 	return 0;
   1395 }
   1396 
   1397 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
   1398 		void *p_data, unsigned int bytes)
   1399 {
   1400 	write_vreg(vgpu, offset, p_data, bytes);
   1401 	return 0;
   1402 }
   1403 
   1404 static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
   1405 		void *p_data, unsigned int bytes)
   1406 {
   1407 	u32 v = 0;
   1408 
   1409 	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
   1410 		v |= (1 << 0);
   1411 
   1412 	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
   1413 		v |= (1 << 8);
   1414 
   1415 	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
   1416 		v |= (1 << 16);
   1417 
   1418 	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
   1419 		v |= (1 << 24);
   1420 
   1421 	vgpu_vreg(vgpu, offset) = v;
   1422 
   1423 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1424 }
   1425 
   1426 static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
   1427 		void *p_data, unsigned int bytes)
   1428 {
   1429 	u32 value = *(u32 *)p_data;
   1430 	u32 cmd = value & 0xff;
   1431 	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
   1432 
   1433 	switch (cmd) {
   1434 	case GEN9_PCODE_READ_MEM_LATENCY:
   1435 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
   1436 			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
   1437 			 || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
   1438 			/**
   1439 			 * "Read memory latency" command on gen9.
   1440 			 * Below memory latency values are read
   1441 			 * from skylake platform.
   1442 			 */
   1443 			if (!*data0)
   1444 				*data0 = 0x1e1a1100;
   1445 			else
   1446 				*data0 = 0x61514b3d;
   1447 		} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
   1448 			/**
   1449 			 * "Read memory latency" command on gen9.
   1450 			 * Below memory latency values are read
   1451 			 * from Broxton MRB.
   1452 			 */
   1453 			if (!*data0)
   1454 				*data0 = 0x16080707;
   1455 			else
   1456 				*data0 = 0x16161616;
   1457 		}
   1458 		break;
   1459 	case SKL_PCODE_CDCLK_CONTROL:
   1460 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
   1461 			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
   1462 			 || IS_COFFEELAKE(vgpu->gvt->dev_priv))
   1463 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
   1464 		break;
   1465 	case GEN6_PCODE_READ_RC6VIDS:
   1466 		*data0 |= 0x1;
   1467 		break;
   1468 	}
   1469 
   1470 	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
   1471 		     vgpu->id, value, *data0);
   1472 	/**
   1473 	 * PCODE_READY clear means ready for pcode read/write,
   1474 	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
   1475 	 * always emulate as pcode read/write success and ready for access
   1476 	 * anytime, since we don't touch real physical registers here.
   1477 	 */
   1478 	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
   1479 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
   1480 }
   1481 
   1482 static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
   1483 		void *p_data, unsigned int bytes)
   1484 {
   1485 	u32 value = *(u32 *)p_data;
   1486 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
   1487 
   1488 	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
   1489 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
   1490 			      offset, value);
   1491 		return -EINVAL;
   1492 	}
   1493 	/*
   1494 	 * Need to emulate all the HWSP register write to ensure host can
   1495 	 * update the VM CSB status correctly. Here listed registers can
   1496 	 * support BDW, SKL or other platforms with same HWSP registers.
   1497 	 */
   1498 	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
   1499 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
   1500 			     offset);
   1501 		return -EINVAL;
   1502 	}
   1503 	vgpu->hws_pga[ring_id] = value;
   1504 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
   1505 		     vgpu->id, value, offset);
   1506 
   1507 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
   1508 }
   1509 
   1510 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
   1511 		unsigned int offset, void *p_data, unsigned int bytes)
   1512 {
   1513 	u32 v = *(u32 *)p_data;
   1514 
   1515 	if (IS_BROXTON(vgpu->gvt->dev_priv))
   1516 		v &= (1 << 31) | (1 << 29);
   1517 	else
   1518 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
   1519 			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
   1520 	v |= (v >> 1);
   1521 
   1522 	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
   1523 }
   1524 
   1525 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
   1526 		void *p_data, unsigned int bytes)
   1527 {
   1528 	u32 v = *(u32 *)p_data;
   1529 
   1530 	/* other bits are MBZ. */
   1531 	v &= (1 << 31) | (1 << 30);
   1532 	v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
   1533 
   1534 	vgpu_vreg(vgpu, offset) = v;
   1535 
   1536 	return 0;
   1537 }
   1538 
   1539 static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
   1540 		unsigned int offset, void *p_data, unsigned int bytes)
   1541 {
   1542 	u32 v = *(u32 *)p_data;
   1543 
   1544 	if (v & BXT_DE_PLL_PLL_ENABLE)
   1545 		v |= BXT_DE_PLL_LOCK;
   1546 
   1547 	vgpu_vreg(vgpu, offset) = v;
   1548 
   1549 	return 0;
   1550 }
   1551 
   1552 static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
   1553 		unsigned int offset, void *p_data, unsigned int bytes)
   1554 {
   1555 	u32 v = *(u32 *)p_data;
   1556 
   1557 	if (v & PORT_PLL_ENABLE)
   1558 		v |= PORT_PLL_LOCK;
   1559 
   1560 	vgpu_vreg(vgpu, offset) = v;
   1561 
   1562 	return 0;
   1563 }
   1564 
   1565 static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
   1566 		unsigned int offset, void *p_data, unsigned int bytes)
   1567 {
   1568 	u32 v = *(u32 *)p_data;
   1569 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
   1570 
   1571 	switch (offset) {
   1572 	case _PHY_CTL_FAMILY_EDP:
   1573 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
   1574 		break;
   1575 	case _PHY_CTL_FAMILY_DDI:
   1576 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
   1577 		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
   1578 		break;
   1579 	}
   1580 
   1581 	vgpu_vreg(vgpu, offset) = v;
   1582 
   1583 	return 0;
   1584 }
   1585 
   1586 static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
   1587 		unsigned int offset, void *p_data, unsigned int bytes)
   1588 {
   1589 	u32 v = vgpu_vreg(vgpu, offset);
   1590 
   1591 	v &= ~UNIQUE_TRANGE_EN_METHOD;
   1592 
   1593 	vgpu_vreg(vgpu, offset) = v;
   1594 
   1595 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1596 }
   1597 
   1598 static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
   1599 		unsigned int offset, void *p_data, unsigned int bytes)
   1600 {
   1601 	u32 v = *(u32 *)p_data;
   1602 
   1603 	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
   1604 		vgpu_vreg(vgpu, offset - 0x600) = v;
   1605 		vgpu_vreg(vgpu, offset - 0x800) = v;
   1606 	} else {
   1607 		vgpu_vreg(vgpu, offset - 0x400) = v;
   1608 		vgpu_vreg(vgpu, offset - 0x600) = v;
   1609 	}
   1610 
   1611 	vgpu_vreg(vgpu, offset) = v;
   1612 
   1613 	return 0;
   1614 }
   1615 
   1616 static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
   1617 		unsigned int offset, void *p_data, unsigned int bytes)
   1618 {
   1619 	u32 v = *(u32 *)p_data;
   1620 
   1621 	if (v & BIT(0)) {
   1622 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
   1623 			~PHY_RESERVED;
   1624 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
   1625 			PHY_POWER_GOOD;
   1626 	}
   1627 
   1628 	if (v & BIT(1)) {
   1629 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
   1630 			~PHY_RESERVED;
   1631 		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
   1632 			PHY_POWER_GOOD;
   1633 	}
   1634 
   1635 
   1636 	vgpu_vreg(vgpu, offset) = v;
   1637 
   1638 	return 0;
   1639 }
   1640 
   1641 static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
   1642 		unsigned int offset, void *p_data, unsigned int bytes)
   1643 {
   1644 	vgpu_vreg(vgpu, offset) = 0;
   1645 	return 0;
   1646 }
   1647 
   1648 static int guc_status_read(struct intel_vgpu *vgpu,
   1649 			   unsigned int offset, void *p_data,
   1650 			   unsigned int bytes)
   1651 {
   1652 	/* keep MIA_IN_RESET before clearing */
   1653 	read_vreg(vgpu, offset, p_data, bytes);
   1654 	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
   1655 	return 0;
   1656 }
   1657 
   1658 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
   1659 		unsigned int offset, void *p_data, unsigned int bytes)
   1660 {
   1661 	struct intel_gvt *gvt = vgpu->gvt;
   1662 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   1663 	int ring_id;
   1664 	u32 ring_base;
   1665 
   1666 	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
   1667 	/**
   1668 	 * Read HW reg in following case
   1669 	 * a. the offset isn't a ring mmio
   1670 	 * b. the offset's ring is running on hw.
   1671 	 * c. the offset is ring time stamp mmio
   1672 	 */
   1673 	if (ring_id >= 0)
   1674 		ring_base = dev_priv->engine[ring_id]->mmio_base;
   1675 
   1676 	if (ring_id < 0 || vgpu  == gvt->scheduler.engine_owner[ring_id] ||
   1677 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
   1678 	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
   1679 		mmio_hw_access_pre(dev_priv);
   1680 		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
   1681 		mmio_hw_access_post(dev_priv);
   1682 	}
   1683 
   1684 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
   1685 }
   1686 
   1687 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1688 		void *p_data, unsigned int bytes)
   1689 {
   1690 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
   1691 	struct intel_vgpu_execlist *execlist;
   1692 	u32 data = *(u32 *)p_data;
   1693 	int ret = 0;
   1694 
   1695 	if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
   1696 		return -EINVAL;
   1697 
   1698 	execlist = &vgpu->submission.execlist[ring_id];
   1699 
   1700 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
   1701 	if (execlist->elsp_dwords.index == 3) {
   1702 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
   1703 		if(ret)
   1704 			gvt_vgpu_err("fail submit workload on ring %d\n",
   1705 				ring_id);
   1706 	}
   1707 
   1708 	++execlist->elsp_dwords.index;
   1709 	execlist->elsp_dwords.index &= 0x3;
   1710 	return ret;
   1711 }
   1712 
   1713 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   1714 		void *p_data, unsigned int bytes)
   1715 {
   1716 	u32 data = *(u32 *)p_data;
   1717 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
   1718 	bool enable_execlist;
   1719 	int ret;
   1720 
   1721 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
   1722 	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
   1723 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
   1724 	write_vreg(vgpu, offset, p_data, bytes);
   1725 
   1726 	if (data & _MASKED_BIT_ENABLE(1)) {
   1727 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   1728 		return 0;
   1729 	}
   1730 
   1731 	if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
   1732 	    data & _MASKED_BIT_ENABLE(2)) {
   1733 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   1734 		return 0;
   1735 	}
   1736 
   1737 	/* when PPGTT mode enabled, we will check if guest has called
   1738 	 * pvinfo, if not, we will treat this guest as non-gvtg-aware
   1739 	 * guest, and stop emulating its cfg space, mmio, gtt, etc.
   1740 	 */
   1741 	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
   1742 			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
   1743 			&& !vgpu->pv_notified) {
   1744 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   1745 		return 0;
   1746 	}
   1747 	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
   1748 			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
   1749 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
   1750 
   1751 		gvt_dbg_core("EXECLIST %s on ring %d\n",
   1752 				(enable_execlist ? "enabling" : "disabling"),
   1753 				ring_id);
   1754 
   1755 		if (!enable_execlist)
   1756 			return 0;
   1757 
   1758 		ret = intel_vgpu_select_submission_ops(vgpu,
   1759 			       BIT(ring_id),
   1760 			       INTEL_VGPU_EXECLIST_SUBMISSION);
   1761 		if (ret)
   1762 			return ret;
   1763 
   1764 		intel_vgpu_start_schedule(vgpu);
   1765 	}
   1766 	return 0;
   1767 }
   1768 
   1769 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
   1770 		unsigned int offset, void *p_data, unsigned int bytes)
   1771 {
   1772 	unsigned int id = 0;
   1773 
   1774 	write_vreg(vgpu, offset, p_data, bytes);
   1775 	vgpu_vreg(vgpu, offset) = 0;
   1776 
   1777 	switch (offset) {
   1778 	case 0x4260:
   1779 		id = RCS0;
   1780 		break;
   1781 	case 0x4264:
   1782 		id = VCS0;
   1783 		break;
   1784 	case 0x4268:
   1785 		id = VCS1;
   1786 		break;
   1787 	case 0x426c:
   1788 		id = BCS0;
   1789 		break;
   1790 	case 0x4270:
   1791 		id = VECS0;
   1792 		break;
   1793 	default:
   1794 		return -EINVAL;
   1795 	}
   1796 	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
   1797 
   1798 	return 0;
   1799 }
   1800 
   1801 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
   1802 	unsigned int offset, void *p_data, unsigned int bytes)
   1803 {
   1804 	u32 data;
   1805 
   1806 	write_vreg(vgpu, offset, p_data, bytes);
   1807 	data = vgpu_vreg(vgpu, offset);
   1808 
   1809 	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
   1810 		data |= RESET_CTL_READY_TO_RESET;
   1811 	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
   1812 		data &= ~RESET_CTL_READY_TO_RESET;
   1813 
   1814 	vgpu_vreg(vgpu, offset) = data;
   1815 	return 0;
   1816 }
   1817 
   1818 static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
   1819 				    unsigned int offset, void *p_data,
   1820 				    unsigned int bytes)
   1821 {
   1822 	u32 data = *(u32 *)p_data;
   1823 
   1824 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
   1825 	write_vreg(vgpu, offset, p_data, bytes);
   1826 
   1827 	if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
   1828 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
   1829 
   1830 	return 0;
   1831 }
   1832 
   1833 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
   1834 	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
   1835 		f, s, am, rm, d, r, w); \
   1836 	if (ret) \
   1837 		return ret; \
   1838 } while (0)
   1839 
   1840 #define MMIO_D(reg, d) \
   1841 	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
   1842 
   1843 #define MMIO_DH(reg, d, r, w) \
   1844 	MMIO_F(reg, 4, 0, 0, 0, d, r, w)
   1845 
   1846 #define MMIO_DFH(reg, d, f, r, w) \
   1847 	MMIO_F(reg, 4, f, 0, 0, d, r, w)
   1848 
   1849 #define MMIO_GM(reg, d, r, w) \
   1850 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
   1851 
   1852 #define MMIO_GM_RDR(reg, d, r, w) \
   1853 	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
   1854 
   1855 #define MMIO_RO(reg, d, f, rm, r, w) \
   1856 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
   1857 
   1858 #define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
   1859 	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
   1860 	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
   1861 	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
   1862 	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
   1863 	if (HAS_ENGINE(dev_priv, VCS1)) \
   1864 		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
   1865 } while (0)
   1866 
   1867 #define MMIO_RING_D(prefix, d) \
   1868 	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
   1869 
   1870 #define MMIO_RING_DFH(prefix, d, f, r, w) \
   1871 	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
   1872 
   1873 #define MMIO_RING_GM(prefix, d, r, w) \
   1874 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
   1875 
   1876 #define MMIO_RING_GM_RDR(prefix, d, r, w) \
   1877 	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
   1878 
   1879 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
   1880 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
   1881 
   1882 static int init_generic_mmio_info(struct intel_gvt *gvt)
   1883 {
   1884 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   1885 	int ret;
   1886 
   1887 	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
   1888 		intel_vgpu_reg_imr_handler);
   1889 
   1890 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
   1891 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
   1892 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
   1893 	MMIO_D(SDEISR, D_ALL);
   1894 
   1895 	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1896 
   1897 	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
   1898 		gamw_echo_dev_rw_ia_write);
   1899 
   1900 	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   1901 	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   1902 	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
   1903 
   1904 #define RING_REG(base) _MMIO((base) + 0x28)
   1905 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1906 #undef RING_REG
   1907 
   1908 #define RING_REG(base) _MMIO((base) + 0x134)
   1909 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1910 #undef RING_REG
   1911 
   1912 #define RING_REG(base) _MMIO((base) + 0x6c)
   1913 	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
   1914 #undef RING_REG
   1915 	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
   1916 
   1917 	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
   1918 	MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
   1919 	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
   1920 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
   1921 
   1922 	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1923 	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1924 	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1925 	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
   1926 	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
   1927 
   1928 	/* RING MODE */
   1929 #define RING_REG(base) _MMIO((base) + 0x29c)
   1930 	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
   1931 		ring_mode_mmio_write);
   1932 #undef RING_REG
   1933 
   1934 	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   1935 		NULL, NULL);
   1936 	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   1937 			NULL, NULL);
   1938 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
   1939 			mmio_read_from_hw, NULL);
   1940 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
   1941 			mmio_read_from_hw, NULL);
   1942 
   1943 	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1944 	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   1945 		NULL, NULL);
   1946 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1947 	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1948 	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1949 
   1950 	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1951 	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1952 	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1953 	MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
   1954 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1955 	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1956 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
   1957 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   1958 		NULL, NULL);
   1959 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
   1960 		 NULL, NULL);
   1961 	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1962 	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1963 	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1964 	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1965 	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1966 	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1967 	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   1968 	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1969 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1970 	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   1971 
   1972 	/* display */
   1973 	MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
   1974 	MMIO_D(_MMIO(0x602a0), D_ALL);
   1975 
   1976 	MMIO_D(_MMIO(0x65050), D_ALL);
   1977 	MMIO_D(_MMIO(0x650b4), D_ALL);
   1978 
   1979 	MMIO_D(_MMIO(0xc4040), D_ALL);
   1980 	MMIO_D(DERRMR, D_ALL);
   1981 
   1982 	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
   1983 	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
   1984 	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
   1985 	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
   1986 
   1987 	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
   1988 	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
   1989 	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
   1990 	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
   1991 
   1992 	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
   1993 	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
   1994 	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
   1995 	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
   1996 
   1997 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
   1998 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
   1999 	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
   2000 	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
   2001 
   2002 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
   2003 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
   2004 	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
   2005 	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
   2006 
   2007 	MMIO_D(CURCNTR(PIPE_A), D_ALL);
   2008 	MMIO_D(CURCNTR(PIPE_B), D_ALL);
   2009 	MMIO_D(CURCNTR(PIPE_C), D_ALL);
   2010 
   2011 	MMIO_D(CURPOS(PIPE_A), D_ALL);
   2012 	MMIO_D(CURPOS(PIPE_B), D_ALL);
   2013 	MMIO_D(CURPOS(PIPE_C), D_ALL);
   2014 
   2015 	MMIO_D(CURBASE(PIPE_A), D_ALL);
   2016 	MMIO_D(CURBASE(PIPE_B), D_ALL);
   2017 	MMIO_D(CURBASE(PIPE_C), D_ALL);
   2018 
   2019 	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
   2020 	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
   2021 	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
   2022 
   2023 	MMIO_D(_MMIO(0x700ac), D_ALL);
   2024 	MMIO_D(_MMIO(0x710ac), D_ALL);
   2025 	MMIO_D(_MMIO(0x720ac), D_ALL);
   2026 
   2027 	MMIO_D(_MMIO(0x70090), D_ALL);
   2028 	MMIO_D(_MMIO(0x70094), D_ALL);
   2029 	MMIO_D(_MMIO(0x70098), D_ALL);
   2030 	MMIO_D(_MMIO(0x7009c), D_ALL);
   2031 
   2032 	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
   2033 	MMIO_D(DSPADDR(PIPE_A), D_ALL);
   2034 	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
   2035 	MMIO_D(DSPPOS(PIPE_A), D_ALL);
   2036 	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
   2037 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
   2038 	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
   2039 	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
   2040 	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
   2041 		reg50080_mmio_write);
   2042 
   2043 	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
   2044 	MMIO_D(DSPADDR(PIPE_B), D_ALL);
   2045 	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
   2046 	MMIO_D(DSPPOS(PIPE_B), D_ALL);
   2047 	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
   2048 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
   2049 	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
   2050 	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
   2051 	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
   2052 		reg50080_mmio_write);
   2053 
   2054 	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
   2055 	MMIO_D(DSPADDR(PIPE_C), D_ALL);
   2056 	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
   2057 	MMIO_D(DSPPOS(PIPE_C), D_ALL);
   2058 	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
   2059 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
   2060 	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
   2061 	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
   2062 	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
   2063 		reg50080_mmio_write);
   2064 
   2065 	MMIO_D(SPRCTL(PIPE_A), D_ALL);
   2066 	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
   2067 	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
   2068 	MMIO_D(SPRPOS(PIPE_A), D_ALL);
   2069 	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
   2070 	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
   2071 	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
   2072 	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
   2073 	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
   2074 	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
   2075 	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
   2076 	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
   2077 	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
   2078 		reg50080_mmio_write);
   2079 
   2080 	MMIO_D(SPRCTL(PIPE_B), D_ALL);
   2081 	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
   2082 	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
   2083 	MMIO_D(SPRPOS(PIPE_B), D_ALL);
   2084 	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
   2085 	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
   2086 	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
   2087 	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
   2088 	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
   2089 	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
   2090 	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
   2091 	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
   2092 	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
   2093 		reg50080_mmio_write);
   2094 
   2095 	MMIO_D(SPRCTL(PIPE_C), D_ALL);
   2096 	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
   2097 	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
   2098 	MMIO_D(SPRPOS(PIPE_C), D_ALL);
   2099 	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
   2100 	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
   2101 	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
   2102 	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
   2103 	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
   2104 	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
   2105 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
   2106 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
   2107 	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
   2108 		reg50080_mmio_write);
   2109 
   2110 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
   2111 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
   2112 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
   2113 	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
   2114 	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
   2115 	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
   2116 	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
   2117 	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
   2118 	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
   2119 
   2120 	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
   2121 	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
   2122 	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
   2123 	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
   2124 	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
   2125 	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
   2126 	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
   2127 	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
   2128 	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
   2129 
   2130 	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
   2131 	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
   2132 	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
   2133 	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
   2134 	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
   2135 	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
   2136 	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
   2137 	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
   2138 	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
   2139 
   2140 	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
   2141 	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
   2142 	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
   2143 	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
   2144 	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
   2145 	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
   2146 	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
   2147 	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
   2148 
   2149 	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
   2150 	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
   2151 	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
   2152 	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
   2153 	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
   2154 	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
   2155 	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
   2156 	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
   2157 
   2158 	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
   2159 	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
   2160 	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
   2161 	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
   2162 	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
   2163 	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
   2164 	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
   2165 	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
   2166 
   2167 	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
   2168 	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
   2169 	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
   2170 	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
   2171 	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
   2172 	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
   2173 	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
   2174 	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
   2175 
   2176 	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
   2177 	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
   2178 	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
   2179 	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
   2180 	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
   2181 	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
   2182 	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
   2183 	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
   2184 
   2185 	MMIO_D(PF_CTL(PIPE_A), D_ALL);
   2186 	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
   2187 	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
   2188 	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
   2189 	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
   2190 
   2191 	MMIO_D(PF_CTL(PIPE_B), D_ALL);
   2192 	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
   2193 	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
   2194 	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
   2195 	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
   2196 
   2197 	MMIO_D(PF_CTL(PIPE_C), D_ALL);
   2198 	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
   2199 	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
   2200 	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
   2201 	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
   2202 
   2203 	MMIO_D(WM0_PIPEA_ILK, D_ALL);
   2204 	MMIO_D(WM0_PIPEB_ILK, D_ALL);
   2205 	MMIO_D(WM0_PIPEC_IVB, D_ALL);
   2206 	MMIO_D(WM1_LP_ILK, D_ALL);
   2207 	MMIO_D(WM2_LP_ILK, D_ALL);
   2208 	MMIO_D(WM3_LP_ILK, D_ALL);
   2209 	MMIO_D(WM1S_LP_ILK, D_ALL);
   2210 	MMIO_D(WM2S_LP_IVB, D_ALL);
   2211 	MMIO_D(WM3S_LP_IVB, D_ALL);
   2212 
   2213 	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
   2214 	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
   2215 	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
   2216 	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
   2217 
   2218 	MMIO_D(_MMIO(0x48268), D_ALL);
   2219 
   2220 	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
   2221 		gmbus_mmio_write);
   2222 	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
   2223 	MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
   2224 
   2225 	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2226 		dp_aux_ch_ctl_mmio_write);
   2227 	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2228 		dp_aux_ch_ctl_mmio_write);
   2229 	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
   2230 		dp_aux_ch_ctl_mmio_write);
   2231 
   2232 	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
   2233 
   2234 	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
   2235 	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
   2236 
   2237 	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2238 	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2239 	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
   2240 	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
   2241 	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
   2242 	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
   2243 	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
   2244 	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
   2245 	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
   2246 
   2247 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
   2248 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
   2249 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
   2250 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
   2251 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
   2252 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
   2253 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
   2254 
   2255 	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
   2256 	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
   2257 	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
   2258 	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
   2259 	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
   2260 	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
   2261 	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
   2262 
   2263 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
   2264 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
   2265 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
   2266 	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
   2267 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
   2268 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
   2269 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
   2270 	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
   2271 
   2272 	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
   2273 	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
   2274 	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
   2275 
   2276 	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
   2277 	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
   2278 	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
   2279 
   2280 	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
   2281 	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
   2282 	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
   2283 
   2284 	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
   2285 	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
   2286 	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
   2287 
   2288 	MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
   2289 	MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
   2290 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
   2291 	MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
   2292 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
   2293 	MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
   2294 
   2295 	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
   2296 	MMIO_D(PCH_PP_DIVISOR, D_ALL);
   2297 	MMIO_D(PCH_PP_STATUS,  D_ALL);
   2298 	MMIO_D(PCH_LVDS, D_ALL);
   2299 	MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
   2300 	MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
   2301 	MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
   2302 	MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
   2303 	MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
   2304 	MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
   2305 	MMIO_D(PCH_DREF_CONTROL, D_ALL);
   2306 	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
   2307 	MMIO_D(PCH_DPLL_SEL, D_ALL);
   2308 
   2309 	MMIO_D(_MMIO(0x61208), D_ALL);
   2310 	MMIO_D(_MMIO(0x6120c), D_ALL);
   2311 	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
   2312 	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
   2313 
   2314 	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
   2315 	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
   2316 	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
   2317 	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
   2318 	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
   2319 	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
   2320 
   2321 	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
   2322 		PORTA_HOTPLUG_STATUS_MASK
   2323 		| PORTB_HOTPLUG_STATUS_MASK
   2324 		| PORTC_HOTPLUG_STATUS_MASK
   2325 		| PORTD_HOTPLUG_STATUS_MASK,
   2326 		NULL, NULL);
   2327 
   2328 	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
   2329 	MMIO_D(FUSE_STRAP, D_ALL);
   2330 	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
   2331 
   2332 	MMIO_D(DISP_ARB_CTL, D_ALL);
   2333 	MMIO_D(DISP_ARB_CTL2, D_ALL);
   2334 
   2335 	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
   2336 	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
   2337 	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
   2338 
   2339 	MMIO_D(SOUTH_CHICKEN1, D_ALL);
   2340 	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
   2341 	MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
   2342 	MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
   2343 	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
   2344 	MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
   2345 	MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
   2346 
   2347 	MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
   2348 	MMIO_D(ILK_DPFC_CONTROL, D_ALL);
   2349 	MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
   2350 	MMIO_D(ILK_DPFC_STATUS, D_ALL);
   2351 	MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
   2352 	MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
   2353 	MMIO_D(ILK_FBC_RT_BASE, D_ALL);
   2354 
   2355 	MMIO_D(IPS_CTL, D_ALL);
   2356 
   2357 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
   2358 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
   2359 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
   2360 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
   2361 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
   2362 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
   2363 	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
   2364 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
   2365 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
   2366 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
   2367 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
   2368 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
   2369 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
   2370 
   2371 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
   2372 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
   2373 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
   2374 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
   2375 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
   2376 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
   2377 	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
   2378 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
   2379 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
   2380 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
   2381 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
   2382 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
   2383 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
   2384 
   2385 	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
   2386 	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
   2387 	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
   2388 	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
   2389 	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
   2390 	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
   2391 	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
   2392 	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
   2393 	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
   2394 	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
   2395 	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
   2396 	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
   2397 	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
   2398 
   2399 	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
   2400 	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
   2401 	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
   2402 
   2403 	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
   2404 	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
   2405 	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
   2406 
   2407 	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
   2408 	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
   2409 	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
   2410 
   2411 	MMIO_D(_MMIO(0x60110), D_ALL);
   2412 	MMIO_D(_MMIO(0x61110), D_ALL);
   2413 	MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
   2414 	MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
   2415 	MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
   2416 	MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2417 	MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2418 	MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2419 	MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2420 	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2421 	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
   2422 
   2423 	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
   2424 	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
   2425 	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
   2426 	MMIO_D(SPLL_CTL, D_ALL);
   2427 	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
   2428 	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
   2429 	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
   2430 	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
   2431 	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
   2432 	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
   2433 	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
   2434 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
   2435 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
   2436 	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
   2437 
   2438 	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
   2439 	MMIO_D(_MMIO(0x46508), D_ALL);
   2440 
   2441 	MMIO_D(_MMIO(0x49080), D_ALL);
   2442 	MMIO_D(_MMIO(0x49180), D_ALL);
   2443 	MMIO_D(_MMIO(0x49280), D_ALL);
   2444 
   2445 	MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
   2446 	MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
   2447 	MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
   2448 
   2449 	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
   2450 	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
   2451 	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
   2452 
   2453 	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
   2454 	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
   2455 	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
   2456 
   2457 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
   2458 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
   2459 	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
   2460 
   2461 	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
   2462 	MMIO_D(SBI_ADDR, D_ALL);
   2463 	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
   2464 	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
   2465 	MMIO_D(PIXCLK_GATE, D_ALL);
   2466 
   2467 	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
   2468 		dp_aux_ch_ctl_mmio_write);
   2469 
   2470 	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2471 	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2472 	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2473 	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2474 	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
   2475 
   2476 	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2477 	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2478 	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2479 	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2480 	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
   2481 
   2482 	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
   2483 	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
   2484 	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
   2485 	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
   2486 	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
   2487 
   2488 	MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
   2489 	MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
   2490 	MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
   2491 	MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
   2492 	MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
   2493 
   2494 	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
   2495 	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
   2496 	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
   2497 
   2498 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
   2499 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
   2500 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
   2501 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
   2502 
   2503 	MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
   2504 	MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
   2505 	MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
   2506 	MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
   2507 
   2508 	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
   2509 	MMIO_D(FORCEWAKE_ACK, D_ALL);
   2510 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
   2511 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
   2512 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2513 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2514 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
   2515 	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
   2516 	MMIO_D(ECOBUS, D_ALL);
   2517 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
   2518 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
   2519 	MMIO_D(GEN6_RPNSWREQ, D_ALL);
   2520 	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
   2521 	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
   2522 	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
   2523 	MMIO_D(GEN6_RPSTAT1, D_ALL);
   2524 	MMIO_D(GEN6_RP_CONTROL, D_ALL);
   2525 	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
   2526 	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
   2527 	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
   2528 	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
   2529 	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
   2530 	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
   2531 	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
   2532 	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
   2533 	MMIO_D(GEN6_RP_UP_EI, D_ALL);
   2534 	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
   2535 	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
   2536 	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
   2537 	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
   2538 	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
   2539 	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
   2540 	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
   2541 	MMIO_D(GEN6_RC_SLEEP, D_ALL);
   2542 	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
   2543 	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
   2544 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
   2545 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
   2546 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
   2547 	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
   2548 	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
   2549 	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
   2550 	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
   2551 	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
   2552 	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
   2553 
   2554 	MMIO_D(RSTDBYCTL, D_ALL);
   2555 
   2556 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
   2557 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
   2558 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
   2559 
   2560 	MMIO_D(TILECTL, D_ALL);
   2561 
   2562 	MMIO_D(GEN6_UCGCTL1, D_ALL);
   2563 	MMIO_D(GEN6_UCGCTL2, D_ALL);
   2564 
   2565 	MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
   2566 
   2567 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
   2568 	MMIO_D(_MMIO(0x13812c), D_ALL);
   2569 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
   2570 	MMIO_D(HSW_EDRAM_CAP, D_ALL);
   2571 	MMIO_D(HSW_IDICR, D_ALL);
   2572 	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
   2573 
   2574 	MMIO_D(_MMIO(0x3c), D_ALL);
   2575 	MMIO_D(_MMIO(0x860), D_ALL);
   2576 	MMIO_D(ECOSKPD, D_ALL);
   2577 	MMIO_D(_MMIO(0x121d0), D_ALL);
   2578 	MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
   2579 	MMIO_D(_MMIO(0x41d0), D_ALL);
   2580 	MMIO_D(GAC_ECO_BITS, D_ALL);
   2581 	MMIO_D(_MMIO(0x6200), D_ALL);
   2582 	MMIO_D(_MMIO(0x6204), D_ALL);
   2583 	MMIO_D(_MMIO(0x6208), D_ALL);
   2584 	MMIO_D(_MMIO(0x7118), D_ALL);
   2585 	MMIO_D(_MMIO(0x7180), D_ALL);
   2586 	MMIO_D(_MMIO(0x7408), D_ALL);
   2587 	MMIO_D(_MMIO(0x7c00), D_ALL);
   2588 	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
   2589 	MMIO_D(_MMIO(0x911c), D_ALL);
   2590 	MMIO_D(_MMIO(0x9120), D_ALL);
   2591 	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2592 
   2593 	MMIO_D(GAB_CTL, D_ALL);
   2594 	MMIO_D(_MMIO(0x48800), D_ALL);
   2595 	MMIO_D(_MMIO(0xce044), D_ALL);
   2596 	MMIO_D(_MMIO(0xe6500), D_ALL);
   2597 	MMIO_D(_MMIO(0xe6504), D_ALL);
   2598 	MMIO_D(_MMIO(0xe6600), D_ALL);
   2599 	MMIO_D(_MMIO(0xe6604), D_ALL);
   2600 	MMIO_D(_MMIO(0xe6700), D_ALL);
   2601 	MMIO_D(_MMIO(0xe6704), D_ALL);
   2602 	MMIO_D(_MMIO(0xe6800), D_ALL);
   2603 	MMIO_D(_MMIO(0xe6804), D_ALL);
   2604 	MMIO_D(PCH_GMBUS4, D_ALL);
   2605 	MMIO_D(PCH_GMBUS5, D_ALL);
   2606 
   2607 	MMIO_D(_MMIO(0x902c), D_ALL);
   2608 	MMIO_D(_MMIO(0xec008), D_ALL);
   2609 	MMIO_D(_MMIO(0xec00c), D_ALL);
   2610 	MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
   2611 	MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
   2612 	MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
   2613 	MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
   2614 	MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
   2615 	MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
   2616 	MMIO_D(_MMIO(0xec408), D_ALL);
   2617 	MMIO_D(_MMIO(0xec40c), D_ALL);
   2618 	MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
   2619 	MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
   2620 	MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
   2621 	MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
   2622 	MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
   2623 	MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
   2624 	MMIO_D(_MMIO(0xfc810), D_ALL);
   2625 	MMIO_D(_MMIO(0xfc81c), D_ALL);
   2626 	MMIO_D(_MMIO(0xfc828), D_ALL);
   2627 	MMIO_D(_MMIO(0xfc834), D_ALL);
   2628 	MMIO_D(_MMIO(0xfcc00), D_ALL);
   2629 	MMIO_D(_MMIO(0xfcc0c), D_ALL);
   2630 	MMIO_D(_MMIO(0xfcc18), D_ALL);
   2631 	MMIO_D(_MMIO(0xfcc24), D_ALL);
   2632 	MMIO_D(_MMIO(0xfd000), D_ALL);
   2633 	MMIO_D(_MMIO(0xfd00c), D_ALL);
   2634 	MMIO_D(_MMIO(0xfd018), D_ALL);
   2635 	MMIO_D(_MMIO(0xfd024), D_ALL);
   2636 	MMIO_D(_MMIO(0xfd034), D_ALL);
   2637 
   2638 	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
   2639 	MMIO_D(_MMIO(0x2054), D_ALL);
   2640 	MMIO_D(_MMIO(0x12054), D_ALL);
   2641 	MMIO_D(_MMIO(0x22054), D_ALL);
   2642 	MMIO_D(_MMIO(0x1a054), D_ALL);
   2643 
   2644 	MMIO_D(_MMIO(0x44070), D_ALL);
   2645 	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2646 	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2647 	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2648 	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2649 	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2650 
   2651 	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
   2652 	MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
   2653 	MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
   2654 	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2655 	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2656 	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2657 
   2658 	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2659 	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2660 	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2661 
   2662 	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2663 	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2664 	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2665 	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2666 	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2667 	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2668 	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2669 	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2670 	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2671 	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2672 	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
   2673 	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2674 	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2675 	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2676 	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2677 	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
   2678 	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2679 
   2680 	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2681 	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
   2682 	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2683 	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2684 	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
   2685 	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2686 	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
   2687 	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2688 	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2689 	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2690 	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2691 
   2692 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
   2693 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
   2694 	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
   2695 
   2696 	return 0;
   2697 }
   2698 
   2699 static int init_bdw_mmio_info(struct intel_gvt *gvt)
   2700 {
   2701 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   2702 	int ret;
   2703 
   2704 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2705 	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2706 	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2707 	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
   2708 
   2709 	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2710 	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2711 	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2712 	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
   2713 
   2714 	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2715 	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2716 	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2717 	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
   2718 
   2719 	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2720 	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2721 	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2722 	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
   2723 
   2724 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
   2725 		intel_vgpu_reg_imr_handler);
   2726 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
   2727 		intel_vgpu_reg_ier_handler);
   2728 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
   2729 		intel_vgpu_reg_iir_handler);
   2730 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
   2731 
   2732 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
   2733 		intel_vgpu_reg_imr_handler);
   2734 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
   2735 		intel_vgpu_reg_ier_handler);
   2736 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
   2737 		intel_vgpu_reg_iir_handler);
   2738 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
   2739 
   2740 	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
   2741 		intel_vgpu_reg_imr_handler);
   2742 	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
   2743 		intel_vgpu_reg_ier_handler);
   2744 	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
   2745 		intel_vgpu_reg_iir_handler);
   2746 	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
   2747 
   2748 	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2749 	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2750 	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2751 	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
   2752 
   2753 	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2754 	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2755 	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2756 	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
   2757 
   2758 	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
   2759 	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
   2760 	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
   2761 	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
   2762 
   2763 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
   2764 		intel_vgpu_reg_master_irq_handler);
   2765 
   2766 	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
   2767 		mmio_read_from_hw, NULL);
   2768 
   2769 #define RING_REG(base) _MMIO((base) + 0xd0)
   2770 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
   2771 		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
   2772 		ring_reset_ctl_write);
   2773 #undef RING_REG
   2774 
   2775 #define RING_REG(base) _MMIO((base) + 0x230)
   2776 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
   2777 #undef RING_REG
   2778 
   2779 #define RING_REG(base) _MMIO((base) + 0x234)
   2780 	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
   2781 		NULL, NULL);
   2782 #undef RING_REG
   2783 
   2784 #define RING_REG(base) _MMIO((base) + 0x244)
   2785 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2786 #undef RING_REG
   2787 
   2788 #define RING_REG(base) _MMIO((base) + 0x370)
   2789 	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
   2790 #undef RING_REG
   2791 
   2792 #define RING_REG(base) _MMIO((base) + 0x3a0)
   2793 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
   2794 #undef RING_REG
   2795 
   2796 	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
   2797 	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
   2798 	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
   2799 	MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
   2800 	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
   2801 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
   2802 	MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
   2803 
   2804 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
   2805 
   2806 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
   2807 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
   2808 
   2809 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
   2810 
   2811 #define RING_REG(base) _MMIO((base) + 0x270)
   2812 	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
   2813 #undef RING_REG
   2814 
   2815 	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
   2816 
   2817 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2818 
   2819 	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
   2820 	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
   2821 	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
   2822 
   2823 	MMIO_D(WM_MISC, D_BDW);
   2824 	MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
   2825 
   2826 	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
   2827 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
   2828 	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
   2829 
   2830 	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
   2831 
   2832 	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
   2833 	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
   2834 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
   2835 
   2836 	MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
   2837 	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2838 		NULL, NULL);
   2839 	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   2840 		NULL, NULL);
   2841 	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2842 
   2843 	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2844 	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2845 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2846 	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2847 	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2848 	MMIO_D(_MMIO(0xb110), D_BDW);
   2849 
   2850 	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
   2851 		NULL, force_nonpriv_write);
   2852 
   2853 	MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
   2854 	MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
   2855 
   2856 	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2857 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
   2858 
   2859 	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2860 
   2861 	MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
   2862 
   2863 	MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
   2864 
   2865 	MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
   2866 	MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
   2867 
   2868 	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2869 	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2870 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2871 	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   2872 
   2873 	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
   2874 
   2875 	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2876 	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2877 	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2878 	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2879 	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2880 	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2881 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2882 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2883 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2884 	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
   2885 	return 0;
   2886 }
   2887 
   2888 static int init_skl_mmio_info(struct intel_gvt *gvt)
   2889 {
   2890 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   2891 	int ret;
   2892 
   2893 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2894 	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
   2895 	MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2896 	MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
   2897 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
   2898 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
   2899 
   2900 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2901 						dp_aux_ch_ctl_mmio_write);
   2902 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2903 						dp_aux_ch_ctl_mmio_write);
   2904 	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
   2905 						dp_aux_ch_ctl_mmio_write);
   2906 
   2907 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
   2908 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
   2909 
   2910 	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
   2911 
   2912 	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
   2913 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
   2914 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
   2915 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   2916 	MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
   2917 	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
   2918 	MMIO_D(DC_STATE_EN, D_SKL_PLUS);
   2919 	MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
   2920 	MMIO_D(CDCLK_CTL, D_SKL_PLUS);
   2921 	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
   2922 	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
   2923 	MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
   2924 	MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
   2925 	MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
   2926 	MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
   2927 	MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
   2928 	MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
   2929 	MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
   2930 	MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
   2931 	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
   2932 
   2933 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2934 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2935 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2936 	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2937 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2938 	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2939 
   2940 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2941 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2942 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2943 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2944 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2945 	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2946 
   2947 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
   2948 	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
   2949 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
   2950 	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
   2951 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
   2952 	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
   2953 
   2954 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   2955 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   2956 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   2957 	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
   2958 
   2959 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   2960 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   2961 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   2962 	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
   2963 
   2964 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   2965 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   2966 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   2967 	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
   2968 
   2969 	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
   2970 	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
   2971 	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
   2972 
   2973 	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2974 	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2975 	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2976 
   2977 	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2978 	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2979 	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2980 
   2981 	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2982 	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2983 	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2984 
   2985 	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2986 	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2987 	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
   2988 
   2989 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   2990 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   2991 	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   2992 
   2993 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   2994 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   2995 	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   2996 
   2997 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   2998 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   2999 	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   3000 
   3001 	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
   3002 	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
   3003 	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
   3004 
   3005 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
   3006 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
   3007 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
   3008 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
   3009 
   3010 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
   3011 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
   3012 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
   3013 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
   3014 
   3015 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
   3016 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
   3017 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
   3018 	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
   3019 
   3020 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
   3021 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
   3022 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
   3023 	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
   3024 
   3025 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
   3026 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
   3027 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
   3028 	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
   3029 
   3030 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
   3031 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
   3032 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
   3033 	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
   3034 
   3035 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
   3036 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
   3037 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
   3038 	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
   3039 
   3040 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
   3041 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
   3042 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
   3043 	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
   3044 
   3045 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
   3046 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
   3047 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
   3048 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
   3049 
   3050 	MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
   3051 	MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
   3052 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
   3053 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
   3054 	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
   3055 
   3056 	MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
   3057 	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
   3058 	MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
   3059 
   3060 	MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3061 
   3062 	MMIO_D(SKL_DFSM, D_SKL_PLUS);
   3063 	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
   3064 
   3065 	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
   3066 		NULL, NULL);
   3067 	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
   3068 		NULL, NULL);
   3069 
   3070 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
   3071 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
   3072 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
   3073 	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
   3074 		 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
   3075 	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   3076 		NULL, NULL);
   3077 
   3078 	/* TRTT */
   3079 	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3080 	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3081 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3082 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3083 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3084 	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
   3085 		NULL, gen9_trtte_write);
   3086 	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
   3087 
   3088 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
   3089 
   3090 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
   3091 
   3092 	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
   3093 	MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
   3094 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
   3095 
   3096 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
   3097 	MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
   3098 	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
   3099 	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
   3100 	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
   3101 	MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
   3102 	MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
   3103 	MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
   3104 	MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
   3105 	MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
   3106 
   3107 	MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
   3108 	MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
   3109 	MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
   3110 
   3111 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
   3112 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
   3113 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
   3114 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
   3115 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
   3116 	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
   3117 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
   3118 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
   3119 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
   3120 
   3121 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
   3122 #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
   3123 	MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   3124 		      NULL, csfe_chicken1_mmio_write);
   3125 #undef CSFE_CHICKEN1_REG
   3126 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   3127 		 NULL, NULL);
   3128 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
   3129 		 NULL, NULL);
   3130 
   3131 	MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
   3132 	MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
   3133 
   3134 	return 0;
   3135 }
   3136 
   3137 static int init_bxt_mmio_info(struct intel_gvt *gvt)
   3138 {
   3139 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   3140 	int ret;
   3141 
   3142 	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
   3143 
   3144 	MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
   3145 	MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
   3146 	MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
   3147 	MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
   3148 	MMIO_D(ERROR_GEN6, D_BXT);
   3149 	MMIO_D(DONE_REG, D_BXT);
   3150 	MMIO_D(EIR, D_BXT);
   3151 	MMIO_D(PGTBL_ER, D_BXT);
   3152 	MMIO_D(_MMIO(0x4194), D_BXT);
   3153 	MMIO_D(_MMIO(0x4294), D_BXT);
   3154 	MMIO_D(_MMIO(0x4494), D_BXT);
   3155 
   3156 	MMIO_RING_D(RING_PSMI_CTL, D_BXT);
   3157 	MMIO_RING_D(RING_DMA_FADD, D_BXT);
   3158 	MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
   3159 	MMIO_RING_D(RING_IPEHR, D_BXT);
   3160 	MMIO_RING_D(RING_INSTPS, D_BXT);
   3161 	MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
   3162 	MMIO_RING_D(RING_BBSTATE, D_BXT);
   3163 	MMIO_RING_D(RING_IPEIR, D_BXT);
   3164 
   3165 	MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
   3166 
   3167 	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
   3168 	MMIO_D(BXT_RP_STATE_CAP, D_BXT);
   3169 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
   3170 		NULL, bxt_phy_ctl_family_write);
   3171 	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
   3172 		NULL, bxt_phy_ctl_family_write);
   3173 	MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
   3174 	MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
   3175 	MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
   3176 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
   3177 		NULL, bxt_port_pll_enable_write);
   3178 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
   3179 		NULL, bxt_port_pll_enable_write);
   3180 	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
   3181 		bxt_port_pll_enable_write);
   3182 
   3183 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
   3184 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
   3185 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
   3186 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
   3187 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
   3188 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
   3189 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
   3190 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
   3191 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
   3192 
   3193 	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
   3194 	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
   3195 	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
   3196 	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
   3197 	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
   3198 	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
   3199 	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
   3200 	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
   3201 	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
   3202 
   3203 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
   3204 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
   3205 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
   3206 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
   3207 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
   3208 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
   3209 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
   3210 		NULL, bxt_pcs_dw12_grp_write);
   3211 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
   3212 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
   3213 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
   3214 		bxt_port_tx_dw3_read, NULL);
   3215 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
   3216 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
   3217 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
   3218 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
   3219 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
   3220 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
   3221 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
   3222 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
   3223 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
   3224 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
   3225 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
   3226 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
   3227 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
   3228 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
   3229 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
   3230 
   3231 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
   3232 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
   3233 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
   3234 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
   3235 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
   3236 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
   3237 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
   3238 		NULL, bxt_pcs_dw12_grp_write);
   3239 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
   3240 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
   3241 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
   3242 		bxt_port_tx_dw3_read, NULL);
   3243 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
   3244 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
   3245 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
   3246 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
   3247 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
   3248 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
   3249 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
   3250 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
   3251 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
   3252 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
   3253 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
   3254 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
   3255 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
   3256 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
   3257 	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
   3258 
   3259 	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
   3260 	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
   3261 	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
   3262 	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
   3263 	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
   3264 	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
   3265 	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
   3266 		NULL, bxt_pcs_dw12_grp_write);
   3267 	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
   3268 	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
   3269 	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
   3270 		bxt_port_tx_dw3_read, NULL);
   3271 	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
   3272 	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
   3273 	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
   3274 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
   3275 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
   3276 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
   3277 	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
   3278 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
   3279 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
   3280 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
   3281 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
   3282 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
   3283 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
   3284 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
   3285 	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
   3286 
   3287 	MMIO_D(BXT_DE_PLL_CTL, D_BXT);
   3288 	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
   3289 	MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
   3290 	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
   3291 
   3292 	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
   3293 	MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
   3294 
   3295 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
   3296 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
   3297 	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
   3298 
   3299 	MMIO_D(RC6_CTX_BASE, D_BXT);
   3300 
   3301 	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
   3302 	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
   3303 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
   3304 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
   3305 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
   3306 
   3307 	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
   3308 
   3309 	return 0;
   3310 }
   3311 
   3312 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
   3313 					      unsigned int offset)
   3314 {
   3315 	unsigned long device = intel_gvt_get_device_type(gvt);
   3316 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
   3317 	int num = gvt->mmio.num_mmio_block;
   3318 	int i;
   3319 
   3320 	for (i = 0; i < num; i++, block++) {
   3321 		if (!(device & block->device))
   3322 			continue;
   3323 		if (offset >= i915_mmio_reg_offset(block->offset) &&
   3324 		    offset < i915_mmio_reg_offset(block->offset) + block->size)
   3325 			return block;
   3326 	}
   3327 	return NULL;
   3328 }
   3329 
   3330 /**
   3331  * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
   3332  * @gvt: GVT device
   3333  *
   3334  * This function is called at the driver unloading stage, to clean up the MMIO
   3335  * information table of GVT device
   3336  *
   3337  */
   3338 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
   3339 {
   3340 	struct hlist_node *tmp;
   3341 	struct intel_gvt_mmio_info *e;
   3342 	int i;
   3343 
   3344 	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
   3345 		kfree(e);
   3346 
   3347 	vfree(gvt->mmio.mmio_attribute);
   3348 	gvt->mmio.mmio_attribute = NULL;
   3349 }
   3350 
   3351 /* Special MMIO blocks. */
   3352 static struct gvt_mmio_block mmio_blocks[] = {
   3353 	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
   3354 	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
   3355 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
   3356 		pvinfo_mmio_read, pvinfo_mmio_write},
   3357 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
   3358 	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
   3359 	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
   3360 };
   3361 
   3362 /**
   3363  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
   3364  * @gvt: GVT device
   3365  *
   3366  * This function is called at the initialization stage, to setup the MMIO
   3367  * information table for GVT device
   3368  *
   3369  * Returns:
   3370  * zero on success, negative if failed.
   3371  */
   3372 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
   3373 {
   3374 	struct intel_gvt_device_info *info = &gvt->device_info;
   3375 	struct drm_i915_private *dev_priv = gvt->dev_priv;
   3376 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
   3377 	int ret;
   3378 
   3379 	gvt->mmio.mmio_attribute = vzalloc(size);
   3380 	if (!gvt->mmio.mmio_attribute)
   3381 		return -ENOMEM;
   3382 
   3383 	ret = init_generic_mmio_info(gvt);
   3384 	if (ret)
   3385 		goto err;
   3386 
   3387 	if (IS_BROADWELL(dev_priv)) {
   3388 		ret = init_bdw_mmio_info(gvt);
   3389 		if (ret)
   3390 			goto err;
   3391 	} else if (IS_SKYLAKE(dev_priv)
   3392 		|| IS_KABYLAKE(dev_priv)
   3393 		|| IS_COFFEELAKE(dev_priv)) {
   3394 		ret = init_bdw_mmio_info(gvt);
   3395 		if (ret)
   3396 			goto err;
   3397 		ret = init_skl_mmio_info(gvt);
   3398 		if (ret)
   3399 			goto err;
   3400 	} else if (IS_BROXTON(dev_priv)) {
   3401 		ret = init_bdw_mmio_info(gvt);
   3402 		if (ret)
   3403 			goto err;
   3404 		ret = init_skl_mmio_info(gvt);
   3405 		if (ret)
   3406 			goto err;
   3407 		ret = init_bxt_mmio_info(gvt);
   3408 		if (ret)
   3409 			goto err;
   3410 	}
   3411 
   3412 	gvt->mmio.mmio_block = mmio_blocks;
   3413 	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
   3414 
   3415 	return 0;
   3416 err:
   3417 	intel_gvt_clean_mmio_info(gvt);
   3418 	return ret;
   3419 }
   3420 
   3421 /**
   3422  * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
   3423  * @gvt: a GVT device
   3424  * @handler: the handler
   3425  * @data: private data given to handler
   3426  *
   3427  * Returns:
   3428  * Zero on success, negative error code if failed.
   3429  */
   3430 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
   3431 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
   3432 	void *data)
   3433 {
   3434 	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
   3435 	struct intel_gvt_mmio_info *e;
   3436 	int i, j, ret;
   3437 
   3438 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
   3439 		ret = handler(gvt, e->offset, data);
   3440 		if (ret)
   3441 			return ret;
   3442 	}
   3443 
   3444 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
   3445 		/* pvinfo data doesn't come from hw mmio */
   3446 		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
   3447 			continue;
   3448 
   3449 		for (j = 0; j < block->size; j += 4) {
   3450 			ret = handler(gvt,
   3451 				      i915_mmio_reg_offset(block->offset) + j,
   3452 				      data);
   3453 			if (ret)
   3454 				return ret;
   3455 		}
   3456 	}
   3457 	return 0;
   3458 }
   3459 
   3460 /**
   3461  * intel_vgpu_default_mmio_read - default MMIO read handler
   3462  * @vgpu: a vGPU
   3463  * @offset: access offset
   3464  * @p_data: data return buffer
   3465  * @bytes: access data length
   3466  *
   3467  * Returns:
   3468  * Zero on success, negative error code if failed.
   3469  */
   3470 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
   3471 		void *p_data, unsigned int bytes)
   3472 {
   3473 	read_vreg(vgpu, offset, p_data, bytes);
   3474 	return 0;
   3475 }
   3476 
   3477 /**
   3478  * intel_t_default_mmio_write - default MMIO write handler
   3479  * @vgpu: a vGPU
   3480  * @offset: access offset
   3481  * @p_data: write data buffer
   3482  * @bytes: access data length
   3483  *
   3484  * Returns:
   3485  * Zero on success, negative error code if failed.
   3486  */
   3487 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   3488 		void *p_data, unsigned int bytes)
   3489 {
   3490 	write_vreg(vgpu, offset, p_data, bytes);
   3491 	return 0;
   3492 }
   3493 
   3494 /**
   3495  * intel_vgpu_mask_mmio_write - write mask register
   3496  * @vgpu: a vGPU
   3497  * @offset: access offset
   3498  * @p_data: write data buffer
   3499  * @bytes: access data length
   3500  *
   3501  * Returns:
   3502  * Zero on success, negative error code if failed.
   3503  */
   3504 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
   3505 		void *p_data, unsigned int bytes)
   3506 {
   3507 	u32 mask, old_vreg;
   3508 
   3509 	old_vreg = vgpu_vreg(vgpu, offset);
   3510 	write_vreg(vgpu, offset, p_data, bytes);
   3511 	mask = vgpu_vreg(vgpu, offset) >> 16;
   3512 	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
   3513 				(vgpu_vreg(vgpu, offset) & mask);
   3514 
   3515 	return 0;
   3516 }
   3517 
   3518 /**
   3519  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
   3520  * force-nopriv register
   3521  *
   3522  * @gvt: a GVT device
   3523  * @offset: register offset
   3524  *
   3525  * Returns:
   3526  * True if the register is in force-nonpriv whitelist;
   3527  * False if outside;
   3528  */
   3529 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
   3530 					  unsigned int offset)
   3531 {
   3532 	return in_whitelist(offset);
   3533 }
   3534 
   3535 /**
   3536  * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
   3537  * @vgpu: a vGPU
   3538  * @offset: register offset
   3539  * @pdata: data buffer
   3540  * @bytes: data length
   3541  * @is_read: read or write
   3542  *
   3543  * Returns:
   3544  * Zero on success, negative error code if failed.
   3545  */
   3546 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
   3547 			   void *pdata, unsigned int bytes, bool is_read)
   3548 {
   3549 	struct intel_gvt *gvt = vgpu->gvt;
   3550 	struct intel_gvt_mmio_info *mmio_info;
   3551 	struct gvt_mmio_block *mmio_block;
   3552 	gvt_mmio_func func;
   3553 	int ret;
   3554 
   3555 	if (WARN_ON(bytes > 8))
   3556 		return -EINVAL;
   3557 
   3558 	/*
   3559 	 * Handle special MMIO blocks.
   3560 	 */
   3561 	mmio_block = find_mmio_block(gvt, offset);
   3562 	if (mmio_block) {
   3563 		func = is_read ? mmio_block->read : mmio_block->write;
   3564 		if (func)
   3565 			return func(vgpu, offset, pdata, bytes);
   3566 		goto default_rw;
   3567 	}
   3568 
   3569 	/*
   3570 	 * Normal tracked MMIOs.
   3571 	 */
   3572 	mmio_info = find_mmio_info(gvt, offset);
   3573 	if (!mmio_info) {
   3574 		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
   3575 		goto default_rw;
   3576 	}
   3577 
   3578 	if (is_read)
   3579 		return mmio_info->read(vgpu, offset, pdata, bytes);
   3580 	else {
   3581 		u64 ro_mask = mmio_info->ro_mask;
   3582 		u32 old_vreg = 0;
   3583 		u64 data = 0;
   3584 
   3585 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
   3586 			old_vreg = vgpu_vreg(vgpu, offset);
   3587 		}
   3588 
   3589 		if (likely(!ro_mask))
   3590 			ret = mmio_info->write(vgpu, offset, pdata, bytes);
   3591 		else if (!~ro_mask) {
   3592 			gvt_vgpu_err("try to write RO reg %x\n", offset);
   3593 			return 0;
   3594 		} else {
   3595 			/* keep the RO bits in the virtual register */
   3596 			memcpy(&data, pdata, bytes);
   3597 			data &= ~ro_mask;
   3598 			data |= vgpu_vreg(vgpu, offset) & ro_mask;
   3599 			ret = mmio_info->write(vgpu, offset, &data, bytes);
   3600 		}
   3601 
   3602 		/* higher 16bits of mode ctl regs are mask bits for change */
   3603 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
   3604 			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
   3605 
   3606 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
   3607 					| (vgpu_vreg(vgpu, offset) & mask);
   3608 		}
   3609 	}
   3610 
   3611 	return ret;
   3612 
   3613 default_rw:
   3614 	return is_read ?
   3615 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
   3616 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
   3617 }
   3618