1 /* $NetBSD: i915_drv.h,v 1.49 2024/04/16 14:34:02 riastradh Exp $ */ 2 3 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 4 */ 5 /* 6 * 7 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 8 * All Rights Reserved. 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a 11 * copy of this software and associated documentation files (the 12 * "Software"), to deal in the Software without restriction, including 13 * without limitation the rights to use, copy, modify, merge, publish, 14 * distribute, sub license, and/or sell copies of the Software, and to 15 * permit persons to whom the Software is furnished to do so, subject to 16 * the following conditions: 17 * 18 * The above copyright notice and this permission notice (including the 19 * next paragraph) shall be included in all copies or substantial portions 20 * of the Software. 21 * 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 23 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 25 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 26 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 27 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 28 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 29 * 30 */ 31 32 #ifndef _I915_DRV_H_ 33 #define _I915_DRV_H_ 34 35 #include <uapi/drm/i915_drm.h> 36 #include <uapi/drm/drm_fourcc.h> 37 38 #include <linux/io-mapping.h> 39 #include <linux/i2c.h> 40 #include <linux/i2c-algo-bit.h> 41 #include <linux/backlight.h> 42 #include <linux/hash.h> 43 #include <linux/intel-iommu.h> 44 #include <linux/kref.h> 45 #include <linux/mm_types.h> 46 #include <linux/perf_event.h> 47 #include <linux/pm_qos.h> 48 #include <linux/dma-resv.h> 49 #include <linux/shmem_fs.h> 50 #include <linux/stackdepot.h> 51 #include <linux/xarray.h> 52 #include <linux/uuid.h> 53 #include <linux/acpi.h> 54 55 #include <drm/intel-gtt.h> 56 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 57 #include <drm/drm_gem.h> 58 #include <drm/drm_auth.h> 59 #include <drm/drm_cache.h> 60 #include <drm/drm_util.h> 61 #include <drm/drm_dsc.h> 62 #include <drm/drm_atomic.h> 63 #include <drm/drm_connector.h> 64 #include <drm/i915_mei_hdcp_interface.h> 65 66 #include "i915_fixed.h" 67 #include "i915_params.h" 68 #include "i915_reg.h" 69 #include "i915_utils.h" 70 71 #include "display/intel_bios.h" 72 #include "display/intel_display.h" 73 #include "display/intel_display_power.h" 74 #include "display/intel_dpll_mgr.h" 75 #include "display/intel_dsb.h" 76 #include "display/intel_frontbuffer.h" 77 #include "display/intel_gmbus.h" 78 #include "display/intel_opregion.h" 79 80 #include "gem/i915_gem_context_types.h" 81 #include "gem/i915_gem_shrinker.h" 82 #include "gem/i915_gem_stolen.h" 83 84 #include "gt/intel_lrc.h" 85 #include "gt/intel_engine.h" 86 #include "gt/intel_gt_types.h" 87 #include "gt/intel_workarounds.h" 88 #include "gt/uc/intel_uc.h" 89 90 #include "intel_device_info.h" 91 #include "intel_pch.h" 92 #include "intel_runtime_pm.h" 93 #include "intel_memory_region.h" 94 #include "intel_uncore.h" 95 #include "intel_wakeref.h" 96 #include "intel_wopcm.h" 97 98 #include "i915_gem.h" 99 #include "i915_gem_fence_reg.h" 100 #include "i915_gem_gtt.h" 101 #include "i915_gpu_error.h" 102 #include "i915_perf_types.h" 103 #include "i915_request.h" 104 #include "i915_scheduler.h" 105 #include 
"gt/intel_timeline.h" 106 #include "i915_vma.h" 107 #include "i915_irq.h" 108 109 #include "intel_region_lmem.h" 110 111 #include "intel_gvt.h" 112 113 /* General customization: 114 */ 115 116 #define DRIVER_NAME "i915" 117 #define DRIVER_DESC "Intel Graphics" 118 #define DRIVER_DATE "20200114" 119 #define DRIVER_TIMESTAMP 1579001978 120 121 struct drm_i915_gem_object; 122 123 enum hpd_pin { 124 HPD_NONE = 0, 125 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 126 HPD_CRT, 127 HPD_SDVO_B, 128 HPD_SDVO_C, 129 HPD_PORT_A, 130 HPD_PORT_B, 131 HPD_PORT_C, 132 HPD_PORT_D, 133 HPD_PORT_E, 134 HPD_PORT_F, 135 HPD_PORT_G, 136 HPD_PORT_H, 137 HPD_PORT_I, 138 139 HPD_NUM_PINS 140 }; 141 142 #define for_each_hpd_pin(__pin) \ 143 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 144 145 /* Threshold == 5 for long IRQs, 50 for short */ 146 #define HPD_STORM_DEFAULT_THRESHOLD 50 147 148 struct i915_hotplug { 149 struct delayed_work hotplug_work; 150 151 struct { 152 unsigned long last_jiffies; 153 int count; 154 enum { 155 HPD_ENABLED = 0, 156 HPD_DISABLED = 1, 157 HPD_MARK_DISABLED = 2 158 } state; 159 } stats[HPD_NUM_PINS]; 160 u32 event_bits; 161 u32 retry_bits; 162 struct delayed_work reenable_work; 163 164 u32 long_port_mask; 165 u32 short_port_mask; 166 struct work_struct dig_port_work; 167 168 struct work_struct poll_init_work; 169 bool poll_enabled; 170 171 unsigned int hpd_storm_threshold; 172 /* Whether or not to count short HPD IRQs in HPD storms */ 173 u8 hpd_short_storm_enabled; 174 175 /* 176 * if we get a HPD irq from DP and a HPD irq from non-DP 177 * the non-DP HPD could block the workqueue on a mode config 178 * mutex getting, that userspace may have taken. However 179 * userspace is waiting on the DP workqueue to run which is 180 * blocked behind the non-DP one. 181 */ 182 struct workqueue_struct *dp_wq; 183 }; 184 185 #define I915_GEM_GPU_DOMAINS \ 186 (I915_GEM_DOMAIN_RENDER | \ 187 I915_GEM_DOMAIN_SAMPLER | \ 188 I915_GEM_DOMAIN_COMMAND | \ 189 I915_GEM_DOMAIN_INSTRUCTION | \ 190 I915_GEM_DOMAIN_VERTEX) 191 192 struct drm_i915_private; 193 struct i915_mm_struct; 194 struct i915_mmu_object; 195 196 struct drm_i915_file_private { 197 struct drm_i915_private *dev_priv; 198 199 union { 200 struct drm_file *file; 201 struct rcu_head rcu; 202 }; 203 204 struct { 205 spinlock_t lock; 206 struct list_head request_list; 207 } mm; 208 209 struct xarray context_xa; 210 211 struct idr vm_idr; 212 struct mutex vm_idr_lock; /* guards vm_idr */ 213 214 unsigned int bsd_engine; 215 216 /* 217 * Every context ban increments per client ban score. Also 218 * hangs in short succession increments ban score. If ban threshold 219 * is reached, client is considered banned and submitting more work 220 * will fail. This is a stop gap measure to limit the badly behaving 221 * clients access to gpu. Note that unbannable contexts never increment 222 * the client ban score. 223 */ 224 #define I915_CLIENT_SCORE_HANG_FAST 1 225 #define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) 226 #define I915_CLIENT_SCORE_CONTEXT_BAN 3 227 #define I915_CLIENT_SCORE_BANNED 9 228 /** ban_score: Accumulated score of all ctx bans and fast hangs. */ 229 atomic_t ban_score; 230 unsigned long hang_timestamp; 231 }; 232 233 /* Interface history: 234 * 235 * 1.1: Original. 
236 * 1.2: Add Power Management 237 * 1.3: Add vblank support 238 * 1.4: Fix cmdbuffer path, add heap destroy 239 * 1.5: Add vblank pipe configuration 240 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 241 * - Support vertical blank on secondary display pipe 242 */ 243 #define DRIVER_MAJOR 1 244 #define DRIVER_MINOR 6 245 #define DRIVER_PATCHLEVEL 0 246 247 struct intel_overlay; 248 struct intel_overlay_error_state; 249 250 struct sdvo_device_mapping { 251 u8 initialized; 252 u8 dvo_port; 253 u8 slave_addr; 254 u8 dvo_wiring; 255 u8 i2c_pin; 256 u8 ddc_pin; 257 }; 258 259 struct intel_connector; 260 struct intel_encoder; 261 struct intel_atomic_state; 262 struct intel_crtc_state; 263 struct intel_initial_plane_config; 264 struct intel_crtc; 265 struct intel_limit; 266 struct dpll; 267 struct intel_cdclk_state; 268 269 struct drm_i915_display_funcs { 270 void (*get_cdclk)(struct drm_i915_private *dev_priv, 271 struct intel_cdclk_state *cdclk_state); 272 void (*set_cdclk)(struct drm_i915_private *dev_priv, 273 const struct intel_cdclk_state *cdclk_state, 274 enum pipe pipe); 275 int (*get_fifo_size)(struct drm_i915_private *dev_priv, 276 enum i9xx_plane_id i9xx_plane); 277 int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state); 278 int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state); 279 void (*initial_watermarks)(struct intel_atomic_state *state, 280 struct intel_crtc *crtc); 281 void (*atomic_update_watermarks)(struct intel_atomic_state *state, 282 struct intel_crtc *crtc); 283 void (*optimize_watermarks)(struct intel_atomic_state *state, 284 struct intel_crtc *crtc); 285 int (*compute_global_watermarks)(struct intel_atomic_state *state); 286 void (*update_wm)(struct intel_crtc *crtc); 287 int (*modeset_calc_cdclk)(struct intel_atomic_state *state); 288 u8 (*calc_voltage_level)(int cdclk); 289 /* Returns the active state of the crtc, and if the crtc is active, 290 * fills out the pipe-config with the hw state. */ 291 bool (*get_pipe_config)(struct intel_crtc *, 292 struct intel_crtc_state *); 293 void (*get_initial_plane_config)(struct intel_crtc *, 294 struct intel_initial_plane_config *); 295 int (*crtc_compute_clock)(struct intel_crtc *crtc, 296 struct intel_crtc_state *crtc_state); 297 void (*crtc_enable)(struct intel_atomic_state *state, 298 struct intel_crtc *crtc); 299 void (*crtc_disable)(struct intel_atomic_state *state, 300 struct intel_crtc *crtc); 301 void (*commit_modeset_enables)(struct intel_atomic_state *state); 302 void (*commit_modeset_disables)(struct intel_atomic_state *state); 303 void (*audio_codec_enable)(struct intel_encoder *encoder, 304 const struct intel_crtc_state *crtc_state, 305 const struct drm_connector_state *conn_state); 306 void (*audio_codec_disable)(struct intel_encoder *encoder, 307 const struct intel_crtc_state *old_crtc_state, 308 const struct drm_connector_state *old_conn_state); 309 void (*fdi_link_train)(struct intel_crtc *crtc, 310 const struct intel_crtc_state *crtc_state); 311 void (*init_clock_gating)(struct drm_i915_private *dev_priv); 312 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); 313 /* clock updates for mode set */ 314 /* cursor updates */ 315 /* render clock increase/decrease */ 316 /* display clock increase/decrease */ 317 /* pll clock increase/decrease */ 318 319 int (*color_check)(struct intel_crtc_state *crtc_state); 320 /* 321 * Program double buffered color management registers during 322 * vblank evasion. 
The registers should then latch during the 323 * next vblank start, alongside any other double buffered registers 324 * involved with the same commit. 325 */ 326 void (*color_commit)(const struct intel_crtc_state *crtc_state); 327 /* 328 * Load LUTs (and other single buffered color management 329 * registers). Will (hopefully) be called during the vblank 330 * following the latching of any double buffered registers 331 * involved with the same commit. 332 */ 333 void (*load_luts)(const struct intel_crtc_state *crtc_state); 334 void (*read_luts)(struct intel_crtc_state *crtc_state); 335 }; 336 337 struct intel_csr { 338 struct work_struct work; 339 const char *fw_path; 340 u32 required_version; 341 u32 max_fw_size; /* bytes */ 342 u32 *dmc_payload; 343 u32 dmc_fw_size; /* dwords */ 344 u32 version; 345 u32 mmio_count; 346 i915_reg_t mmioaddr[20]; 347 u32 mmiodata[20]; 348 u32 dc_state; 349 u32 target_dc_state; 350 u32 allowed_dc_mask; 351 intel_wakeref_t wakeref; 352 }; 353 354 enum i915_cache_level { 355 I915_CACHE_NONE = 0, 356 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ 357 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific 358 caches, e.g. sampler/render caches, and the 359 large Last-Level-Cache. LLC is coherent with 360 the CPU, but L3 is only visible to the GPU. */ 361 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ 362 }; 363 364 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ 365 366 struct intel_fbc { 367 /* This is always the inner lock when overlapping with struct_mutex and 368 * it's the outer lock when overlapping with stolen_lock. */ 369 struct mutex lock; 370 unsigned threshold; 371 unsigned int possible_framebuffer_bits; 372 unsigned int busy_bits; 373 struct intel_crtc *crtc; 374 375 struct drm_mm_node compressed_fb; 376 struct drm_mm_node *compressed_llb; 377 378 bool false_color; 379 380 bool active; 381 bool activated; 382 bool flip_pending; 383 384 bool underrun_detected; 385 struct work_struct underrun_work; 386 387 /* 388 * Due to the atomic rules we can't access some structures without the 389 * appropriate locking, so we cache information here in order to avoid 390 * these problems. 391 */ 392 struct intel_fbc_state_cache { 393 struct { 394 unsigned int mode_flags; 395 u32 hsw_bdw_pixel_rate; 396 } crtc; 397 398 struct { 399 unsigned int rotation; 400 int src_w; 401 int src_h; 402 bool visible; 403 /* 404 * Display surface base address adjustment for 405 * pageflips. Note that on gen4+ this only adjusts up 406 * to a tile, offsets within a tile are handled in 407 * the hw itself (with the TILEOFF register). 408 */ 409 int adjusted_x; 410 int adjusted_y; 411 412 int y; 413 414 u16 pixel_blend_mode; 415 } plane; 416 417 struct { 418 const struct drm_format_info *format; 419 unsigned int stride; 420 } fb; 421 u16 gen9_wa_cfb_stride; 422 s8 fence_id; 423 } state_cache; 424 425 /* 426 * This structure contains everything that's relevant to program the 427 * hardware registers. When we want to figure out if we need to disable 428 * and re-enable FBC for a new configuration we just check if there's 429 * something different in the struct. The genx_fbc_activate functions 430 * are supposed to read from it in order to program the registers. 
431 */ 432 struct intel_fbc_reg_params { 433 struct { 434 enum pipe pipe; 435 enum i9xx_plane_id i9xx_plane; 436 unsigned int fence_y_offset; 437 } crtc; 438 439 struct { 440 const struct drm_format_info *format; 441 unsigned int stride; 442 } fb; 443 444 int cfb_size; 445 u16 gen9_wa_cfb_stride; 446 s8 fence_id; 447 bool plane_visible; 448 } params; 449 450 const char *no_fbc_reason; 451 }; 452 453 /* 454 * HIGH_RR is the highest eDP panel refresh rate read from EDID 455 * LOW_RR is the lowest eDP panel refresh rate found from EDID 456 * parsing for same resolution. 457 */ 458 enum drrs_refresh_rate_type { 459 DRRS_HIGH_RR, 460 DRRS_LOW_RR, 461 DRRS_MAX_RR, /* RR count */ 462 }; 463 464 enum drrs_support_type { 465 DRRS_NOT_SUPPORTED = 0, 466 STATIC_DRRS_SUPPORT = 1, 467 SEAMLESS_DRRS_SUPPORT = 2 468 }; 469 470 struct intel_dp; 471 struct i915_drrs { 472 struct mutex mutex; 473 struct delayed_work work; 474 struct intel_dp *dp; 475 unsigned busy_frontbuffer_bits; 476 enum drrs_refresh_rate_type refresh_rate_type; 477 enum drrs_support_type type; 478 }; 479 480 struct i915_psr { 481 struct mutex lock; 482 483 #define I915_PSR_DEBUG_MODE_MASK 0x0f 484 #define I915_PSR_DEBUG_DEFAULT 0x00 485 #define I915_PSR_DEBUG_DISABLE 0x01 486 #define I915_PSR_DEBUG_ENABLE 0x02 487 #define I915_PSR_DEBUG_FORCE_PSR1 0x03 488 #define I915_PSR_DEBUG_IRQ 0x10 489 490 u32 debug; 491 bool sink_support; 492 bool enabled; 493 struct intel_dp *dp; 494 enum pipe pipe; 495 enum transcoder transcoder; 496 bool active; 497 struct work_struct work; 498 unsigned busy_frontbuffer_bits; 499 bool sink_psr2_support; 500 bool link_standby; 501 bool colorimetry_support; 502 bool psr2_enabled; 503 u8 sink_sync_latency; 504 ktime_t last_entry_attempt; 505 ktime_t last_exit; 506 bool sink_not_reliable; 507 bool irq_aux_error; 508 u16 su_x_granularity; 509 bool dc3co_enabled; 510 u32 dc3co_exit_delay; 511 struct delayed_work idle_work; 512 bool initially_probed; 513 }; 514 515 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 516 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 517 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 518 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 519 #define QUIRK_INCREASE_T12_DELAY (1<<6) 520 #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) 521 522 #ifdef __NetBSD__ 523 /* NetBSD hack to note version was called and thus mmap flags valid. */ 524 #define QUIRK_NETBSD_VERSION_CALLED (1ul<<31) 525 #endif 526 527 struct intel_fbdev; 528 struct intel_fbc_work; 529 530 struct intel_gmbus { 531 struct i2c_adapter adapter; 532 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 533 u32 force_bit; 534 u32 reg0; 535 i915_reg_t gpio_reg; 536 struct i2c_algo_bit_data bit_algo; 537 struct drm_i915_private *dev_priv; 538 }; 539 540 struct i915_suspend_saved_registers { 541 u32 saveDSPARB; 542 u32 saveFBC_CONTROL; 543 u32 saveCACHE_MODE_0; 544 u32 saveMI_ARB_STATE; 545 u32 saveSWF0[16]; 546 u32 saveSWF1[16]; 547 u32 saveSWF3[3]; 548 u64 saveFENCE[I915_MAX_NUM_FENCES]; 549 u32 savePCH_PORT_HOTPLUG; 550 u16 saveGCDGMBUS; 551 }; 552 553 struct vlv_s0ix_state; 554 555 #define MAX_L3_SLICES 2 556 struct intel_l3_parity { 557 u32 *remap_info[MAX_L3_SLICES]; 558 struct work_struct error_work; 559 int which_slice; 560 }; 561 562 struct i915_gem_mm { 563 /** Memory allocator for GTT stolen memory */ 564 struct drm_mm stolen; 565 /** Protects the usage of the GTT stolen memory allocator. This is 566 * always the inner lock when overlapping with struct_mutex. 
*/ 567 struct mutex stolen_lock; 568 569 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ 570 spinlock_t obj_lock; 571 572 /** 573 * List of objects which are purgeable. 574 */ 575 struct list_head purge_list; 576 577 /** 578 * List of objects which have allocated pages and are shrinkable. 579 */ 580 struct list_head shrink_list; 581 582 /** 583 * List of objects which are pending destruction. 584 */ 585 struct llist_head free_list; 586 struct work_struct free_work; 587 /** 588 * Count of objects pending destructions. Used to skip needlessly 589 * waiting on an RCU barrier if no objects are waiting to be freed. 590 */ 591 atomic_t free_count; 592 593 /** 594 * Small stash of WC pages 595 */ 596 struct pagestash wc_stash; 597 598 /** 599 * tmpfs instance used for shmem backed objects 600 */ 601 struct vfsmount *gemfs; 602 603 struct intel_memory_region *regions[INTEL_REGION_UNKNOWN]; 604 605 struct notifier_block oom_notifier; 606 struct notifier_block vmap_notifier; 607 struct shrinker shrinker; 608 609 /** 610 * Workqueue to fault in userptr pages, flushed by the execbuf 611 * when required but otherwise left to userspace to try again 612 * on EAGAIN. 613 */ 614 struct workqueue_struct *userptr_wq; 615 616 /* shrinker accounting, also useful for userland debugging */ 617 u64 shrink_memory; 618 u32 shrink_count; 619 }; 620 621 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ 622 623 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ 624 #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ 625 626 #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ 627 #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ 628 629 #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ 630 631 /* Amount of SAGV/QGV points, BSpec precisely defines this */ 632 #define I915_NUM_QGV_POINTS 8 633 634 struct ddi_vbt_port_info { 635 /* Non-NULL if port present. */ 636 const struct child_device_config *child; 637 638 int max_tmds_clock; 639 640 /* This is an index in the HDMI/DVI DDI buffer translation table. 
*/ 641 u8 hdmi_level_shift; 642 u8 hdmi_level_shift_set:1; 643 644 u8 supports_dvi:1; 645 u8 supports_hdmi:1; 646 u8 supports_dp:1; 647 u8 supports_edp:1; 648 u8 supports_typec_usb:1; 649 u8 supports_tbt:1; 650 651 u8 alternate_aux_channel; 652 u8 alternate_ddc_pin; 653 654 u8 dp_boost_level; 655 u8 hdmi_boost_level; 656 int dp_max_link_rate; /* 0 for not limited by VBT */ 657 }; 658 659 enum psr_lines_to_wait { 660 PSR_0_LINES_TO_WAIT = 0, 661 PSR_1_LINE_TO_WAIT, 662 PSR_4_LINES_TO_WAIT, 663 PSR_8_LINES_TO_WAIT 664 }; 665 666 struct intel_vbt_data { 667 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 668 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 669 670 /* Feature bits */ 671 unsigned int int_tv_support:1; 672 unsigned int lvds_dither:1; 673 unsigned int int_crt_support:1; 674 unsigned int lvds_use_ssc:1; 675 unsigned int int_lvds_support:1; 676 unsigned int display_clock_mode:1; 677 unsigned int fdi_rx_polarity_inverted:1; 678 unsigned int panel_type:4; 679 int lvds_ssc_freq; 680 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 681 enum drm_panel_orientation orientation; 682 683 enum drrs_support_type drrs_type; 684 685 struct { 686 int rate; 687 int lanes; 688 int preemphasis; 689 int vswing; 690 bool low_vswing; 691 bool initialized; 692 int bpp; 693 struct edp_power_seq pps; 694 } edp; 695 696 struct { 697 bool enable; 698 bool full_link; 699 bool require_aux_wakeup; 700 int idle_frames; 701 enum psr_lines_to_wait lines_to_wait; 702 int tp1_wakeup_time_us; 703 int tp2_tp3_wakeup_time_us; 704 int psr2_tp2_tp3_wakeup_time_us; 705 } psr; 706 707 struct { 708 u16 pwm_freq_hz; 709 bool present; 710 bool active_low_pwm; 711 u8 min_brightness; /* min_brightness/255 of max */ 712 u8 controller; /* brightness controller number */ 713 enum intel_backlight_type type; 714 } backlight; 715 716 /* MIPI DSI */ 717 struct { 718 u16 panel_id; 719 struct mipi_config *config; 720 struct mipi_pps_data *pps; 721 u16 bl_ports; 722 u16 cabc_ports; 723 u8 seq_version; 724 u32 size; 725 u8 *data; 726 const u8 *sequence[MIPI_SEQ_MAX]; 727 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 728 enum drm_panel_orientation orientation; 729 } dsi; 730 731 int crt_ddc_pin; 732 733 struct list_head display_devices; 734 735 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 736 struct sdvo_device_mapping sdvo_mappings[2]; 737 }; 738 739 enum intel_ddb_partitioning { 740 INTEL_DDB_PART_1_2, 741 INTEL_DDB_PART_5_6, /* IVB+ */ 742 }; 743 744 struct intel_wm_level { 745 bool enable; 746 u32 pri_val; 747 u32 spr_val; 748 u32 cur_val; 749 u32 fbc_val; 750 }; 751 752 struct ilk_wm_values { 753 u32 wm_pipe[3]; 754 u32 wm_lp[3]; 755 u32 wm_lp_spr[3]; 756 u32 wm_linetime[3]; 757 bool enable_fbc_wm; 758 enum intel_ddb_partitioning partitioning; 759 }; 760 761 struct g4x_pipe_wm { 762 u16 plane[I915_MAX_PLANES]; 763 u16 fbc; 764 }; 765 766 struct g4x_sr_wm { 767 u16 plane; 768 u16 cursor; 769 u16 fbc; 770 }; 771 772 struct vlv_wm_ddl_values { 773 u8 plane[I915_MAX_PLANES]; 774 }; 775 776 struct vlv_wm_values { 777 struct g4x_pipe_wm pipe[3]; 778 struct g4x_sr_wm sr; 779 struct vlv_wm_ddl_values ddl[3]; 780 u8 level; 781 bool cxsr; 782 }; 783 784 struct g4x_wm_values { 785 struct g4x_pipe_wm pipe[2]; 786 struct g4x_sr_wm sr; 787 struct g4x_sr_wm hpll; 788 bool cxsr; 789 bool hpll_en; 790 bool fbc_en; 791 }; 792 793 struct skl_ddb_entry { 794 u16 start, end; /* in number of blocks, 'end' is exclusive */ 795 }; 796 797 static inline u16 skl_ddb_entry_size(const struct 
skl_ddb_entry *entry) 798 { 799 return entry->end - entry->start; 800 } 801 802 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 803 const struct skl_ddb_entry *e2) 804 { 805 if (e1->start == e2->start && e1->end == e2->end) 806 return true; 807 808 return false; 809 } 810 811 struct skl_ddb_allocation { 812 u8 enabled_slices; /* GEN11 has 2 configurable slices */ 813 }; 814 815 struct skl_ddb_values { 816 unsigned dirty_pipes; 817 struct skl_ddb_allocation ddb; 818 }; 819 820 struct skl_wm_level { 821 u16 min_ddb_alloc; 822 u16 plane_res_b; 823 u8 plane_res_l; 824 bool plane_en; 825 bool ignore_lines; 826 }; 827 828 /* Stores plane specific WM parameters */ 829 struct skl_wm_params { 830 bool x_tiled, y_tiled; 831 bool rc_surface; 832 bool is_planar; 833 u32 width; 834 u8 cpp; 835 u32 plane_pixel_rate; 836 u32 y_min_scanlines; 837 u32 plane_bytes_per_line; 838 uint_fixed_16_16_t plane_blocks_per_line; 839 uint_fixed_16_16_t y_tile_minimum; 840 u32 linetime_us; 841 u32 dbuf_block_size; 842 }; 843 844 enum intel_pipe_crc_source { 845 INTEL_PIPE_CRC_SOURCE_NONE, 846 INTEL_PIPE_CRC_SOURCE_PLANE1, 847 INTEL_PIPE_CRC_SOURCE_PLANE2, 848 INTEL_PIPE_CRC_SOURCE_PLANE3, 849 INTEL_PIPE_CRC_SOURCE_PLANE4, 850 INTEL_PIPE_CRC_SOURCE_PLANE5, 851 INTEL_PIPE_CRC_SOURCE_PLANE6, 852 INTEL_PIPE_CRC_SOURCE_PLANE7, 853 INTEL_PIPE_CRC_SOURCE_PIPE, 854 /* TV/DP on pre-gen5/vlv can't use the pipe source. */ 855 INTEL_PIPE_CRC_SOURCE_TV, 856 INTEL_PIPE_CRC_SOURCE_DP_B, 857 INTEL_PIPE_CRC_SOURCE_DP_C, 858 INTEL_PIPE_CRC_SOURCE_DP_D, 859 INTEL_PIPE_CRC_SOURCE_AUTO, 860 INTEL_PIPE_CRC_SOURCE_MAX, 861 }; 862 863 #define INTEL_PIPE_CRC_ENTRIES_NR 128 864 struct intel_pipe_crc { 865 spinlock_t lock; 866 int skipped; 867 enum intel_pipe_crc_source source; 868 }; 869 870 struct i915_frontbuffer_tracking { 871 spinlock_t lock; 872 873 /* 874 * Tracking bits for delayed frontbuffer flushing due to gpu activity or 875 * scheduled flips. 876 */ 877 unsigned busy_bits; 878 unsigned flip_bits; 879 }; 880 881 struct i915_virtual_gpu { 882 struct mutex lock; /* serialises sending of g2v_notify command pkts */ 883 bool active; 884 u32 caps; 885 }; 886 887 /* used in computing the new watermarks state */ 888 struct intel_wm_config { 889 unsigned int num_pipes_active; 890 bool sprites_enabled; 891 bool sprites_scaled; 892 }; 893 894 struct intel_cdclk_state { 895 unsigned int cdclk, vco, ref, bypass; 896 u8 voltage_level; 897 }; 898 899 struct i915_selftest_stash { 900 atomic_t counter; 901 }; 902 903 #ifdef __NetBSD__ 904 # define __i915_iomem 905 # define __iomem __i915_iomem 906 #endif 907 908 struct drm_i915_private { 909 struct drm_device drm; 910 911 struct intel_device_info __info; /* Use INTEL_INFO() to access. */ 912 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ 913 struct intel_driver_caps caps; 914 915 /** 916 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and 917 * end of stolen which we can optionally use to create GEM objects 918 * backed by stolen memory. Note that stolen_usable_size tells us 919 * exactly how much of this we are actually allowed to use, given that 920 * some portion of it is in fact reserved for use by hardware functions. 921 */ 922 struct resource dsm; 923 /** 924 * Reserved portion of Data Stolen Memory 925 */ 926 struct resource dsm_reserved; 927 928 /* 929 * Stolen memory is segmented in hardware with different portions 930 * offlimits to certain functions. 
931 * 932 * The drm_mm is initialised to the total accessible range, as found 933 * from the PCI config. On Broadwell+, this is further restricted to 934 * avoid the first page! The upper end of stolen memory is reserved for 935 * hardware functions and similarly removed from the accessible range. 936 */ 937 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ 938 939 struct intel_uncore uncore; 940 struct intel_uncore_mmio_debug mmio_debug; 941 942 struct i915_virtual_gpu vgpu; 943 944 struct intel_gvt *gvt; 945 946 struct intel_wopcm wopcm; 947 948 struct intel_csr csr; 949 950 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 951 952 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 953 * controller on different i2c buses. */ 954 struct mutex gmbus_mutex; 955 956 /** 957 * Base address of where the gmbus and gpio blocks are located (either 958 * on PCH or on SoC for platforms without PCH). 959 */ 960 u32 gpio_mmio_base; 961 962 u32 hsw_psr_mmio_adjust; 963 964 /* MMIO base address for MIPI regs */ 965 u32 mipi_mmio_base; 966 967 u32 pps_mmio_base; 968 969 #ifdef __NetBSD__ 970 spinlock_t gmbus_wait_lock; 971 drm_waitqueue_t gmbus_wait_queue; 972 #else 973 wait_queue_head_t gmbus_wait_queue; 974 #endif 975 976 struct pci_dev *bridge_dev; 977 978 struct intel_engine_cs *engine[I915_NUM_ENGINES]; 979 struct rb_root uabi_engines; 980 struct llist_head uabi_engines_llist; 981 982 struct resource mch_res; 983 984 /* protects the irq masks */ 985 spinlock_t irq_lock; 986 987 bool display_irqs_enabled; 988 989 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 990 struct pm_qos_request pm_qos; 991 992 /* Sideband mailbox protection */ 993 struct mutex sb_lock; 994 struct pm_qos_request sb_qos; 995 996 /** Cached value of IMR to avoid reads in updating the bitfield */ 997 union { 998 u32 irq_mask; 999 u32 de_irq_mask[I915_MAX_PIPES]; 1000 }; 1001 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1002 1003 struct i915_hotplug hotplug; 1004 struct intel_fbc fbc; 1005 struct i915_drrs drrs; 1006 struct intel_opregion opregion; 1007 struct intel_vbt_data vbt; 1008 1009 bool preserve_bios_swizzle; 1010 1011 /* overlay */ 1012 struct intel_overlay *overlay; 1013 1014 /* backlight registers and fields in struct intel_panel */ 1015 struct mutex backlight_lock; 1016 1017 /* protects panel power sequencer state */ 1018 struct mutex pps_mutex; 1019 1020 unsigned int fsb_freq, mem_freq, is_ddr3; 1021 unsigned int skl_preferred_vco_freq; 1022 unsigned int max_cdclk_freq; 1023 1024 unsigned int max_dotclk_freq; 1025 unsigned int rawclk_freq; 1026 unsigned int hpll_freq; 1027 unsigned int fdi_pll_freq; 1028 unsigned int czclk_freq; 1029 1030 /* 1031 * For reading holding any crtc lock is sufficient, 1032 * for writing must hold all of them. 1033 */ 1034 struct { 1035 /* 1036 * The current logical cdclk state. 1037 * See intel_atomic_state.cdclk.logical 1038 */ 1039 struct intel_cdclk_state logical; 1040 /* 1041 * The current actual cdclk state. 1042 * See intel_atomic_state.cdclk.actual 1043 */ 1044 struct intel_cdclk_state actual; 1045 /* The current hardware cdclk state */ 1046 struct intel_cdclk_state hw; 1047 1048 /* cdclk, divider, and ratio table from bspec */ 1049 const struct intel_cdclk_vals *table; 1050 1051 int force_min_cdclk; 1052 } cdclk; 1053 1054 /** 1055 * wq - Driver workqueue for GEM. 
1056 * 1057 * NOTE: Work items scheduled here are not allowed to grab any modeset 1058 * locks, for otherwise the flushing done in the pageflip code will 1059 * result in deadlocks. 1060 */ 1061 struct workqueue_struct *wq; 1062 1063 /* ordered wq for modesets */ 1064 struct workqueue_struct *modeset_wq; 1065 /* unbound hipri wq for page flips/plane updates */ 1066 struct workqueue_struct *flip_wq; 1067 1068 /* Display functions */ 1069 struct drm_i915_display_funcs display; 1070 1071 /* PCH chipset type */ 1072 enum intel_pch pch_type; 1073 unsigned short pch_id; 1074 1075 unsigned long quirks; 1076 1077 struct drm_atomic_state *modeset_restore_state; 1078 struct drm_modeset_acquire_ctx reset_ctx; 1079 1080 spinlock_t atomic_commit_lock; 1081 drm_waitqueue_t atomic_commit_wq; 1082 1083 struct i915_ggtt ggtt; /* VM representing the global address space */ 1084 1085 struct i915_gem_mm mm; 1086 DECLARE_HASHTABLE(mm_structs, 7); 1087 struct mutex mm_lock; 1088 1089 /* Kernel Modesetting */ 1090 1091 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1092 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1093 1094 #ifdef CONFIG_DEBUG_FS 1095 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1096 #endif 1097 1098 /* dpll and cdclk state is protected by connection_mutex */ 1099 int num_shared_dpll; 1100 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1101 const struct intel_dpll_mgr *dpll_mgr; 1102 1103 /* 1104 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 1105 * Must be global rather than per dpll, because on some platforms 1106 * plls share registers. 1107 */ 1108 struct mutex dpll_lock; 1109 1110 /* 1111 * For reading active_pipes, min_cdclk, min_voltage_level holding 1112 * any crtc lock is sufficient, for writing must hold all of them. 1113 */ 1114 u8 active_pipes; 1115 /* minimum acceptable cdclk for each pipe */ 1116 int min_cdclk[I915_MAX_PIPES]; 1117 /* minimum acceptable voltage level for each pipe */ 1118 u8 min_voltage_level[I915_MAX_PIPES]; 1119 1120 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1121 1122 struct i915_wa_list gt_wa_list; 1123 1124 struct i915_frontbuffer_tracking fb_tracking; 1125 1126 struct intel_atomic_helper { 1127 struct llist_head free_list; 1128 struct work_struct free_work; 1129 } atomic_helper; 1130 1131 u16 orig_clock; 1132 1133 bool mchbar_need_disable; 1134 1135 struct intel_l3_parity l3_parity; 1136 1137 /* 1138 * edram size in MB. 1139 * Cannot be determined by PCIID. You must always read a register. 1140 */ 1141 u32 edram_size_mb; 1142 1143 struct i915_power_domains power_domains; 1144 1145 struct i915_psr psr; 1146 1147 struct i915_gpu_error gpu_error; 1148 1149 struct drm_i915_gem_object *vlv_pctx; 1150 1151 /* list of fbdevs registered on this device */ 1152 struct intel_fbdev *fbdev; 1153 struct work_struct fbdev_suspend_work; 1154 1155 struct drm_property *broadcast_rgb_property; 1156 struct drm_property *force_audio_property; 1157 1158 /* hda/i915 audio component */ 1159 struct i915_audio_component *audio_component; 1160 bool audio_component_registered; 1161 /** 1162 * av_mutex - mutex for audio/video sync 1163 * 1164 */ 1165 struct mutex av_mutex; 1166 int audio_power_refcount; 1167 u32 audio_freq_cntrl; 1168 1169 u32 fdi_rx_config; 1170 1171 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 1172 u32 chv_phy_control; 1173 /* 1174 * Shadows for CHV DPLL_MD regs to keep the state 1175 * checker somewhat working in the presence of hardware 1176 * crappiness (can't read out DPLL_MD for pipes B & C). 
1177 */ 1178 u32 chv_dpll_md[I915_MAX_PIPES]; 1179 u32 bxt_phy_grc; 1180 1181 u32 suspend_count; 1182 bool power_domains_suspended; 1183 struct i915_suspend_saved_registers regfile; 1184 struct vlv_s0ix_state *vlv_s0ix_state; 1185 1186 enum { 1187 I915_SAGV_UNKNOWN = 0, 1188 I915_SAGV_DISABLED, 1189 I915_SAGV_ENABLED, 1190 I915_SAGV_NOT_CONTROLLED 1191 } sagv_status; 1192 1193 u32 sagv_block_time_us; 1194 1195 struct { 1196 /* 1197 * Raw watermark latency values: 1198 * in 0.1us units for WM0, 1199 * in 0.5us units for WM1+. 1200 */ 1201 /* primary */ 1202 u16 pri_latency[5]; 1203 /* sprite */ 1204 u16 spr_latency[5]; 1205 /* cursor */ 1206 u16 cur_latency[5]; 1207 /* 1208 * Raw watermark memory latency values 1209 * for SKL for all 8 levels 1210 * in 1us units. 1211 */ 1212 u16 skl_latency[8]; 1213 1214 /* current hardware state */ 1215 union { 1216 struct ilk_wm_values hw; 1217 struct skl_ddb_values skl_hw; 1218 struct vlv_wm_values vlv; 1219 struct g4x_wm_values g4x; 1220 }; 1221 1222 u8 max_level; 1223 1224 /* 1225 * Should be held around atomic WM register writing; also 1226 * protects * intel_crtc->wm.active and 1227 * crtc_state->wm.need_postvbl_update. 1228 */ 1229 struct mutex wm_mutex; 1230 1231 /* 1232 * Set during HW readout of watermarks/DDB. Some platforms 1233 * need to know when we're still using BIOS-provided values 1234 * (which we don't fully trust). 1235 */ 1236 bool distrust_bios_wm; 1237 } wm; 1238 1239 struct dram_info { 1240 bool valid; 1241 bool is_16gb_dimm; 1242 u8 num_channels; 1243 u8 ranks; 1244 u32 bandwidth_kbps; 1245 bool symmetric_memory; 1246 enum intel_dram_type { 1247 INTEL_DRAM_UNKNOWN, 1248 INTEL_DRAM_DDR3, 1249 INTEL_DRAM_DDR4, 1250 INTEL_DRAM_LPDDR3, 1251 INTEL_DRAM_LPDDR4 1252 } type; 1253 } dram_info; 1254 1255 struct intel_bw_info { 1256 /* for each QGV point */ 1257 unsigned int deratedbw[I915_NUM_QGV_POINTS]; 1258 u8 num_qgv_points; 1259 u8 num_planes; 1260 } max_bw[6]; 1261 1262 struct drm_private_obj bw_obj; 1263 1264 struct intel_runtime_pm runtime_pm; 1265 1266 struct i915_perf perf; 1267 1268 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1269 struct intel_gt gt; 1270 1271 struct { 1272 struct i915_gem_contexts { 1273 spinlock_t lock; /* locks list */ 1274 struct list_head list; 1275 1276 struct llist_head free_list; 1277 struct work_struct free_work; 1278 } contexts; 1279 1280 /* 1281 * We replace the local file with a global mappings as the 1282 * backing storage for the mmap is on the device and not 1283 * on the struct file, and we do not want to prolong the 1284 * lifetime of the local fd. To minimise the number of 1285 * anonymous inodes we create, we use a global singleton to 1286 * share the global mapping. 1287 */ 1288 struct file *mmap_singleton; 1289 } gem; 1290 1291 u8 pch_ssc_use; 1292 1293 /* For i915gm/i945gm vblank irq workaround */ 1294 u8 vblank_enabled; 1295 1296 /* perform PHY state sanity checks? */ 1297 bool chv_phy_assert[2]; 1298 1299 bool ipc_enabled; 1300 1301 /* Used to save the pipe-to-encoder mapping for audio */ 1302 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 1303 1304 /* necessary resource sharing with HDMI LPE audio driver. */ 1305 struct { 1306 struct platform_device *platdev; 1307 int irq; 1308 } lpe_audio; 1309 1310 struct i915_pmu pmu; 1311 1312 struct i915_hdcp_comp_master *hdcp_master; 1313 bool hdcp_comp_added; 1314 1315 /* Mutex to protect the above hdcp component related values. 
*/ 1316 struct mutex hdcp_comp_mutex; 1317 1318 I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;) 1319 1320 #ifdef __NetBSD__ 1321 pci_intr_handle_t *pci_ihp; 1322 void *pci_intrcookie; 1323 #endif 1324 1325 /* 1326 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1327 * will be rejected. Instead look for a better place. 1328 */ 1329 }; 1330 1331 #ifdef __NetBSD__ 1332 # undef __iomem 1333 # undef __i915_iomem 1334 #endif 1335 1336 struct dram_dimm_info { 1337 u8 size, width, ranks; 1338 }; 1339 1340 struct dram_channel_info { 1341 struct dram_dimm_info dimm_l, dimm_s; 1342 u8 ranks; 1343 bool is_16gb_dimm; 1344 }; 1345 1346 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1347 { 1348 return __UNCONST(const_container_of(dev, struct drm_i915_private, drm)); 1349 } 1350 1351 #ifndef __NetBSD__ 1352 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 1353 { 1354 return dev_get_drvdata(kdev); 1355 } 1356 #endif 1357 1358 static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) 1359 { 1360 return pci_get_drvdata(pdev); 1361 } 1362 1363 /* Simple iterator over all initialised engines */ 1364 #define for_each_engine(engine__, dev_priv__, id__) \ 1365 for ((id__) = 0; \ 1366 (id__) < I915_NUM_ENGINES; \ 1367 (id__)++) \ 1368 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 1369 1370 /* Iterator over subset of engines selected by mask */ 1371 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 1372 for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \ 1373 (tmp__) ? \ 1374 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 1375 0;) 1376 1377 #define rb_to_uabi_engine(rb) \ 1378 rb_entry_safe(rb, struct intel_engine_cs, uabi_node.rbtree) 1379 1380 #define for_each_uabi_engine(engine__, i915__) \ 1381 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ 1382 (engine__); \ 1383 (engine__) = rb_to_uabi_engine(rb_next2(&(i915__)->uabi_engines, &(engine__)->uabi_node.rbtree))) 1384 1385 #define I915_GTT_OFFSET_NONE ((u32)-1) 1386 1387 /* 1388 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 1389 * considered to be the frontbuffer for the given plane interface-wise. This 1390 * doesn't mean that the hw necessarily already scans it out, but that any 1391 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 1392 * 1393 * We have one bit per pipe and per scanout plane type. 
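 *
 * As an illustrative example of the helpers defined just below (values
 * assume the usual enum pipe/enum plane_id numbering): with
 * INTEL_FRONTBUFFER_BITS_PER_PIPE == 8, INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY)
 * evaluates to BIT(PLANE_PRIMARY + 8), and INTEL_FRONTBUFFER_OVERLAY(PIPE_B)
 * to BIT(15).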
1394 */ 1395 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 1396 #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ 1397 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ 1398 BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ 1399 BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ 1400 }) 1401 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 1402 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 1403 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 1404 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ 1405 INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 1406 1407 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) 1408 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) 1409 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) 1410 1411 #define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen) 1412 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) 1413 1414 #define REVID_FOREVER 0xff 1415 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 1416 1417 #define INTEL_GEN_MASK(s, e) ( \ 1418 BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ 1419 BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ 1420 GENMASK((e) - 1, (s) - 1)) 1421 1422 /* Returns true if Gen is in inclusive range [Start, End] */ 1423 #define IS_GEN_RANGE(dev_priv, s, e) \ 1424 (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e)))) 1425 1426 #define IS_GEN(dev_priv, n) \ 1427 (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ 1428 INTEL_INFO(dev_priv)->gen == (n)) 1429 1430 #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) 1431 1432 /* 1433 * Return true if revision is in range [since,until] inclusive. 1434 * 1435 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 1436 */ 1437 #define IS_REVID(p, since, until) \ 1438 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 1439 1440 static __always_inline unsigned int 1441 __platform_mask_index(const struct intel_runtime_info *info, 1442 enum intel_platform p) 1443 { 1444 const unsigned int pbits = 1445 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 1446 1447 /* Expand the platform_mask array if this fails. 
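 * Each element of platform_mask[] holds pbits platform bits; the low
 * INTEL_SUBPLATFORM_BITS bits of every element are reserved for subplatform
 * flags, which is why p / pbits selects the array element here and
 * __platform_mask_bit() adds INTEL_SUBPLATFORM_BITS to the bit position.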
*/ 1448 BUILD_BUG_ON(INTEL_MAX_PLATFORMS > 1449 pbits * ARRAY_SIZE(info->platform_mask)); 1450 1451 return p / pbits; 1452 } 1453 1454 static __always_inline unsigned int 1455 __platform_mask_bit(const struct intel_runtime_info *info, 1456 enum intel_platform p) 1457 { 1458 const unsigned int pbits = 1459 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; 1460 1461 return p % pbits + INTEL_SUBPLATFORM_BITS; 1462 } 1463 1464 static inline u32 1465 intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) 1466 { 1467 const unsigned int pi = __platform_mask_index(info, p); 1468 1469 return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; 1470 } 1471 1472 static __always_inline bool 1473 IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p) 1474 { 1475 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 1476 const unsigned int pi = __platform_mask_index(info, p); 1477 const unsigned int pb = __platform_mask_bit(info, p); 1478 1479 #if 0 1480 BUILD_BUG_ON(!__builtin_constant_p(p)); 1481 #endif 1482 1483 return info->platform_mask[pi] & BIT(pb); 1484 } 1485 1486 static __always_inline inline bool 1487 IS_SUBPLATFORM(const struct drm_i915_private *i915, 1488 enum intel_platform p, unsigned int s) 1489 { 1490 const struct intel_runtime_info *info = RUNTIME_INFO(i915); 1491 const unsigned int pi = __platform_mask_index(info, p); 1492 const unsigned int pb = __platform_mask_bit(info, p); 1493 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1; 1494 const u32 mask = info->platform_mask[pi]; 1495 1496 #if 0 1497 BUILD_BUG_ON(!__builtin_constant_p(p)); 1498 BUILD_BUG_ON(!__builtin_constant_p(s)); 1499 BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS); 1500 #endif 1501 1502 /* Shift and test on the MSB position so sign flag can be used. 
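 * In effect this is (mask & BIT(pb)) && (mask & BIT(s)): each shift moves
 * the bit of interest up to the MSB, so the AND of the two shifted values
 * has the MSB set only when both the platform bit and the subplatform bit
 * are set in mask.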
*/ 1503 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb); 1504 } 1505 1506 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) 1507 #define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx) 1508 1509 #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) 1510 #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) 1511 #define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X) 1512 #define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G) 1513 #define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G) 1514 #define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM) 1515 #define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G) 1516 #define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM) 1517 #define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G) 1518 #define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM) 1519 #define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45) 1520 #define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45) 1521 #define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv)) 1522 #define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW) 1523 #define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33) 1524 #define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE) 1525 #define IS_IRONLAKE_M(dev_priv) \ 1526 (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv)) 1527 #define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE) 1528 #define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \ 1529 INTEL_INFO(dev_priv)->gt == 1) 1530 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) 1531 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) 1532 #define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) 1533 #define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) 1534 #define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) 1535 #define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) 1536 #define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) 1537 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 1538 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 1539 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) 1540 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) 1541 #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) 1542 #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) 1543 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 1544 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 1545 #define IS_BDW_ULT(dev_priv) \ 1546 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT) 1547 #define IS_BDW_ULX(dev_priv) \ 1548 IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX) 1549 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 1550 INTEL_INFO(dev_priv)->gt == 3) 1551 #define IS_HSW_ULT(dev_priv) \ 1552 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT) 1553 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 1554 INTEL_INFO(dev_priv)->gt == 3) 1555 #define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \ 1556 INTEL_INFO(dev_priv)->gt == 1) 1557 /* ULX machines are also considered ULT. 
*/ 1558 #define IS_HSW_ULX(dev_priv) \ 1559 IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX) 1560 #define IS_SKL_ULT(dev_priv) \ 1561 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT) 1562 #define IS_SKL_ULX(dev_priv) \ 1563 IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX) 1564 #define IS_KBL_ULT(dev_priv) \ 1565 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) 1566 #define IS_KBL_ULX(dev_priv) \ 1567 IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) 1568 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1569 INTEL_INFO(dev_priv)->gt == 2) 1570 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1571 INTEL_INFO(dev_priv)->gt == 3) 1572 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 1573 INTEL_INFO(dev_priv)->gt == 4) 1574 #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1575 INTEL_INFO(dev_priv)->gt == 2) 1576 #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ 1577 INTEL_INFO(dev_priv)->gt == 3) 1578 #define IS_CFL_ULT(dev_priv) \ 1579 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) 1580 #define IS_CFL_ULX(dev_priv) \ 1581 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) 1582 #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1583 INTEL_INFO(dev_priv)->gt == 2) 1584 #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ 1585 INTEL_INFO(dev_priv)->gt == 3) 1586 #define IS_CNL_WITH_PORT_F(dev_priv) \ 1587 IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF) 1588 #define IS_ICL_WITH_PORT_F(dev_priv) \ 1589 IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) 1590 1591 #define SKL_REVID_A0 0x0 1592 #define SKL_REVID_B0 0x1 1593 #define SKL_REVID_C0 0x2 1594 #define SKL_REVID_D0 0x3 1595 #define SKL_REVID_E0 0x4 1596 #define SKL_REVID_F0 0x5 1597 #define SKL_REVID_G0 0x6 1598 #define SKL_REVID_H0 0x7 1599 1600 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 1601 1602 #define BXT_REVID_A0 0x0 1603 #define BXT_REVID_A1 0x1 1604 #define BXT_REVID_B0 0x3 1605 #define BXT_REVID_B_LAST 0x8 1606 #define BXT_REVID_C0 0x9 1607 1608 #define IS_BXT_REVID(dev_priv, since, until) \ 1609 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 1610 1611 #define KBL_REVID_A0 0x0 1612 #define KBL_REVID_B0 0x1 1613 #define KBL_REVID_C0 0x2 1614 #define KBL_REVID_D0 0x3 1615 #define KBL_REVID_E0 0x4 1616 1617 #define IS_KBL_REVID(dev_priv, since, until) \ 1618 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 1619 1620 #define GLK_REVID_A0 0x0 1621 #define GLK_REVID_A1 0x1 1622 1623 #define IS_GLK_REVID(dev_priv, since, until) \ 1624 (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 1625 1626 #define CNL_REVID_A0 0x0 1627 #define CNL_REVID_B0 0x1 1628 #define CNL_REVID_C0 0x2 1629 1630 #define IS_CNL_REVID(p, since, until) \ 1631 (IS_CANNONLAKE(p) && IS_REVID(p, since, until)) 1632 1633 #define ICL_REVID_A0 0x0 1634 #define ICL_REVID_A2 0x1 1635 #define ICL_REVID_B0 0x3 1636 #define ICL_REVID_B2 0x4 1637 #define ICL_REVID_C0 0x5 1638 1639 #define IS_ICL_REVID(p, since, until) \ 1640 (IS_ICELAKE(p) && IS_REVID(p, since, until)) 1641 1642 #define TGL_REVID_A0 0x0 1643 1644 #define IS_TGL_REVID(p, since, until) \ 1645 (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) 1646 1647 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) 1648 #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) 1649 #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) 1650 
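/*
 * Illustrative usage sketch (not driver code): the platform, subplatform and
 * revision helpers above are typically combined to gate workarounds, e.g.:
 *
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_A0, SKL_REVID_D0) ||
 *	    IS_BXT_REVID(dev_priv, BXT_REVID_A0, BXT_REVID_B_LAST))
 *		apply_pre_production_wa(dev_priv);
 *
 * apply_pre_production_wa() is a made-up placeholder; the real workarounds
 * live in gt/intel_workarounds.c and the display code.
 */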
1651 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) 1652 1653 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ 1654 unsigned int first__ = (first); \ 1655 unsigned int count__ = (count); \ 1656 (INTEL_INFO(dev_priv)->engine_mask & \ 1657 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 1658 }) 1659 #define VDBOX_MASK(dev_priv) \ 1660 ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) 1661 #define VEBOX_MASK(dev_priv) \ 1662 ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) 1663 1664 /* 1665 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution 1666 * All later gens can run the final buffer from the ppgtt 1667 */ 1668 #define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7) 1669 1670 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) 1671 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) 1672 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) 1673 #define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) 1674 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 1675 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 1676 1677 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) 1678 1679 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 1680 (INTEL_INFO(dev_priv)->has_logical_ring_contexts) 1681 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ 1682 (INTEL_INFO(dev_priv)->has_logical_ring_elsq) 1683 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ 1684 (INTEL_INFO(dev_priv)->has_logical_ring_preemption) 1685 1686 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) 1687 1688 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) 1689 #define HAS_PPGTT(dev_priv) \ 1690 (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) 1691 #define HAS_FULL_PPGTT(dev_priv) \ 1692 (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) 1693 1694 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ 1695 GEM_BUG_ON((sizes) == 0); \ 1696 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ 1697 }) 1698 1699 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) 1700 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 1701 (INTEL_INFO(dev_priv)->display.overlay_needs_physical) 1702 1703 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 1704 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) 1705 1706 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \ 1707 (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9)) 1708 1709 /* WaRsDisableCoarsePowerGating:skl,cnl */ 1710 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 1711 (IS_CANNONLAKE(dev_priv) || \ 1712 IS_SKL_GT3(dev_priv) || \ 1713 IS_SKL_GT4(dev_priv)) 1714 1715 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) 1716 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ 1717 IS_GEMINILAKE(dev_priv) || \ 1718 IS_KABYLAKE(dev_priv)) 1719 1720 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1721 * rows, which changed the alignment requirements and fence programming. 
1722 */ 1723 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \ 1724 !(IS_I915G(dev_priv) || \ 1725 IS_I915GM(dev_priv))) 1726 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) 1727 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) 1728 1729 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 1730 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc) 1731 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7) 1732 1733 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 1734 1735 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) 1736 1737 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) 1738 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) 1739 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) 1740 #define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0) 1741 1742 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) 1743 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) 1744 #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ 1745 1746 #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps) 1747 1748 #define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr) 1749 1750 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) 1751 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) 1752 1753 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 1754 1755 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) 1756 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) 1757 1758 #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) 1759 1760 /* Having GuC is not the same as using GuC */ 1761 #define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc) 1762 #define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc) 1763 1764 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) 1765 1766 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) 1767 1768 1769 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) 1770 1771 #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) 1772 1773 /* DPF == dynamic parity feature */ 1774 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) 1775 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 1776 2 : HAS_L3_DPF(dev_priv)) 1777 1778 #define GT_FREQUENCY_MULTIPLIER 50 1779 #define GEN9_FREQ_SCALER 3 1780 1781 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) 1782 1783 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) 1784 1785 /* Only valid when HAS_DISPLAY() is true */ 1786 #define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display) 1787 1788 static inline bool intel_vtd_active(void) 1789 { 1790 #ifdef CONFIG_INTEL_IOMMU 1791 if (intel_iommu_gfx_mapped) 1792 return true; 1793 #endif 1794 return false; 1795 } 1796 1797 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 1798 { 1799 return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active(); 1800 } 1801 1802 static inline bool 1803 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) 1804 { 1805 return IS_BROXTON(dev_priv) && intel_vtd_active(); 1806 } 1807 1808 /* i915_drv.c */ 1809 #ifdef CONFIG_COMPAT 1810 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 1811 #else 1812 #define i915_compat_ioctl NULL 1813 #endif 1814 extern const struct dev_pm_ops i915_pm_ops; 1815 1816 int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 1817 void i915_driver_remove(struct drm_i915_private *i915); 1818 1819 int i915_drm_resume(struct drm_device *); 1820 int i915_drm_resume_early(struct drm_device *); 1821 int i915_drm_prepare(struct drm_device *); 1822 int i915_drm_suspend(struct drm_device *); 1823 int i915_drm_suspend_late(struct drm_device *, bool); 1824 1825 int i915_resume_switcheroo(struct drm_i915_private *i915); 1826 int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); 1827 1828 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 1829 1830 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 1831 { 1832 return dev_priv->gvt; 1833 } 1834 1835 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 1836 { 1837 return dev_priv->vgpu.active; 1838 } 1839 1840 int i915_getparam_ioctl(struct drm_device *dev, void *data, 1841 struct drm_file *file_priv); 1842 1843 /* i915_gem.c */ 1844 int i915_gem_init_userptr(struct drm_i915_private *dev_priv); 1845 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); 1846 void i915_gem_init_early(struct drm_i915_private *dev_priv); 1847 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); 1848 int i915_gem_freeze(struct drm_i915_private *dev_priv); 1849 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 1850 1851 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915); 1852 1853 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 1854 { 1855 /* 1856 * A single pass should suffice to release all the freed objects (along 1857 * most call paths) , but be a little more paranoid in that freeing 1858 * the objects does take a little amount of time, during which the rcu 1859 * callbacks could have added new objects into the freed list, and 1860 * armed the work again. 1861 */ 1862 while (atomic_read(&i915->mm.free_count)) { 1863 flush_work(&i915->mm.free_work); 1864 rcu_barrier(); 1865 } 1866 } 1867 1868 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) 1869 { 1870 /* 1871 * Similar to objects above (see i915_gem_drain_freed-objects), in 1872 * general we have workers that are armed by RCU and then rearm 1873 * themselves in their callbacks. 
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
	return mutex_lock_interruptible(&dev->struct_mutex);
}

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  const struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return xa_load(&file_priv->context_xa, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
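/*
 * Illustrative sketch (not part of the driver): i915_gem_context_lookup()
 * returns a context with a reference held (or NULL), so callers pair it
 * with a put; i915_gem_context_put() is declared in gem/i915_gem_context.h.
 * The function below is hypothetical.
 *
 *	static int example_use_context(struct drm_i915_file_private *fpriv, u32 id)
 *	{
 *		struct i915_gem_context *ctx;
 *
 *		ctx = i915_gem_context_lookup(fpriv, id);
 *		if (!ctx)
 *			return -ENOENT;
 *
 *		// ... operate on ctx under our own reference ...
 *
 *		i915_gem_context_put(ctx);
 *		return 0;
 *	}
 */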
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned long color,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    u32 batch_offset,
			    u32 batch_length,
			    struct i915_vma *shadow,
			    bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))

#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))

/*
 * These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__)	    __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
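/*
 * Illustrative sketch (not part of the driver) of the locked, explicit
 * forcewake sequence described in the comment above; the register and
 * forcewake domain chosen here are hypothetical.
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore,
 *					   FORCEWAKE_RENDER);
 *
 *	tmp = I915_READ_FW(GEN6_RP_CONTROL);	// untraced, no fw handling
 *	I915_WRITE_FW(GEN6_RP_CONTROL, tmp);
 *
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore,
 *					   FORCEWAKE_RENDER);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */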
/* register wait wrappers for display regs */
#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
	intel_wait_for_register(&(dev_priv_)->uncore, \
				(reg_), (mask_), (value_), (timeout_))

#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({	\
	u32 mask__ = (mask_);						\
	intel_de_wait_for_register((dev_priv_), (reg_),			\
				   mask__, mask__, (timeout_));		\
})

#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
	intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))

/* i915_mm.c */
#ifndef __NetBSD__
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase);
#endif

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}

static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
{
	return intel_guc_is_submission_supported(guc) &&
	       intel_guc_is_running(guc);
}

#endif