/*	$NetBSD: intel_uncore.h,v 1.11 2021/12/19 12:40:43 riastradh Exp $	*/

/*
 * Copyright 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "i915_reg.h"

struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;

struct intel_uncore_mmio_debug {
	spinlock_t lock; /** lock is also taken in irq contexts. */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	u32 suspend_count;
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER	= BIT(FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};
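/*
 * The domain values above are single-bit masks, so callers may OR them
 * together when requesting several domains at once.  A minimal sketch
 * (illustrative only; a real caller must also hold the appropriate
 * runtime-pm wakeref):
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
 *	... MMIO accesses touching both power wells ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER | FORCEWAKE_MEDIA);
 */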
struct intel_uncore_funcs {
	void (*force_wake_get)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);

	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
						  i915_reg_t r);
	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
						   i915_reg_t r);

	u8 (*mmio_readb)(struct intel_uncore *uncore,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct intel_uncore *uncore,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct intel_uncore *uncore,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct intel_uncore *uncore,
			    i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

struct intel_uncore {
#ifdef __NetBSD__
# define __iomem	/* XXX */
#endif
	void __iomem *regs;
#ifdef __NetBSD__
# undef __iomem
#endif

#ifdef __NetBSD__
	bus_space_tag_t regs_bst;
	bus_space_handle_t regs_bsh;
#endif

	struct drm_i915_private *i915;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /** lock is also taken in irq contexts. */

	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct notifier_block pmic_bus_access_nb;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct hrtimer timer;
#ifdef __NetBSD__
		bus_size_t reg_set;
		bus_size_t reg_ack;
#else
		u32 __iomem *reg_set;
		u32 __iomem *reg_ack;
#endif
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
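/*
 * A minimal sketch of the iterators above (illustrative only): walk
 * every initialised domain and total the wake references currently
 * held.  The tmp variable is scratch space consumed by the iterator.
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp, total = 0;
 *
 *	for_each_fw_domain(domain, uncore, tmp)
 *		total += domain->wake_count;
 */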
static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FIFO;
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void
intel_uncore_mmio_debug_fini_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915);
void intel_uncore_fini_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
			i915_reg_t reg,
			u32 mask,
			u32 value,
			unsigned int timeout_ms)
{
	return __intel_wait_for_register(uncore, reg, mask, value, 2,
					 timeout_ms, NULL);
}

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
			   i915_reg_t reg,
			   u32 mask,
			   u32 value,
			   unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(uncore, reg, mask, value,
					    2, timeout_ms, NULL);
}
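/*
 * A minimal sketch of the polling helpers above (illustrative only;
 * HYPOTHETICAL_REG and HYPOTHETICAL_BUSY_BIT stand in for a real
 * register and bitfield from i915_reg.h): poll until the masked bits
 * read back as the requested value, here zero, or 500ms elapse.
 *
 *	int err = intel_wait_for_register(uncore, HYPOTHETICAL_REG,
 *					  HYPOTHETICAL_BUSY_BIT, 0, 500);
 *	if (err)
 *		... timed out, the bit never cleared ...
 */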
/* register access functions */
#ifdef __NetBSD__

static inline uint8_t __raw_uncore_read8(const struct intel_uncore *uncore,
    i915_reg_t reg) {
	return bus_space_read_1(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg));
}
static inline uint16_t __raw_uncore_read16(const struct intel_uncore *uncore,
    i915_reg_t reg) {
	return bus_space_read_2(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg));
}
static inline uint32_t __raw_uncore_read32(const struct intel_uncore *uncore,
    i915_reg_t reg) {
	return bus_space_read_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg));
}
static inline uint64_t __raw_uncore_read64(const struct intel_uncore *uncore,
    i915_reg_t reg) {
#ifdef _LP64
	return bus_space_read_8(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg));
#else
	uint64_t lo, hi;
	lo = bus_space_read_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg));
	hi = bus_space_read_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg) + 4);
	return lo | (hi << 32);
#endif
}
static inline void __raw_uncore_write8(const struct intel_uncore *uncore,
    i915_reg_t reg, uint8_t val) {
	bus_space_write_1(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg), val);
}
static inline void __raw_uncore_write16(const struct intel_uncore *uncore,
    i915_reg_t reg, uint16_t val) {
	bus_space_write_2(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg), val);
}
static inline void __raw_uncore_write32(const struct intel_uncore *uncore,
    i915_reg_t reg, uint32_t val) {
	bus_space_write_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg), val);
}
static inline void __raw_uncore_write64(const struct intel_uncore *uncore,
    i915_reg_t reg, uint64_t val) {
#ifdef _LP64
	bus_space_write_8(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg), val);
#else
	bus_space_write_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg), val & 0xffffffffU);
	bus_space_write_4(uncore->regs_bst, uncore->regs_bsh,
	    i915_mmio_reg_offset(reg) + 4, val >> 32);
#endif
}

#else  /* !__NetBSD__ */

#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#endif /* __NetBSD__ */

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)
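/*
 * A minimal sketch of the generated accessors (illustrative only;
 * HYPOTHETICAL_REG stands in for a real register from i915_reg.h).
 * These dispatch through uncore->funcs, so forcewake handling and,
 * when enabled, tracing happen on the caller's behalf:
 *
 *	u32 val = intel_uncore_read(uncore, HYPOTHETICAL_REG);
 *	intel_uncore_write(uncore, HYPOTHETICAL_REG, val | BIT(0));
 */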
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 upper, lower, old_upper, loop = 0;
	upper = intel_uncore_read(uncore, upper_reg);
	do {
		old_upper = upper;
		lower = intel_uncore_read(uncore, lower_reg);
		upper = intel_uncore_read(uncore, upper_reg);
	} while (upper != old_upper && loop++ < 2);
	return (u64)upper << 32 | lower;
}

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))

static inline void intel_uncore_rmw(struct intel_uncore *uncore,
				    i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write(uncore, reg, val);
}

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
				       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read_fw(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write_fw(uncore, reg, val);
}

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
						i915_reg_t reg, u32 val,
						u32 mask, u32 expected_val)
{
	u32 reg_val;

	intel_uncore_write(uncore, reg, val);
	reg_val = intel_uncore_read(uncore, reg);

	return (reg_val & mask) != expected_val ? -EINVAL : 0;
}

#ifdef __NetBSD__
#define raw_reg_read(uncore, reg)					\
	bus_space_read_4((uncore)->regs_bst, (uncore)->regs_bsh,	\
	    i915_mmio_reg_offset(reg))
#define raw_reg_write(uncore, reg, value)				\
	bus_space_write_4((uncore)->regs_bst, (uncore)->regs_bsh,	\
	    i915_mmio_reg_offset(reg), (value))
#else
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
#endif

#endif /* !__INTEL_UNCORE_H__ */